{
  "canonical_name": "mem0ai/mem0",
  "compilation_id": "pack_e21873310dba459290e7f2f996e2443e",
  "created_at": "2026-05-16T07:05:50.190556+00:00",
  "created_by": "project-pack-compiler",
  "feedback": {
    "carrier_selection_notes": [
      "viable_asset_types=skill, recipe, host_instruction, eval, preflight",
      "recommended_asset_types=skill, recipe, host_instruction, eval, preflight"
    ],
    "evidence_delta": {
      "confirmed_claims": [
        "identity_anchor_present",
        "capability_and_host_targets_present",
        "install_path_declared_or_better"
      ],
      "missing_required_fields": [],
      "must_verify_forwarded": [
        "Run or inspect `pip install mem0ai` in an isolated environment.",
        "Confirm the project exposes the claimed capability to at least one target host."
      ],
      "quickstart_execution_scope": "allowlisted_sandbox_smoke",
      "sandbox_command": "pip install mem0ai",
      "sandbox_container_image": "python:3.12-slim",
      "sandbox_execution_backend": "docker",
      "sandbox_planner_decision": "llm_execute_isolated_install",
      "sandbox_validation_id": "sbx_0a6e8be374c64e2fbc867aedf29f9ca4"
    },
    "feedback_event_type": "project_pack_compilation_feedback",
    "learning_candidate_reasons": [],
    "template_gaps": []
  },
  "identity": {
    "canonical_id": "project_705bceae51896b86302dd8a2c6555df9",
    "canonical_name": "mem0ai/mem0",
    "homepage_url": null,
    "license": "unknown",
    "repo_url": "https://github.com/mem0ai/mem0",
    "slug": "mem0",
    "source_packet_id": "phit_ceee1cd37c374289a51aac4615bd36ea",
    "source_validation_id": "dval_6c80ffb7d0c04b96bdb9f585b93e58b2"
  },
  "merchandising": {
    "best_for": "需要个人工作台能力，并使用 chatgpt的用户",
    "github_forks": 6263,
    "github_stars": 55263,
    "one_liner_en": "Universal memory layer for AI Agents",
    "one_liner_zh": "Universal memory layer for AI Agents",
    "primary_category": {
      "category_id": "personal-workspace",
      "confidence": "medium",
      "name_en": "Personal Workspace",
      "name_zh": "个人工作台",
      "reason": "matched_keywords:memory"
    },
    "target_user": "使用 chatgpt 等宿主 AI 的用户",
    "title_en": "mem0",
    "title_zh": "mem0 能力包",
    "visible_tags": [
      {
        "label_en": "MCP Tools",
        "label_zh": "MCP 工具",
        "source": "repo_evidence_project_characteristics",
        "tag_id": "product_domain-mcp-tools",
        "type": "product_domain"
      },
      {
        "label_en": "Knowledge Base Q&A",
        "label_zh": "知识库问答",
        "source": "repo_evidence_project_characteristics",
        "tag_id": "user_job-knowledge-base-q-a",
        "type": "user_job"
      },
      {
        "label_en": "Structured Data Extraction",
        "label_zh": "结构化数据提取",
        "source": "repo_evidence_project_characteristics",
        "tag_id": "core_capability-structured-data-extraction",
        "type": "core_capability"
      },
      {
        "label_en": "Multi-role Workflow",
        "label_zh": "多角色协作流程",
        "source": "repo_evidence_project_characteristics",
        "tag_id": "workflow_pattern-multi-role-workflow",
        "type": "workflow_pattern"
      },
      {
        "label_en": "Open Source Tool",
        "label_zh": "开源工具",
        "source": "repo_evidence_project_characteristics",
        "tag_id": "selection_signal-open-source-tool",
        "type": "selection_signal"
      }
    ]
  },
  "packet_id": "phit_ceee1cd37c374289a51aac4615bd36ea",
  "page_model": {
    "artifacts": {
      "artifact_slug": "mem0",
      "files": [
        "PROJECT_PACK.json",
        "QUICK_START.md",
        "PROMPT_PREVIEW.md",
        "HUMAN_MANUAL.md",
        "AI_CONTEXT_PACK.md",
        "BOUNDARY_RISK_CARD.md",
        "PITFALL_LOG.md",
        "REPO_INSPECTION.json",
        "REPO_INSPECTION.md",
        "CAPABILITY_CONTRACT.json",
        "EVIDENCE_INDEX.json",
        "CLAIM_GRAPH.json"
      ],
      "required_files": [
        "PROJECT_PACK.json",
        "QUICK_START.md",
        "PROMPT_PREVIEW.md",
        "HUMAN_MANUAL.md",
        "AI_CONTEXT_PACK.md",
        "BOUNDARY_RISK_CARD.md",
        "PITFALL_LOG.md",
        "REPO_INSPECTION.json"
      ]
    },
    "detail": {
      "capability_source": "Project Hit Packet + DownstreamValidationResult",
      "commands": [
        {
          "command": "pip install mem0ai",
          "label": "Python / pip · 官方安装入口",
          "source": "https://github.com/mem0ai/mem0#readme",
          "verified": true
        }
      ],
      "display_tags": [
        "MCP 工具",
        "知识库问答",
        "结构化数据提取",
        "多角色协作流程",
        "开源工具"
      ],
      "eyebrow": "个人工作台",
      "glance": [
        {
          "body": "判断自己是不是目标用户。",
          "label": "最适合谁",
          "value": "需要个人工作台能力，并使用 chatgpt的用户"
        },
        {
          "body": "先理解能力边界，再决定是否继续。",
          "label": "核心价值",
          "value": "Universal memory layer for AI Agents"
        },
        {
          "body": "未完成验证前保持审慎。",
          "label": "继续前",
          "value": "publish to Doramagic.ai project surfaces"
        }
      ],
      "guardrail_source": "Boundary & Risk Card",
      "guardrails": [
        {
          "body": "Prompt Preview 只展示流程，不证明项目已安装或运行。",
          "label": "Check 1",
          "value": "不要把试用当真实运行"
        },
        {
          "body": "chatgpt",
          "label": "Check 2",
          "value": "确认宿主兼容"
        },
        {
          "body": "publish to Doramagic.ai project surfaces",
          "label": "Check 3",
          "value": "先隔离验证"
        }
      ],
      "mode": "skill, recipe, host_instruction, eval, preflight",
      "pitfall_log": {
        "items": [
          {
            "body": "README/documentation is current enough for a first validation pass.",
            "category": "能力坑",
            "evidence": [
              "capability.assumptions | github_repo:656099147 | https://github.com/mem0ai/mem0 | README/documentation is current enough for a first validation pass."
            ],
            "severity": "medium",
            "suggested_check": "将假设转成下游验证清单。",
            "title": "能力判断依赖假设",
            "user_impact": "假设不成立时，用户拿不到承诺的能力。"
          },
          {
            "body": "未记录 last_activity_observed。",
            "category": "维护坑",
            "evidence": [
              "evidence.maintainer_signals | github_repo:656099147 | https://github.com/mem0ai/mem0 | last_activity_observed missing"
            ],
            "severity": "medium",
            "suggested_check": "补 GitHub 最近 commit、release、issue/PR 响应信号。",
            "title": "维护活跃度未知",
            "user_impact": "新项目、停更项目和活跃项目会被混在一起，推荐信任度下降。"
          },
          {
            "body": "no_demo",
            "category": "安全/权限坑",
            "evidence": [
              "downstream_validation.risk_items | github_repo:656099147 | https://github.com/mem0ai/mem0 | no_demo; severity=medium"
            ],
            "severity": "medium",
            "suggested_check": "进入安全/权限治理复核队列。",
            "title": "下游验证发现风险项",
            "user_impact": "下游已经要求复核，不能在页面中弱化。"
          },
          {
            "body": "no_demo",
            "category": "安全/权限坑",
            "evidence": [
              "risks.scoring_risks | github_repo:656099147 | https://github.com/mem0ai/mem0 | no_demo; severity=medium"
            ],
            "severity": "medium",
            "suggested_check": "把风险写入边界卡，并确认是否需要人工复核。",
            "title": "存在评分风险",
            "user_impact": "风险会影响是否适合普通用户安装。"
          },
          {
            "body": "issue_or_pr_quality=unknown。",
            "category": "维护坑",
            "evidence": [
              "evidence.maintainer_signals | github_repo:656099147 | https://github.com/mem0ai/mem0 | issue_or_pr_quality=unknown"
            ],
            "severity": "low",
            "suggested_check": "抽样最近 issue/PR，判断是否长期无人处理。",
            "title": "issue/PR 响应质量未知",
            "user_impact": "用户无法判断遇到问题后是否有人维护。"
          },
          {
            "body": "release_recency=unknown。",
            "category": "维护坑",
            "evidence": [
              "evidence.maintainer_signals | github_repo:656099147 | https://github.com/mem0ai/mem0 | release_recency=unknown"
            ],
            "severity": "low",
            "suggested_check": "确认最近 release/tag 和 README 安装命令是否一致。",
            "title": "发布节奏不明确",
            "user_impact": "安装命令和文档可能落后于代码，用户踩坑概率升高。"
          }
        ],
        "source": "ProjectPitfallLog + ProjectHitPacket + validation + community signals",
        "summary": "发现 6 个潜在踩坑项，其中 0 个为 high/blocking；最高优先级：能力坑 - 能力判断依赖假设。",
        "title": "踩坑日志"
      },
      "snapshot": {
        "contributors": 313,
        "forks": 6263,
        "license": "unknown",
        "note": "站点快照，非实时质量证明；用于开工前背景判断。",
        "stars": 55263
      },
      "source_url": "https://github.com/mem0ai/mem0",
      "steps": [
        {
          "body": "不安装项目，先体验能力节奏。",
          "code": "preview",
          "title": "先试 Prompt"
        },
        {
          "body": "理解输入、输出、失败模式和边界。",
          "code": "manual",
          "title": "读说明书"
        },
        {
          "body": "把上下文交给宿主 AI 继续工作。",
          "code": "context",
          "title": "带给 AI"
        },
        {
          "body": "进入主力环境前先完成安装入口与风险边界验证。",
          "code": "verify",
          "title": "沙箱验证"
        }
      ],
      "subtitle": "Universal memory layer for AI Agents",
      "title": "mem0 能力包",
      "trial_prompt": "# mem0 - Prompt Preview\n\n> Copy the prompt below into your AI host before installing anything.\n> Its purpose is to let you safely feel the project's workflow, not to claim the project has already run.\n\n## Copy this prompt\n\n```text\nYou are using an independent Doramagic capability pack for mem0ai/mem0.\n\nProject:\n- Name: mem0\n- Repository: https://github.com/mem0ai/mem0\n- Summary: Universal memory layer for AI Agents\n- Host target: chatgpt\n\nGoal:\nHelp me evaluate this project for the following task without installing it yet: Universal memory layer for AI Agents\n\nBefore taking action:\n1. Restate my task, success standard, and boundary.\n2. Identify whether the next step requires tools, browser access, network access, filesystem access, credentials, package installation, or host configuration.\n3. Use only the Doramagic Project Pack, the upstream repository, and the source-linked evidence listed below.\n4. If a real command, install step, API call, file write, or host integration is required, mark it as \"requires post-install verification\" and ask for approval first.\n5. If evidence is missing, say \"evidence is missing\" instead of filling the gap.\n\nPreviewable capabilities:\n- Capability 1: Use the source-backed project context to guide one small, checkable workflow step.\n\nCapabilities that require post-install verification:\n- Capability 1: Use the source-backed project context to guide one small, checkable workflow step.\n- Capability 2: Use the source-backed project context to guide one small, checkable workflow step.\n\nCore service flow:\n1. page-introduction: Introduction to Mem0. Produce one small intermediate artifact and wait for confirmation.\n2. page-quickstart: Quick Start Guide. Produce one small intermediate artifact and wait for confirmation.\n3. page-architecture: System Architecture. Produce one small intermediate artifact and wait for confirmation.\n4. page-memory-operations: Memory Operations. 
Produce one small intermediate artifact and wait for confirmation.\n5. page-ai-integration: AI Model Integration. Produce one small intermediate artifact and wait for confirmation.\n\nSource-backed evidence to keep in mind:\n- https://github.com/mem0ai/mem0\n- https://github.com/mem0ai/mem0#readme\n- mem0-plugin/skills/mem0/SKILL.md\n- mem0-plugin/skills/mem0-mcp/SKILL.md\n- openclaw/skills/memory-dream/SKILL.md\n- openclaw/skills/memory-triage/SKILL.md\n- skills/mem0/SKILL.md\n- skills/mem0-cli/SKILL.md\n- skills/mem0-integrate/SKILL.md\n- skills/mem0-test-integration/SKILL.md\n\nFirst response rules:\n1. Start Step 1 only.\n2. Explain the one service action you will perform first.\n3. Ask exactly three questions about my target workflow, success standard, and sandbox boundary.\n4. Stop and wait for my answers.\n\nStep 1 follow-up protocol:\n- After I answer the first three questions, stay in Step 1.\n- Produce six parts only: clarified task, success standard, boundary conditions, two or three options, tradeoffs for each option, and one recommendation.\n- End by asking whether I confirm the recommendation.\n- Do not move to Step 2 until I explicitly confirm.\n\nConversation rules:\n- Advance one step at a time and wait for confirmation after each small artifact.\n- Write outputs as recommendations or planned checks, not as completed execution.\n- Do not claim tests passed, files changed, commands ran, APIs were called, or the project was installed.\n- If the user asks for execution, first provide the sandbox setup, expected output, rollback, and approval checkpoint.\n```\n",
      "voices": [
        {
          "body": "来源平台：github。github/github_issue: Add boto3 timeout configuration support for AWS Bedrock client（https://github.com/mem0ai/mem0/issues/3825）；github/github_issue: feat(openclaw): expose Qdrant payload filter in recall path for category（https://github.com/mem0ai/mem0/issues/5063）；github/github_issue: Type-Aware Memory Retrieval with Support for Deterministic (Persistent) （https://github.com/mem0ai/mem0/issues/4926）；github/github_issue: Why has the Dockerhub Image for the OSS Rest API not been updated（https://github.com/mem0ai/mem0/issues/5090）；github/github_release: Mem0 Python SDK (v2.0.2)（https://github.com/mem0ai/mem0/releases/tag/v2.0.2）；github/github_release: Mem0 Node SDK (v3.0.2)（https://github.com/mem0ai/mem0/releases/tag/ts-v3.0.3）；github/github_release: Mem0 OpenClaw Plugin (v1.0.11)（https://github.com/mem0ai/mem0/releases/tag/openclaw-v1.0.11）；github/github_release: Mem0 Python SDK (v2.0.1)（https://github.com/mem0ai/mem0/releases/tag/v2.0.1）；github/github_release: Mem0 Node SDK (v3.0.2)（https://github.com/mem0ai/mem0/releases/tag/ts-v3.0.2）；github/github_release: Mem0 OpenClaw Plugin (v1.0.10)（https://github.com/mem0ai/mem0/releases/tag/openclaw-v1.0.10）；github/github_release: Mem0 OpenClaw Plugin (v1.0.9)（https://github.com/mem0ai/mem0/releases/tag/openclaw-v1.0.9）；github/github_release: Mem0 OpenClaw Plugin (v1.0.8)（https://github.com/mem0ai/mem0/releases/tag/openclaw-v1.0.8）。这些是项目级外部声音，不作为单独质量证明。",
          "items": [
            {
              "kind": "github_issue",
              "source": "github",
              "title": "Add boto3 timeout configuration support for AWS Bedrock client",
              "url": "https://github.com/mem0ai/mem0/issues/3825"
            },
            {
              "kind": "github_issue",
              "source": "github",
              "title": "feat(openclaw): expose Qdrant payload filter in recall path for category",
              "url": "https://github.com/mem0ai/mem0/issues/5063"
            },
            {
              "kind": "github_issue",
              "source": "github",
              "title": "Type-Aware Memory Retrieval with Support for Deterministic (Persistent) ",
              "url": "https://github.com/mem0ai/mem0/issues/4926"
            },
            {
              "kind": "github_issue",
              "source": "github",
              "title": "Why has the Dockerhub Image for the OSS Rest API not been updated",
              "url": "https://github.com/mem0ai/mem0/issues/5090"
            },
            {
              "kind": "github_release",
              "source": "github",
              "title": "Mem0 Python SDK (v2.0.2)",
              "url": "https://github.com/mem0ai/mem0/releases/tag/v2.0.2"
            },
            {
              "kind": "github_release",
              "source": "github",
              "title": "Mem0 Node SDK (v3.0.2)",
              "url": "https://github.com/mem0ai/mem0/releases/tag/ts-v3.0.3"
            },
            {
              "kind": "github_release",
              "source": "github",
              "title": "Mem0 OpenClaw Plugin (v1.0.11)",
              "url": "https://github.com/mem0ai/mem0/releases/tag/openclaw-v1.0.11"
            },
            {
              "kind": "github_release",
              "source": "github",
              "title": "Mem0 Python SDK (v2.0.1)",
              "url": "https://github.com/mem0ai/mem0/releases/tag/v2.0.1"
            },
            {
              "kind": "github_release",
              "source": "github",
              "title": "Mem0 Node SDK (v3.0.2)",
              "url": "https://github.com/mem0ai/mem0/releases/tag/ts-v3.0.2"
            },
            {
              "kind": "github_release",
              "source": "github",
              "title": "Mem0 OpenClaw Plugin (v1.0.10)",
              "url": "https://github.com/mem0ai/mem0/releases/tag/openclaw-v1.0.10"
            },
            {
              "kind": "github_release",
              "source": "github",
              "title": "Mem0 OpenClaw Plugin (v1.0.9)",
              "url": "https://github.com/mem0ai/mem0/releases/tag/openclaw-v1.0.9"
            },
            {
              "kind": "github_release",
              "source": "github",
              "title": "Mem0 OpenClaw Plugin (v1.0.8)",
              "url": "https://github.com/mem0ai/mem0/releases/tag/openclaw-v1.0.8"
            }
          ],
          "status": "已收录 12 条来源",
          "title": "社区讨论"
        }
      ]
    },
    "homepage_card": {
      "category": "个人工作台",
      "desc": "Universal memory layer for AI Agents",
      "effort": "安装已验证",
      "forks": 6263,
      "icon": "notebook",
      "name": "mem0 能力包",
      "risk": "可发布",
      "slug": "mem0",
      "stars": 55263,
      "tags": [
        "MCP 工具",
        "知识库问答",
        "结构化数据提取",
        "多角色协作流程",
        "开源工具"
      ],
      "thumb": "blue",
      "type": "Skill Pack"
    },
    "manual": {
      "markdown": "# https://github.com/mem0ai/mem0 项目说明书\n\n生成时间：2026-05-16 07:03:34 UTC\n\n## 目录\n\n- [Introduction to Mem0](#page-introduction)\n- [Quick Start Guide](#page-quickstart)\n- [Use Cases and Applications](#page-use-cases)\n- [System Architecture](#page-architecture)\n- [Memory Operations](#page-memory-operations)\n- [AI Model Integration](#page-ai-integration)\n- [Vector Stores and Storage](#page-vector-stores)\n- [Embedding Models](#page-embeddings)\n- [Python SDK](#page-python-sdk)\n- [TypeScript/Node.js SDK](#page-typescript-sdk)\n\n<a id='page-introduction'></a>\n\n## Introduction to Mem0\n\n### 相关页面\n\n相关主题：[System Architecture](#page-architecture), [Quick Start Guide](#page-quickstart), [Use Cases and Applications](#page-use-cases)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [README.md](https://github.com/mem0ai/mem0/blob/main/README.md)\n- [evaluation/README.md](https://github.com/mem0ai/mem0/blob/main/evaluation/README.md)\n- [cli/node/README.md](https://github.com/mem0ai/mem0/blob/main/cli/node/README.md)\n- [cli/python/README.md](https://github.com/mem0ai/mem0/blob/main/cli/python/README.md)\n- [mem0-ts/src/oss/README.md](https://github.com/mem0ai/mem0/blob/main/mem0-ts/src/oss/README.md)\n- [skills/README.md](https://github.com/mem0ai/mem0/blob/main/skills/README.md)\n</details>\n\n# Introduction to Mem0\n\nMem0 is an open-source memory infrastructure designed specifically for AI agents and applications. It provides intelligent, persistent memory management that enables AI systems to retain, retrieve, and utilize information across conversations and sessions. 
Unlike traditional retrieval-augmented generation (RAG) approaches that treat all context equally, Mem0 implements a hierarchical memory system that automatically prioritizes and maintains relevant information over time.\n\nThe platform addresses one of the most significant challenges in AI development: creating systems that can remember user preferences, conversation context, and learned facts in a way that feels natural and improves over time. Mem0 serves as a foundational layer for building production-ready AI agents with scalable long-term memory capabilities.\n\n## Core Concepts\n\n### Memory Hierarchy\n\nMem0 organizes memory into multiple scopes, enabling fine-grained control over information retention and retrieval. The system distinguishes between user-level, agent-level, and session-level memories, allowing developers to choose the appropriate context for different types of information.\n\n| Scope Level | Description | Use Case |\n|-------------|-------------|----------|\n| **User** | Global preferences and facts about a specific user | User preferences, historical context |\n| **Agent** | Information relevant to a specific AI agent instance | Agent-specific learning, personality traits |\n| **Session** | Temporary context within a single conversation | Current discussion topics, immediate context |\n| **Run** | Information specific to a particular execution context | Workflow-specific state |\n\n### Memory Operations\n\nThe memory system supports four fundamental operations that form the backbone of all interactions:\n\n**Add** - Stores new information in the memory system with automatic entity extraction and deduplication. The system intelligently parses input to identify key facts, relationships, and metadata.\n\n**Search** - Retrieves relevant memories using vector similarity search combined with semantic understanding. 
The search operation supports hybrid queries that combine keyword matching with semantic similarity.\n\n**Update** - Modifies existing memories when new information supersedes or refines previously stored facts. The system maintains version history for audit purposes.\n\n**Delete** - Removes specific memories or bulk deletes based on scope filters. Supports soft deletes and hard deletes depending on compliance requirements.\n\n## Architecture Overview\n\nMem0's architecture is designed with modularity and extensibility in mind. The system consists of several interconnected components that work together to provide seamless memory management.\n\n```mermaid\ngraph TD\n    A[AI Agent / Application] --> B[Mem0 API Layer]\n    B --> C[Memory Core Engine]\n    C --> D[Vector Store]\n    C --> E[Graph Store]\n    C --> F[SQLite / Database]\n    D --> G[Embedding Models]\n    E --> H[Entity Extraction]\n    F --> I[Metadata Storage]\n    B --> J[LLM Integration]\n    J --> K[Fact Extraction]\n    J --> L[Memory Synthesis]\n```\n\n### Key Components\n\n| Component | Function | Extensible |\n|-----------|----------|------------|\n| **API Layer** | REST interface for memory operations | Yes - custom endpoints |\n| **Memory Core** | Orchestrates memory operations | Yes - custom strategies |\n| **Vector Store** | Stores embeddings for semantic search | Yes - multiple backends |\n| **Graph Store** | Manages entity relationships | Yes - Neo4j, in-memory |\n| **LLM Integration** | Powers extraction and synthesis | Yes - OpenAI, Anthropic, local |\n| **Embedding Service** | Generates vector representations | Yes - OpenAI, HuggingFace |\n\n## Deployment Options\n\nMem0 offers multiple deployment options to meet different organizational requirements and use cases.\n\n### Cloud Platform\n\nThe managed Mem0 Platform provides a fully hosted solution with zero infrastructure management. 
Users can sign up at app.mem0.ai and immediately begin using the memory infrastructure via SDK or API keys. The cloud platform includes built-in monitoring, automatic scaling, and enterprise-grade security features.\n\n### Self-Hosted Server\n\nFor organizations requiring on-premise deployment or data sovereignty, Mem0 provides a self-hosted option using Docker Compose. The server includes a web-based dashboard for configuration and management.\n\n```bash\n# Recommended bootstrap command\ncd server && make bootstrap\n\n# Manual start\ncd server && docker compose up -d\n```\n\nSelf-hosted deployments support authentication out of the box, with options to configure admin accounts and API keys through a setup wizard or environment variables. The `ADMIN_API_KEY` environment variable enables programmatic admin creation for automated deployments.\n\n### Python SDK\n\nThe primary Python SDK provides the most comprehensive feature set for Python-based applications:\n\n```bash\npip install mem0ai\n```\n\nFor NLP-enhanced features including BM25 keyword matching and entity extraction:\n\n```bash\npip install mem0ai[nlp]\npython -m spacy download en_core_web_sm\n```\n\n### TypeScript/JavaScript SDK\n\nThe official npm package provides TypeScript-first support for JavaScript and TypeScript applications:\n\n```bash\nnpm install mem0ai\n```\n\nThe TypeScript implementation (`mem0-ts`) offers an alternative open-source option using OpenAI for embeddings and completions, with SQLite-based history tracking and optional graph-based memory relationships.\n\n### CLI Tools\n\nCommand-line interfaces are available for both Python and Node.js environments:\n\n```bash\n# Python CLI\npip install mem0-cli\n\n# Node.js CLI\nnpm install -g @mem0/cli\n```\n\n## Key Features\n\n### Intelligent Memory Extraction\n\nMem0 automatically extracts entities, facts, and relationships from conversational input. 
The extraction process uses large language models to understand context and identify meaningful information that should be stored. This reduces the burden on developers to explicitly specify what to remember.\n\n### Hybrid Search Capabilities\n\nMemory retrieval combines multiple search techniques for optimal results:\n\n- **Vector similarity search** - Finds semantically similar memories using embeddings\n- **BM25 keyword matching** - Ensures exact keyword matches are captured\n- **Entity extraction** - Identifies specific entities for targeted retrieval\n\n### Graph-Based Memory (Mem0+)\n\nAn enhanced version called Mem0+ adds graph-based relationship tracking, enabling the system to understand connections between entities and facts. This is particularly useful for complex reasoning tasks that require understanding relationships between different pieces of information.\n\n### Custom Instructions\n\nMem0 supports custom extraction instructions that guide the memory system to prioritize specific types of information based on use case requirements. 
The platform can auto-generate these instructions based on a description of the application domain.\n\n## Configuration and Customization\n\n### Embedding Models\n\nMem0 supports multiple embedding providers and models:\n\n| Provider | Default Model | Custom Model Support |\n|----------|--------------|---------------------|\n| OpenAI | text-embedding-3-small | Yes |\n| HuggingFace | Various sentence-transformers | Yes |\n| Azure OpenAI | text-embedding-3-small | Yes |\n\n### LLM Configuration\n\nLanguage model settings control fact extraction and memory synthesis:\n\n- Provider selection (OpenAI, Anthropic, local models)\n- Model selection per operation type\n- API key management and key rotation\n- Temperature and generation parameters\n\n### Memory Storage\n\nConfigurable storage backends adapt to different deployment requirements:\n\n```mermaid\ngraph LR\n    A[Memory Write] --> B{Storage Backend}\n    B --> C[In-Memory]\n    B --> D[SQLite]\n    B --> E[PostgreSQL + pgvector]\n    B --> F[Qdrant]\n    B --> G[ChromaDB]\n    B --> H[Weaviate]\n```\n\n## Use Cases\n\nMem0 supports a wide range of applications where persistent memory is valuable:\n\n**Personal AI Assistants** - Maintain user preferences, conversation history, and learned habits across sessions to provide increasingly personalized experiences.\n\n**Customer Service Bots** - Remember customer context across multiple support interactions, eliminating the need for customers to repeat information.\n\n**Developer Tools** - Enable AI coding assistants to learn team conventions, project-specific patterns, and individual developer preferences.\n\n**Healthcare Applications** - Maintain patient history and context across appointments while ensuring data privacy and compliance.\n\n**Educational Platforms** - Track student progress, learning preferences, and knowledge gaps to provide personalized tutoring experiences.\n\n## Evaluation Framework\n\nMem0 includes a comprehensive evaluation framework for 
assessing memory system performance across different scenarios. The framework supports comparison between multiple memory techniques including base Mem0, Mem0+, RAG implementations, and LangMem.\n\n| Technique | Command | Description |\n|-----------|---------|-------------|\n| `run-mem0-add` | Add memories using Mem0 | Standard memory addition |\n| `run-mem0-search` | Search memories using Mem0 | Standard memory retrieval |\n| `run-mem0-plus-add` | Add memories using Mem0+ | Graph-enhanced addition |\n| `run-mem0-plus-search` | Search memories using Mem0+ | Graph-enhanced retrieval |\n| `run-rag` | RAG with chunk size 500 | Baseline RAG comparison |\n\nThe evaluation framework uses Makefile commands for standardized testing and supports custom parameter configuration via command-line arguments.\n\n## Citation\n\nIf you use Mem0 in your research or development, please cite the following paper:\n\n```bibtex\n@article{mem0,\n  title={Mem0: Building Production-Ready AI Agents with Scalable Long-Term Memory},\n  author={Chhikara, Prateek and Khant, Dev and Aryan, Saket and Singh, Taranjeet and Yadav, Deshraj},\n  journal={arXiv preprint arXiv:2504.19413},\n  year={2025}\n}\n```\n\n## License\n\nMem0 is released under the Apache 2.0 license, enabling both commercial and open-source usage with minimal restrictions. 
The permissive license allows integration into proprietary applications while requiring attribution and preservation of copyright notices.\n\n---\n\n<a id='page-quickstart'></a>\n\n## Quick Start Guide\n\n### 相关页面\n\n相关主题：[Introduction to Mem0](#page-introduction), [Python SDK](#page-python-sdk), [TypeScript/Node.js SDK](#page-typescript-sdk)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [README.md](https://github.com/mem0ai/mem0/blob/main/README.md)\n- [evaluation/README.md](https://github.com/mem0ai/mem0/blob/main/evaluation/README.md)\n- [cli/node/README.md](https://github.com/mem0ai/mem0/blob/main/cli/node/README.md)\n- [openmemory/api/README.md](https://github.com/mem0ai/mem0/blob/main/openmemory/api/README.md)\n- [server/dashboard/src/app/setup/page.tsx](https://github.com/mem0ai/mem0/blob/main/server/dashboard/src/app/setup/page.tsx)\n- [server/dashboard/src/app/(root)/dashboard/configuration/page.tsx](https://github.com/mem0ai/mem0/blob/main/server/dashboard/src/app/(root)/dashboard/configuration/page.tsx)\n- [server/dashboard/src/app/(root)/dashboard/api-keys/page.tsx](https://github.com/mem0ai/mem0/blob/main/server/dashboard/src/app/(root)/dashboard/api-keys/page.tsx)\n</details>\n\n# Quick Start Guide\n\nMem0 provides a comprehensive memory infrastructure for AI applications, enabling persistent, personalized, and adaptive AI experiences. This guide covers all deployment options to get you up and running quickly.\n\n## Overview\n\nMem0 is a production-ready memory layer for AI agents that handles memory management including storing, retrieving, and updating user/agent memories across interactions. 
The platform supports multiple deployment options: cloud-hosted, self-hosted server, and local SDK integration.\n\n**Key Features:**\n- Multi-level memory (user, agent, session, app)\n- Hybrid search with semantic and keyword matching\n- Entity extraction and relationship tracking\n- Cloud, self-hosted, and SDK deployment options\n- Cross-platform SDK support (Python, Node.js, CLI)\n\n资料来源：[README.md:1-30]()\n\n## Installation Methods\n\nMem0 supports multiple installation pathways depending on your use case and deployment preference.\n\n### Python SDK\n\nInstall the core Mem0 package via pip:\n\n```bash\npip install mem0ai\n```\n\nFor enhanced search capabilities with NLP support (BM25 keyword matching and entity extraction):\n\n```bash\npip install mem0ai[nlp]\npython -m spacy download en_core_web_sm\n```\n\n资料来源：[README.md:12-16]()\n\n### Node.js SDK\n\nFor JavaScript/TypeScript environments:\n\n```bash\nnpm install mem0ai\n```\n\n资料来源：[README.md:20-22]()\n\n### CLI Tool\n\nInstall the Mem0 CLI for terminal-based memory management:\n\n```bash\nnpm install -g @mem0/cli   # or: pip install mem0-cli\n```\n\n资料来源：[README.md:24-26]()\n\n## Deployment Options\n\nMem0 offers three deployment models to fit different infrastructure requirements.\n\n```mermaid\ngraph TD\n    A[Mem0 Deployment Options] --> B[Cloud Platform]\n    A --> C[Self-Hosted Server]\n    A --> D[Local SDK Integration]\n    \n    B --> B1[app.mem0.ai]\n    B --> B2[API Key Required]\n    \n    C --> C1[Docker Compose]\n    C --> C2[Custom Configuration]\n    \n    D --> D1[Python SDK]\n    D --> D2[Node.js SDK]\n```\n\n资料来源：[README.md:1-30]()\n\n### Cloud Platform\n\nThe quickest path to production memory infrastructure:\n\n1. Sign up at [Mem0 Platform](https://app.mem0.ai?utm_source=oss&utm_medium=readme)\n2. Embed the memory layer via SDK or API keys\n3. 
Start using memory operations immediately\n\n资料来源：[README.md:28-32]()\n\n### Self-Hosted Server\n\nFor organizations requiring full control over their infrastructure.\n\n#### Quick Bootstrap (Recommended)\n\n```bash\ncd server && make bootstrap\n```\n\nThis single command starts the Docker stack, creates an admin account, and issues your first API key.\n\n#### Manual Setup\n\n```bash\ncd server && docker compose up -d\n```\n\nAccess the setup wizard at `http://localhost:3000`.\n\n> **Note:** Self-hosted authentication is enabled by default. If upgrading from a pre-auth build, set `ADMIN_API_KEY`, register an admin through the wizard, or use `AUTH_DISABLED=true` for local development only.\n\n资料来源：[README.md:17-19]()\n\n**Configuration Requirements:**\nFor detailed configuration options, refer to the [self-hosted documentation](https://docs.mem0.ai/open-source/overview).\n\n资料来源：[README.md:18-19]()\n\n## Initial Setup Workflow\n\n```mermaid\ngraph LR\n    A[Initialize Mem0] --> B[Configure Provider]\n    B --> C[Set API Keys]\n    C --> D[Add Memories]\n    D --> E[Search/Retrieve]\n    \n    F[CLI: mem0 init] --> A\n    G[SDK: Mem0() config] --> A\n```\n\n### Web Dashboard Setup\n\nThe self-hosted server includes a guided setup wizard with the following steps:\n\n| Step | Title | Description |\n|------|-------|-------------|\n| 0 | Create Admin Account | Set up initial admin credentials (name, email, password) |\n| 1 | Configure Provider | Select LLM provider and enter API credentials |\n| 2 | Select Use Case | Choose preset or enter custom use case for instruction generation |\n| 3 | Generate Instructions | Auto-generate custom memory extraction instructions |\n| 4 | Test Setup | Verify configuration with a test API call |\n\n资料来源：[server/dashboard/src/app/setup/page.tsx](https://github.com/mem0ai/mem0/blob/main/server/dashboard/src/app/setup/page.tsx)\n\n**Setup Commands Example:**\n\n```bash\ncurl -X POST ${apiUrl}/memories \\\n  -H \"X-API-Key: ${apiKey}\" \\\n 
 -H \"Content-Type: application/json\" \\\n  -d '{\"messages\": [{\"role\": \"user\", \"content\": \"${testMessage}\"}], \"user_id\": \"setup-test\"}'\n```\n\n资料来源：[server/dashboard/src/app/setup/page.tsx](https://github.com/mem0ai/mem0/blob/main/server/dashboard/src/app/setup/page.tsx)\n\n### CLI Setup\n\nInitialize the CLI with your credentials:\n\n```bash\nmem0 init\nmem0 add \"Prefers dark mode and vim keybindings\" --user-id alice\nmem0 search \"What does Alice prefer?\" --user-id alice\n```\n\n资料来源：[README.md:26-27]()\n\n### Provider Configuration\n\nConfigure your LLM and embedding providers:\n\n| Setting | Description | Example Value |\n|---------|-------------|---------------|\n| LLM Provider | Language model provider | OpenAI, Anthropic, Azure OpenAI |\n| LLM Model | Specific model identifier | gpt-4o, claude-3-5-sonnet-20240620 |\n| Embedder Provider | Embedding model provider | OpenAI, Azure OpenAI |\n| Embedder Model | Embedding model identifier | text-embedding-3-small |\n| API Key | Provider authentication key | sk-... 
|\n\n资料来源：[server/dashboard/src/app/(root)/dashboard/configuration/page.tsx](https://github.com/mem0ai/mem0/blob/main/server/dashboard/src/app/(root)/dashboard/configuration/page.tsx)\n\n## Core Operations\n\n### Adding Memories\n\nMemories can be added through various interfaces:\n\n**CLI:**\n```bash\nmem0 add \"User prefers dark mode\" --user-id alice\nmem0 add \"Agent configuration\" --agent-id bot-123\n```\n\n**SDK (Python):**\n```python\nfrom mem0 import Memory\n\nclient = Memory()\nclient.add(\"User prefers dark mode\", user_id=\"alice\")\n```\n\n### Searching Memories\n\n```bash\nmem0 search \"What are user preferences?\" --user-id alice\n```\n\n### Bulk Import\n\nImport memories from a JSON file:\n\n```bash\nmem0 import data.json --user-id alice\n```\n\nJSON file format:\n```json\n[\n  {\n    \"memory\": \"User prefers dark mode\",\n    \"user_id\": \"alice\",\n    \"metadata\": {\"source\": \"survey\"}\n  }\n]\n```\n\nEach item can include `memory` (or `text` or `content`), optional `user_id`, `agent_id`, and `metadata` fields.\n\n资料来源：[cli/node/README.md](https://github.com/mem0ai/mem0/blob/main/cli/node/README.md)\n\n### Entity Management\n\n```bash\n# List entities\nmem0 entity list users\nmem0 entity list agents --output json\n\n# Delete entities\nmem0 entity delete --user-id alice --force\n```\n\n资料来源：[cli/node/README.md](https://github.com/mem0ai/mem0/blob/main/cli/node/README.md)\n\n## API Key Management\n\n### Creating API Keys\n\n1. Navigate to **Dashboard → API Keys**\n2. Click **Create API Key**\n3. 
Save the generated key securely\n\n> **Important:** Save your API key immediately after creation — it will not be displayed again.\n\n资料来源：[server/dashboard/src/app/(root)/dashboard/api-keys/page.tsx](https://github.com/mem0ai/mem0/blob/main/server/dashboard/src/app/(root)/dashboard/api-keys/page.tsx)\n\n### Key Limitations\n\n| Plan | Key Limit | Notes |\n|------|-----------|-------|\n| Free | 3 keys | Consider Cloud for multiple applications |\n| Cloud | Multiple | Project-based isolation available |\n\nA warning banner appears when you reach the 3-key limit on self-hosted deployments.\n\n资料来源：[server/dashboard/src/app/(root)/dashboard/api-keys/page.tsx](https://github.com/mem0ai/mem0/blob/main/server/dashboard/src/app/(root)/dashboard/api-keys/page.tsx)\n\n## CLI Commands Reference\n\n| Command | Description |\n|---------|-------------|\n| `mem0 init` | Initialize CLI with credentials |\n| `mem0 add <text>` | Add a memory |\n| `mem0 search <query>` | Search memories |\n| `mem0 import <file>` | Bulk import from JSON |\n| `mem0 config show` | Display current config |\n| `mem0 config get <key>` | Get specific config value |\n| `mem0 config set <key> <value>` | Set a config value |\n| `mem0 entity list <type>` | List entities (users/agents/apps/runs) |\n| `mem0 entity delete` | Delete an entity |\n| `mem0 event list` | List background events |\n| `mem0 event status <id>` | Check event status |\n| `mem0 status` | Verify API connection |\n| `mem0 version` | Print CLI version |\n\n**Flags:**\n- `--user-id <id>` — Specify user context\n- `--agent-id <id>` — Specify agent context\n- `--preview` — Preview without deleting (for delete operations)\n- `--force` — Skip confirmation prompt\n- `-o, --output` — Output format (text/json)\n\n资料来源：[cli/node/README.md](https://github.com/mem0ai/mem0/blob/main/cli/node/README.md)\n\n## Docker Development (OpenMemory)\n\nFor local API development using OpenMemory:\n\n```bash\n# Build containers\nmake build\n\n# Create environment 
file\nmake env\n# Then edit api/.env and enter OPENAI_API_KEY\n\n# Start services\nmake up\n```\n\nThe API will be available at `http://localhost:8765`\n\n**Common Commands:**\n```bash\nmake logs      # View container logs\nmake shell     # Open shell in container\nmake migrate   # Run database migrations\nmake test      # Run tests\nmake test-clean # Run tests and clean up\nmake down      # Stop containers\n```\n\nAPI documentation available at:\n- Swagger UI: `http://localhost:8765/docs`\n- ReDoc: `http://localhost:8765/redoc`\n\n资料来源：[openmemory/api/README.md](https://github.com/mem0ai/mem0/blob/main/openmemory/api/README.md)\n\n## Running Experiments\n\nFor evaluation purposes, Mem0 provides experiment scripts:\n\n```bash\n# Memory Techniques\nmake run-mem0-add         # Add memories using Mem0\nmake run-mem0-search      # Search memories using Mem0\nmake run-mem0-plus-add    # Add memories using Mem0+ (graph-based)\nmake run-mem0-plus-search # Search memories using Mem0+\n\n# RAG Experiments\nmake run-rag              # Run RAG with chunk size 500\nmake run-full-context     # Run RAG with full context\n\n# Other Techniques\nmake run-langmem          # Run LangMem experiments\nmake run-zep-add          # Add memories using Zep\nmake run-zep-search       # Search memories using Zep\nmake run-openai           # Run OpenAI experiments\n```\n\n**Custom Parameters:**\n\n| Parameter | Description | Default |\n|-----------|-------------|---------|\n| `--technique_type` | Memory technique (mem0, rag, langmem) | mem0 |\n| `--method` | Method to use (add, search) | add |\n| `--chunk_size` | Chunk size for processing | 1000 |\n| `--top_k` | Number of results to retrieve | varies |\n\nAlternatively, run experiments directly:\n```bash\npython run_experiments.py --technique_type [mem0|rag|langmem] [additional parameters]\n```\n\n资料来源：[evaluation/README.md](https://github.com/mem0ai/mem0/blob/main/evaluation/README.md)\n\n## Next Steps\n\n- **Configuration:** Customize 
provider settings in the dashboard configuration page\n- **API Reference:** Explore the full API at `/docs` when running self-hosted\n- **Documentation:** Visit [docs.mem0.ai](https://docs.mem0.ai) for detailed guides\n- **Examples:** Check the `examples/` directory for integration demos\n- **CLI Help:** Run `mem0 --help` for command options\n\n## Citation\n\nIf you use Mem0 in your research or application, please cite:\n\n```bibtex\n@article{mem0,\n  title={Mem0: Building Production-Ready AI Agents with Scalable Long-Term Memory},\n  author={Chhikara, Prateek and Khant, Dev and Aryan, Saket and Singh, Taranjeet and Yadav, Deshraj},\n  journal={arXiv preprint arXiv:2504.19413},\n  year={2025}\n}\n```\n\n资料来源：[README.md:1-10]()\n\n---\n\n<a id='page-use-cases'></a>\n\n## Use Cases and Applications\n\n### 相关页面\n\n相关主题：[Introduction to Mem0](#page-introduction)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [README.md](https://github.com/mem0ai/mem0/blob/main/README.md)\n- [openclaw/README.md](https://github.com/mem0ai/mem0/blob/main/openclaw/README.md)\n- [evaluation/README.md](https://github.com/mem0ai/mem0/blob/main/evaluation/README.md)\n- [cli/node/README.md](https://github.com/mem0ai/mem0/blob/main/cli/node/README.md)\n- [examples/multimodal-demo/src/components/messages.tsx](https://github.com/mem0ai/mem0/blob/main/examples/multimodal-demo/src/components/messages.tsx)\n</details>\n\n# Use Cases and Applications\n\nMem0 provides a comprehensive memory infrastructure for AI applications, enabling developers to build intelligent systems that maintain context across conversations, users, and sessions. This page documents the primary use cases, application patterns, and real-world scenarios where Mem0 adds significant value.\n\n## Overview\n\nMem0 is designed as a memory layer for AI agents and applications. It addresses the fundamental challenge of maintaining stateful, personalized interactions in AI systems that are inherently stateless. 
The platform supports multiple deployment models including self-hosted servers, cloud platforms, and embedded SDK integrations. 资料来源：[README.md:1-30]()\n\n## Core Use Cases\n\n### Personal AI Assistants\n\nMem0 powers personal AI assistants that learn and remember user preferences, habits, and historical interactions. These assistants can recall past conversations, understand user context, and provide personalized responses based on accumulated knowledge.\n\n```mermaid\ngraph TD\n    A[User Input] --> B[Mem0 Memory Layer]\n    B --> C{Retrieve Relevant Memories}\n    C --> D[User Preferences]\n    C --> E[Conversation History]\n    C --> F[Historical Context]\n    D --> G[AI Response Generation]\n    E --> G\n    F --> G\n    G --> H[Store New Memories]\n    H --> B\n```\n\n**Key Features:**\n- Persistent user profiles across sessions\n- Preference learning and adaptation\n- Context-aware response generation\n- Multi-turn conversation continuity\n\n### Customer Support Chatbots\n\nEnterprise customer support systems benefit from Mem0's ability to maintain conversation history and customer context. Support agents and chatbots can access previous tickets, understand ongoing issues, and provide consistent assistance across multiple interaction channels. 资料来源：[README.md:40-60]()\n\n**Implementation Pattern:**\n```python\n# Typical customer support memory flow\nmemory.add(\n    text=\"Customer reported payment failure on order #12345\",\n    user_id=\"customer_456\",\n    metadata={\"ticket_id\": \"T-789\", \"priority\": \"high\"}\n)\n```\n\n### Healthcare Assistants\n\nAI-powered healthcare applications use Mem0 to maintain patient context, track medical history, and ensure continuity of care across multiple interactions. These systems must handle sensitive data with appropriate privacy considerations while providing valuable clinical insights. 
资料来源：[README.md:50-80]()\n\n**Key Considerations:**\n- HIPAA compliance for patient data\n- Structured memory storage for medical records\n- Temporal context preservation\n- Multi-provider information aggregation\n\n### Enterprise Knowledge Management\n\nOrganizations leverage Mem0 to build knowledge bases that automatically capture, organize, and retrieve institutional knowledge. Unlike static knowledge bases, Mem0-powered systems continuously learn from interactions and user feedback.\n\n| Feature | Description | Benefit |\n|---------|-------------|---------|\n| Semantic Search | Natural language queries across memories | Fast information retrieval |\n| Hybrid Search | BM25 + vector embeddings | Comprehensive results |\n| Entity Extraction | Automatic categorization | Organized knowledge |\n| Temporal Weighting | Recent information prioritized | Relevant responses |\n\n资料来源：[README.md:35-45]()\n\n## Application Architecture Patterns\n\n### Multi-Agent Systems\n\nMem0 supports complex multi-agent architectures where different agents share contextual information through a unified memory layer.\n\n```mermaid\ngraph LR\n    A[Agent A] -->|Read/Write| M[Mem0 Memory]\n    B[Agent B] -->|Read/Write| M\n    C[Agent C] -->|Read/Write| M\n    M --> D[Shared Context]\n    D --> E[Coordinated Actions]\n```\n\n**Multi-Agent Memory Configuration:**\n```python\nfrom mem0 import Memory\n\nmemory = Memory.from_ids(\n    user_id=\"shared_session_123\",\n    agent_id=None,  # Shared across agents\n    run_id=\"workflow_456\"\n)\n```\n\n### Retrieval-Augmented Generation (RAG)\n\nMem0 integrates with RAG pipelines to enhance LLM responses with retrieved memories. The platform supports configurable chunk sizes, embedding models, and hybrid search strategies. 
资料来源：[evaluation/README.md:1-50]()\n\n| RAG Configuration | Parameter | Default Value |\n|-------------------|-----------|---------------|\n| Chunk Size | `chunk_size` | 1000 |\n| Embedding Model | `embedding_model` | text-embedding-3-small |\n| Search Technique | `technique_type` | mem0, rag, langmem |\n| Top-K Results | `top_k` | Configurable |\n\n### Multi-Modal Applications\n\nModern AI applications process multiple input types including text, images, and audio. Mem0 stores and retrieves context from multi-modal conversations, enabling coherent responses across different content types. 资料来源：[examples/multimodal-demo/src/components/messages.tsx:1-60]()\n\n## Deployment Scenarios\n\n### Self-Hosted Server\n\nOrganizations requiring full control over their data can deploy Mem0 as a self-hosted solution. The self-hosted server includes a dashboard for management, API key generation, and configuration options. 资料来源：[README.md:60-80]()\n\n```bash\n# Quick start with bootstrap\ncd server && make bootstrap\n\n# Manual Docker deployment\ncd server && docker compose up -d\n```\n\n**Self-Hosted Features:**\n- Admin account creation via setup wizard\n- API key management through dashboard\n- Configuration for LLM and embedding providers\n- Request logging and analytics\n- Webhook support for event notifications\n\n### Cloud Platform\n\nThe Mem0 cloud platform provides a managed solution with additional features including project-based isolation, SSO/SAML authentication, and enterprise support. 
资料来源：[README.md:50-60]()\n\n### Embedded SDK Integration\n\nFor applications requiring client-side or edge deployment, Mem0 provides lightweight SDKs:\n\n| Platform | Installation | Use Case |\n|----------|--------------|----------|\n| Python | `pip install mem0ai` | Backend services, data processing |\n| JavaScript/TypeScript | `npm install mem0ai` | Web applications, Node.js services |\n| CLI | `npm install -g @mem0/cli` | Local development, debugging |\n\n资料来源：[README.md:25-40]()\n\n## CLI Applications\n\nThe Mem0 CLI enables developers to manage memories directly from the terminal, useful for development, debugging, and automation tasks. 资料来源：[cli/node/README.md:1-80]()\n\n```bash\n# Initialize CLI configuration\nmem0 init\n\n# Add memories\nmem0 add \"User prefers dark mode\" --user-id alice\n\n# Search memories\nmem0 search \"What does Alice prefer?\" --user-id alice\n\n# Manage entities\nmem0 entity list users\nmem0 entity delete --user-id alice --force\n```\n\n**CLI Commands Reference:**\n\n| Command | Description | Key Flags |\n|---------|-------------|-----------|\n| `mem0 add` | Add a memory | `--user-id`, `--agent-id`, `--metadata` |\n| `mem0 search` | Search memories | `--user-id`, `--output` |\n| `mem0 list` | List all memories | `--user-id`, `--limit` |\n| `mem0 delete` | Delete memories | `--user-id`, `--force` |\n| `mem0 import` | Bulk import | JSON file support |\n| `mem0 config` | Manage settings | `show`, `get`, `set` |\n| `mem0 status` | Check connection | Project verification |\n| `mem0 event` | Monitor async events | `list`, `status` |\n\n## Evaluation and Benchmarking\n\nMem0 includes comprehensive evaluation tools for comparing different memory techniques and configurations. The evaluation framework supports multiple approaches including Mem0, Mem0+, RAG, and LangMem. 
资料来源：[evaluation/README.md:50-100]()\n\n```bash\n# Run Mem0 experiments\nmake run-mem0-add\nmake run-mem0-search\n\n# Run Mem0+ with graph-based search\nmake run-mem0-plus-add\nmake run-mem0-plus-search\n\n# Run RAG experiments\nmake run-rag\nmake run-full-context\n\n# Run custom experiments\npython run_experiments.py --technique_type mem0 --method add\n```\n\n**Experiment Parameters:**\n\n| Parameter | Description | Valid Values |\n|-----------|-------------|---------------|\n| `--technique_type` | Memory technique | mem0, rag, langmem |\n| `--method` | Operation type | add, search |\n| `--chunk_size` | Processing chunk size | Integer |\n| `--top_k` | Results to retrieve | Integer |\n\n## Industry-Specific Applications\n\n### OpenClaw Platform Integration\n\nOpenClaw demonstrates how Mem0 integrates with specialized AI platforms for specific domains. The platform supports both hosted API mode and self-hosted open-source mode with configurable memory behaviors. 资料来源：[openclaw/README.md:1-50]()\n\n**Platform Mode Configuration:**\n| Key | Type | Description |\n|-----|------|-------------|\n| `apiKey` | string | Mem0 API key (required) |\n| `customInstructions` | string | Extraction rules |\n| `customCategories` | object | Category definitions |\n\n**Open-Source Mode Defaults:**\n| Component | Default Value |\n|-----------|---------------|\n| Embeddings | text-embedding-3-small |\n| Vector Store | Local SQLite |\n| LLM | gpt-5-mini |\n| Database Path | ~/.mem0/vector_store.db |\n\n### Support Inbox Automation\n\nAutomated support systems use Mem0 to track issue resolution history, maintain customer context across channels, and enable intelligent routing based on historical patterns.\n\n### Email Automation\n\nEmail-based workflows leverage Mem0's ability to maintain conversation context across email threads, automatically categorizing and prioritizing messages based on user history and past interactions.\n\n## Best Practices\n\n### Memory Structuring\n\nOrganize 
memories with appropriate metadata for optimal retrieval:\n\n```python\nmemory.add(\n    text=\"Customer's subscription expired\",\n    user_id=\"customer_123\",\n    metadata={\n        \"category\": \"billing\",\n        \"priority\": \"medium\",\n        \"timestamp\": \"2025-01-15\"\n    }\n)\n```\n\n### Privacy Considerations\n\n- Implement data retention policies\n- Use encryption for sensitive information\n- Leverage user consent mechanisms\n- Enable data export and deletion capabilities\n\n### Performance Optimization\n\n- Configure appropriate embedding models for your use case\n- Use hybrid search combining semantic and keyword matching\n- Implement caching for frequently accessed memories\n- Monitor request latency through the dashboard\n\n## Additional Resources\n\n- [Quick Start Guide](https://docs.mem0.ai/)\n- [API Reference](https://docs.mem0.ai/api-reference/)\n- [Self-Hosted Documentation](https://docs.mem0.ai/open-source/overview)\n- [CLI Reference](https://docs.mem0.ai/platform/cli)\n- [Platform Documentation](https://app.mem0.ai)\n\n---\n\n<a id='page-architecture'></a>\n\n## System Architecture\n\n### 相关页面\n\n相关主题：[Introduction to Mem0](#page-introduction), [Memory Operations](#page-memory-operations), [Python SDK](#page-python-sdk), [Vector Stores and Storage](#page-vector-stores)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [mem0/memory/main.py](https://github.com/mem0ai/mem0/blob/main/mem0/memory/main.py)\n- [mem0/memory/base.py](https://github.com/mem0ai/mem0/blob/main/mem0/memory/base.py)\n- [mem0/memory/storage.py](https://github.com/mem0ai/mem0/blob/main/mem0/memory/storage.py)\n- [server/main.py](https://github.com/mem0ai/mem0/blob/main/server/main.py)\n- [server/routers/__init__.py](https://github.com/mem0ai/mem0/blob/main/server/routers/__init__.py)\n- [mem0-ts/src/client/mem0.ts](https://github.com/mem0ai/mem0/blob/main/mem0-ts/src/client/mem0.ts)\n- 
[server/README.md](https://github.com/mem0ai/mem0/blob/main/server/README.md)\n</details>\n\n# System Architecture\n\n## Overview\n\nMem0 is an intelligent memory layer designed for AI agents and applications. It provides persistent, scalable long-term memory capabilities that enable AI systems to retain, retrieve, and manage information across conversations and sessions.\n\n资料来源：[server/README.md](https://github.com/mem0ai/mem0/blob/main/server/README.md)\n\nThe architecture follows a modular design pattern with distinct layers for memory management, storage, API serving, and client implementations. This separation enables flexibility in deployment options and supports multiple use cases from embedded applications to cloud-based services.\n\n## High-Level Architecture\n\n```mermaid\ngraph TD\n    subgraph Client_Layer[\"Client Layer\"]\n        CLI[\"CLI Application<br/>mem0\"]\n        TS_Client[\"TypeScript Client<br/>mem0-ts\"]\n        Python_SDK[\"Python SDK<br/>mem0ai/mem0\"]\n    end\n    \n    subgraph API_Layer[\"API Layer\"]\n        Server[\"FastAPI Server<br/>server/main.py\"]\n        Routers[\"API Routers<br/>server/routers/\"]\n    end\n    \n    subgraph Memory_Core[\"Memory Core\"]\n        Main[\"Memory Manager<br/>mem0/memory/main.py\"]\n        Base[\"Base Memory<br/>mem0/memory/base.py\"]\n        Storage[\"Storage Engine<br/>mem0/memory/storage.py\"]\n    end\n    \n    subgraph Storage_Backend[\"Storage Backend\"]\n        VectorStore[\"Vector Store\"]\n        DB[\"Database\"]\n    end\n    \n    CLI --> Server\n    TS_Client --> Server\n    Python_SDK --> Main\n    Main --> Base\n    Main --> Storage\n    Storage --> VectorStore\n    Storage --> DB\n    Server --> Main\n```\n\n## Core Components\n\n### Memory Module Architecture\n\nThe memory module is the heart of the Mem0 system, implementing the core memory operations.\n\n资料来源：[mem0/memory/main.py](https://github.com/mem0ai/mem0/blob/main/mem0/memory/main.py)\n\n| Component | File | 
Purpose |\n|-----------|------|---------|\n| MemoryManager | `mem0/memory/main.py` | Orchestrates memory operations |\n| BaseMemory | `mem0/memory/base.py` | Abstract base class defining the memory interface |\n| Storage | `mem0/memory/storage.py` | Handles persistence and retrieval of memory data |\n\n### Base Memory Class\n\nThe base class defines the contract that all memory implementations must follow.\n\n资料来源：[mem0/memory/base.py](https://github.com/mem0ai/mem0/blob/main/mem0/memory/base.py)\n\n```mermaid\nclassDiagram\n    class BaseMemory {\n        <<abstract>>\n        +add() AddMemory\n        +search() SearchMemory\n        +get() GetMemory\n        +update() UpdateMemory\n        +delete() DeleteMemory\n        +list() ListMemories\n    }\n    \n    class MemoryManager {\n        +add()\n        +search()\n        +get()\n        +update()\n        +delete()\n        +list()\n        -storage: Storage\n    }\n    \n    BaseMemory <|-- MemoryManager\n```\n\n### Storage Engine\n\nThe storage layer handles the persistence of memory data using vector embeddings and traditional database storage.\n\n资料来源：[mem0/memory/storage.py](https://github.com/mem0ai/mem0/blob/main/mem0/memory/storage.py)\n\n#### Supported Storage Backends\n\n| Storage Type | Description |\n|--------------|-------------|\n| Vector Store | Embedding-based similarity search |\n| SQL Database | Structured data storage for metadata |\n| Memory | In-memory storage for testing |\n| Graph | Graph-based relationships (Mem0+) |\n\n## API Layer\n\n### Server Architecture\n\nThe server layer is built on FastAPI, providing RESTful endpoints for memory operations.\n\n资料来源：[server/main.py](https://github.com/mem0ai/mem0/blob/main/server/main.py)\n\n```mermaid\ngraph LR\n    subgraph Endpoints[\"API Endpoints\"]\n        A[\"Add Memory\"]\n        S[\"Search Memory\"]\n        G[\"Get Memory\"]\n        U[\"Update Memory\"]\n        D[\"Delete Memory\"]\n        L[\"List Memories\"]\n    end\n    \n    
subgraph Router[\"Router Module\"]\n        R[\"server/routers/__init__.py\"]\n    end\n    \n    A --> R\n    S --> R\n    G --> R\n    U --> R\n    D --> R\n    L --> R\n    R --> MemoryCore[\"Memory Core\"]\n```\n\n### API Configuration\n\nThe system supports various configuration options for deployment flexibility.\n\n资料来源：[server/README.md](https://github.com/mem0ai/mem0/blob/main/server/README.md)\n\n| Parameter | Description | Default |\n|-----------|-------------|---------|\n| `OPENAI_API_KEY` | API key for GPT models and embeddings | Required |\n| `MEM0_API_KEY` | Mem0 API key for cloud features | Optional |\n| `MEM0_PROJECT_ID` | Project identifier | Optional |\n| `MEM0_ORGANIZATION_ID` | Organization identifier | Optional |\n| `MODEL` | LLM model for completions | `gpt-4o-mini` |\n| `EMBEDDING_MODEL` | Embedding model | `text-embedding-3-small` |\n| `ZEP_API_KEY` | Zep service API key | Optional |\n\n## Client Implementations\n\n### Python SDK\n\nThe Python SDK provides the primary interface for integrating Mem0 into applications.\n\n资料来源：[mem0ai/mem0](https://github.com/mem0ai/mem0/blob/main/mem0ai/mem0)\n\n```python\nfrom mem0 import Memory\n\nmemory = Memory()\nmemory.add(\"User prefers dark mode\", user_id=\"alice\")\nresults = memory.search(\"What are user preferences?\", user_id=\"alice\")\n```\n\n### TypeScript Client\n\nThe TypeScript implementation provides memory capabilities for JavaScript/TypeScript environments.\n\n资料来源：[mem0-ts/src/client/mem0.ts](https://github.com/mem0ai/mem0/blob/main/mem0-ts/src/client/mem0.ts)\n\n```typescript\nimport { Memory } from \"mem0-ts\";\n\nconst memory = new Memory({\n  embedder: {\n    provider: \"openai\",\n    config: { apiKey: process.env.OPENAI_API_KEY }\n  }\n});\n```\n\n### CLI Application\n\nThe command-line interface provides direct access to memory operations.\n\n资料来源：[cli/README.md](https://github.com/mem0ai/mem0/blob/main/cli/README.md)\n\n| Command | Description |\n|---------|-------------|\n| 
`mem0 init` | Setup wizard for authentication |\n| `mem0 add` | Add memory from text, JSON, or file |\n| `mem0 search` | Search memories using natural language |\n| `mem0 list` | List memories with filters |\n| `mem0 get` | Retrieve specific memory by ID |\n| `mem0 update` | Update memory text or metadata |\n| `mem0 delete` | Delete memory or entity |\n| `mem0 import` | Bulk import from JSON file |\n\n#### CLI Agent Mode\n\nThe CLI supports agent mode for AI agent tool loops:\n\n```bash\nmem0 --agent search \"user preferences\" --user-id alice\nmem0 --agent add \"User prefers dark mode\" --user-id alice\n```\n\n资料来源：[cli/node/README.md](https://github.com/mem0ai/mem0/blob/main/cli/node/README.md)\n\n## Memory Techniques\n\nMem0 supports multiple memory retrieval techniques for different use cases.\n\n资料来源：[evaluation/README.md](https://github.com/mem0ai/mem0/blob/main/evaluation/README.md)\n\n```mermaid\ngraph TD\n    subgraph Techniques[\"Memory Techniques\"]\n        M0[\"Mem0<br/>Vector-based retrieval\"]\n        M0P[\"Mem0+<br/>Graph-based search\"]\n        RAG[\"RAG<br/>Chunk-based retrieval\"]\n        LM[\"LangMem<br/>Language model memory\"]\n    end\n    \n    subgraph Use_Cases[\"Use Cases\"]\n        UC1[\"Personal assistants\"]\n        UC2[\"Customer support\"]\n        UC3[\"Research tools\"]\n        UC4[\"Enterprise applications\"]\n    end\n    \n    M0 --> UC1\n    M0P --> UC2\n    RAG --> UC3\n    LM --> UC4\n```\n\n### Technique Comparison\n\n| Technique | Description | Best For |\n|-----------|-------------|----------|\n| Mem0 | Vector-based semantic search | General purpose memory |\n| Mem0+ | Graph-enhanced retrieval | Complex relationship queries |\n| RAG | Chunk-based retrieval | Document-heavy applications |\n| LangMem | LLM-native memory | Language model integration |\n\n## Data Models\n\n### Memory Entity Structure\n\n| Field | Type | Description |\n|-------|------|-------------|\n| `id` | string | Unique memory identifier |\n| 
`memory` | string | Memory content text |\n| `user_id` | string | Associated user identifier |\n| `agent_id` | string | Associated agent identifier |\n| `app_id` | string | Associated application identifier |\n| `run_id` | string | Associated run identifier |\n| `metadata` | object | Custom metadata key-value pairs |\n| `created_at` | datetime | Creation timestamp |\n| `updated_at` | datetime | Last update timestamp |\n\n### Evaluation Metrics\n\nThe system tracks multiple metrics for performance evaluation.\n\n资料来源：[evaluation/README.md](https://github.com/mem0ai/mem0/blob/main/evaluation/README.md)\n\n| Metric | Description |\n|--------|-------------|\n| BLEU Score | Text similarity measure |\n| F1 Score | Precision/recall balance |\n| LLM Score | Judge-based evaluation |\n\n## Deployment Options\n\n### Local/Embedded Mode\n\nFor applications requiring local-only memory:\n\n- SQLite-based vector store: `~/.mem0/vector_store.db`\n- History database: `~/.mem0/history.db`\n- Memory consolidation state: `<pluginStateDir>/dream-state.json`\n\n资料来源：[openclaw/README.md](https://github.com/mem0ai/mem0/blob/main/openclaw/README.md)\n\n### Cloud Mode\n\nFor managed Mem0 cloud services:\n\n- Requires `MEM0_API_KEY`\n- Project and organization configuration\n- Scalable vector storage\n\n### Server Deployment\n\nThe FastAPI server can be deployed independently:\n\n```bash\n# Start server\npython server/main.py\n\n# Configure via environment variables\n# - Set API keys\n# - Configure storage backends\n# - Set model preferences\n```\n\n资料来源：[server/main.py](https://github.com/mem0ai/mem0/blob/main/server/main.py)\n\n## Vercel AI SDK Integration\n\nMem0 provides seamless integration with the Vercel AI SDK for streaming responses with memory.\n\n资料来源：[vercel-ai-sdk/README.md](https://github.com/mem0ai/mem0/blob/main/vercel-ai-sdk/README.md)\n\n```typescript\nconst mem0 = createMem0({\n  config: {\n    // Model configuration options\n  }\n});\n```\n\n### Best Practices for Vercel 
Integration\n\n1. **User Identification**: Always provide a unique `user_id` for consistent memory retrieval\n2. **Context Management**: Balance context window sizes with memory requirements\n3. **Error Handling**: Implement proper error handling for memory operations\n4. **Memory Cleanup**: Regularly clean up unused memory contexts\n\n## Evaluation Framework\n\nThe evaluation module provides comprehensive testing capabilities.\n\n资料来源：[evaluation/README.md](https://github.com/mem0ai/mem0/blob/main/evaluation/README.md)\n\n### Running Experiments\n\n```bash\n# Run Mem0 experiments\nmake run-mem0-add\nmake run-mem0-search\n\n# Run Mem0+ experiments\nmake run-mem0-plus-add\nmake run-mem0-plus-search\n\n# Run RAG experiments\nmake run-rag\n```\n\n### Evaluation Command-Line Parameters\n\n| Parameter | Description | Default |\n|-----------|-------------|---------|\n| `--technique_type` | Memory technique | `mem0` |\n| `--method` | Method to use | `add` |\n| `--chunk_size` | Processing chunk size | `1000` |\n| `--top_k` | Top memories to retrieve | `30` |\n| `--is_graph` | Use graph-based search | `False` |\n\n## System Flow Diagrams\n\n### Memory Addition Flow\n\n```mermaid\nsequenceDiagram\n    participant Client\n    participant API\n    participant MemoryManager\n    participant Storage\n    participant VectorStore\n    \n    Client->>API: Add memory request\n    API->>MemoryManager: Process memory\n    MemoryManager->>MemoryManager: Extract facts\n    MemoryManager->>Storage: Store memory\n    Storage->>VectorStore: Generate embeddings\n    VectorStore->>Storage: Store vectors\n    Storage->>MemoryManager: Confirm storage\n    MemoryManager->>API: Return memory ID\n    API->>Client: Success response\n```\n\n### Memory Search Flow\n\n```mermaid\nsequenceDiagram\n    participant Client\n    participant API\n    participant MemoryManager\n    participant Storage\n    participant VectorStore\n    \n    Client->>API: Search request\n    API->>MemoryManager: Process 
query\n    MemoryManager->>VectorStore: Generate query embedding\n    VectorStore->>MemoryManager: Return similar memories\n    MemoryManager->>API: Format results\n    API->>Client: Return search results\n```\n\n## Security Considerations\n\n### API Key Management\n\n- Use environment variables for sensitive credentials\n- Rotate API keys periodically\n- Implement proper access controls for production deployments\n\n### Data Privacy\n\n- User data isolation via `user_id` scoping\n- Support for entity-level deletion\n- Optional metadata encryption for sensitive information\n\n## Extensibility Points\n\nThe architecture supports extension through:\n\n1. **Custom Storage Backends**: Implement the storage interface for new backends\n2. **Custom Embedding Providers**: Add support for alternative embedding models\n3. **Custom Memory Techniques**: Extend base class for specialized retrieval\n4. **Plugin System**: OpenClaw integration for additional capabilities\n\n## References\n\n- Main Repository: [mem0ai/mem0](https://github.com/mem0ai/mem0)\n- Documentation: [docs.mem0.ai](https://docs.mem0.ai)\n- Paper Citation: [arXiv:2504.19413](https://arxiv.org/abs/2504.19413)\n\n---\n\n<a id='page-memory-operations'></a>\n\n## Memory Operations\n\n### 相关页面\n\n相关主题：[System Architecture](#page-architecture), [AI Model Integration](#page-ai-integration), [Vector Stores and Storage](#page-vector-stores), [Python SDK](#page-python-sdk)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [mem0/memory/main.py](https://github.com/mem0ai/mem0/blob/main/mem0/memory/main.py)\n- [docs/core-concepts/memory-operations/add.mdx](https://github.com/mem0ai/mem0/blob/main/docs/core-concepts/memory-operations/add.mdx)\n- [docs/core-concepts/memory-operations/search.mdx](https://github.com/mem0ai/mem0/blob/main/docs/core-concepts/memory-operations/search.mdx)\n- 
[docs/core-concepts/memory-operations/update.mdx](https://github.com/mem0ai/mem0/blob/main/docs/core-concepts/memory-operations/update.mdx)\n- [docs/core-concepts/memory-operations/delete.mdx](https://github.com/mem0ai/mem0/blob/main/docs/core-concepts/memory-operations/delete.mdx)\n- [docs/core-concepts/memory-types.mdx](https://github.com/mem0ai/mem0/blob/main/docs/core-concepts/memory-types.mdx)\n- [docs/open-source/features/async-memory.mdx](https://github.com/mem0ai/mem0/blob/main/docs/open-source/features/async-memory.mdx)\n- [docs/open-source/features/metadata-filtering.mdx](https://github.com/mem0ai/mem0/blob/main/docs/open-source/features/metadata-filtering.mdx)\n- [docs/open-source/features/custom-instructions.mdx](https://github.com/mem0ai/mem0/blob/main/docs/open-source/features/custom-instructions.mdx)\n</details>\n\n# Memory Operations\n\nMemory operations are the core CRUD (Create, Read, Update, Delete) interactions that power the Mem0 memory system. These operations enable AI agents to store, retrieve, modify, and delete persistent memory across user sessions, agent executions, and application contexts. The memory operations layer abstracts the complexity of vector storage, semantic indexing, and multi-entity management into a unified API that supports both synchronous and asynchronous execution patterns.\n\n## Overview\n\nThe Mem0 memory system provides five fundamental operations that form the backbone of persistent memory management. Each operation is designed to work with multiple entity scopes, including user-level, agent-level, application-level, and run-level contexts. 
The operations support rich metadata filtering, custom instructions for memory processing, and both blocking and non-blocking execution modes for handling large-scale memory operations.\n\nMemory operations in Mem0 are built on a layered architecture where the core memory module (`mem0/memory/main.py`) handles the business logic, while underlying vector stores and databases manage persistence. This separation allows Mem0 to support different deployment scenarios from local SQLite-based storage to cloud-hosted vector databases.\n\n## Core Memory Operations\n\n### Add Memory\n\nThe **Add** operation is the primary mechanism for storing new information in the memory system. When a memory is added, Mem0 performs several processing steps including embedding generation, fact extraction, and semantic categorization before storing the data in the appropriate vector store.\n\n**Function signature and parameters:**\n\n```python\ndef add(\n    messages: str | list[dict],\n    user_id: str | None = None,\n    agent_id: str | None = None,\n    app_id: str | None = None,\n    run_id: str | None = None,\n    metadata: dict | None = None,\n    filter_version: str | None = \"v1.0\",\n    prompt: str | None = None,\n    max_items: int | None = None\n) -> dict\n```\n\n资料来源：[mem0/memory/main.py](https://github.com/mem0ai/mem0/blob/main/mem0/memory/main.py)\n\n**Operation workflow:**\n\n```mermaid\ngraph TD\n    A[Input: messages + entity identifiers] --> B[Validate input and entity scope]\n    B --> C[Generate vector embeddings]\n    C --> D[Extract facts using LLM]\n    D --> E[Apply custom instructions if configured]\n    E --> F[Store in vector store with metadata]\n    F --> G[Return memory IDs and stored content]\n```\n\n**Adding memories via CLI:**\n\n```bash\n# Add a simple text memory\nmem0 add \"I prefer dark mode\" --user-id alice\n\n# Add from a JSON messages array\nmem0 add --file conversation.json --user-id alice\n\n# Add from stdin\necho \"Loves hiking on weekends\" | 
mem0 add --user-id alice\n\n# Add with metadata\nmem0 add \"User prefers TypeScript over JavaScript\" --metadata '{\"category\": \"preference\", \"priority\": \"high\"}'\n```\n\n资料来源：[cli/python/README.md](https://github.com/mem0ai/mem0/blob/main/cli/python/README.md)\n\n### Search Memory\n\nThe **Search** operation retrieves relevant memories based on natural language queries. Mem0 converts the query into a vector embedding and performs similarity search against stored memories, returning results ranked by relevance. The search operation supports filtering by entity scope, metadata attributes, and memory types.\n\n**Function signature and parameters:**\n\n```python\ndef search(\n    query: str,\n    user_id: str | None = None,\n    agent_id: str | None = None,\n    app_id: str | None = None,\n    run_id: str | None = None,\n    version: str | None = \"v1.1\",\n    limit: int = 10,\n    category: str | None = None,\n    filter: dict | None = None,\n    rerank: bool = False\n) -> list[dict]\n```\n\n资料来源：[docs/core-concepts/memory-operations/search.mdx](https://github.com/mem0ai/mem0/blob/main/docs/core-concepts/memory-operations/search.mdx)\n\n**Search with metadata filtering:**\n\nMetadata filtering allows precise memory retrieval based on specific attributes stored with each memory. 
This is particularly useful for retrieving memories that match certain criteria without relying solely on semantic similarity.\n\n```python\nresult = memory.search(\n    query=\"user preferences\",\n    user_id=\"alice\",\n    filter={\n        \"category\": \"preference\",\n        \"priority\": {\"$eq\": \"high\"}\n    }\n)\n```\n\n资料来源：[docs/open-source/features/metadata-filtering.mdx](https://github.com/mem0ai/mem0/blob/main/docs/open-source/features/metadata-filtering.mdx)\n\n**CLI search examples:**\n\n```bash\n# Basic semantic search\nmem0 search \"What are Alice's preferences?\" --user-id alice\n\n# Search with output formatting\nmem0 search \"preferences\" --output json --top-k 20\n\n# Search within specific scope\nmem0 search \"agent behavior\" --agent-id agent-123\n```\n\n### Get Memory\n\nThe **Get** operation retrieves a specific memory by its unique identifier. Unlike search which performs semantic similarity, get provides direct access to a known memory record for viewing, editing, or deletion operations.\n\n**CLI usage:**\n\n```bash\n# Retrieve a specific memory by ID\nmem0 get 7b3c1a2e-4d5f-6789-abcd-ef0123456789\n\n# Get memory with JSON output for AI agent processing\nmem0 get 7b3c1a2e-4d5f-6789-abcd-ef0123456789 --output json\n```\n\n资料来源：[cli/node/README.md](https://github.com/mem0ai/mem0/blob/main/cli/node/README.md)\n\n### Update Memory\n\nThe **Update** operation modifies the content or metadata of an existing memory while preserving the memory's history and relationships. 
The update operation preserves the original memory ID and maintains audit trails of modifications.\n\n**Function signature and parameters:**\n\n```python\ndef update(\n    memory_id: str,\n    data: str | None = None,\n    metadata: dict | None = None,\n    user_id: str | None = None\n) -> dict\n```\n\n资料来源：[docs/core-concepts/memory-operations/update.mdx](https://github.com/mem0ai/mem0/blob/main/docs/core-concepts/memory-operations/update.mdx)\n\n**Update operation workflow:**\n\n```mermaid\ngraph TD\n    A[Update request with memory_id] --> B[Locate existing memory record]\n    B --> C[Apply content or metadata changes]\n    C --> D[Update vector embeddings if content changed]\n    D --> E[Preserve modification history]\n    E --> F[Return updated memory object]\n```\n\n**CLI update examples:**\n\n```bash\n# Update memory text\nmem0 update <memory-id> \"Updated preference text\"\n\n# Update metadata only\nmem0 update <memory-id> --metadata '{\"priority\": \"high\"}'\n\n# Update via stdin\necho \"new text\" | mem0 update <memory-id>\n```\n\n### Delete Memory\n\nThe **Delete** operation removes memories from the storage system. 
Mem0 supports multiple deletion strategies including single memory deletion, bulk deletion by scope, and entity-level deletion that removes all associated memories.\n\n**Function signature and parameters:**\n\n```python\ndef delete(\n    memory_id: str | None = None,\n    user_id: str | None = None,\n    agent_id: str | None = None,\n    app_id: str | None = None,\n    run_id: str | None = None,\n    delete_all: bool = False,\n    confirm: bool = False\n) -> dict\n```\n\n资料来源：[docs/core-concepts/memory-operations/delete.mdx](https://github.com/mem0ai/mem0/blob/main/docs/core-concepts/memory-operations/delete.mdx)\n\n**CLI delete examples:**\n\n```bash\n# Delete a single memory\nmem0 delete <memory-id>\n\n# Delete all memories for a user (with confirmation)\nmem0 delete --all --user-id alice\n\n# Delete all memories project-wide\nmem0 delete --all --project --force\n\n# Preview what would be deleted\nmem0 delete --all --user-id alice --dry-run\n```\n\n**Delete flags reference:**\n\n| Flag | Description |\n|------|-------------|\n| `--all` | Delete all memories matching scope filters |\n| `--entity` | Delete the entity and all its memories |\n| `--project` | With `--all`: delete all memories project-wide |\n| `--dry-run` | Preview without deleting |\n| `--force` | Skip confirmation prompt |\n\n资料来源：[cli/node/README.md](https://github.com/mem0ai/mem0/blob/main/cli/node/README.md)\n\n### List Memories\n\nThe **List** operation retrieves memories with optional filters, pagination, and sorting. 
Unlike search which returns semantically relevant results, list provides comprehensive enumeration of stored memories within specified scopes.\n\n**CLI usage:**\n\n```bash\n# List all memories for a user\nmem0 list --user-id alice\n\n# List with pagination\nmem0 list --user-id alice --page 1 --page-size 50\n\n# List in JSON format for agent consumption\nmem0 list --user-id alice --output json\n```\n\n## Entity Scopes\n\nMem0 organizes memories within hierarchical entity scopes that provide logical separation and access control. Each memory belongs to at least one entity identifier, creating an ownership hierarchy.\n\n```mermaid\ngraph TB\n    A[Memory Record] --> B[user_id]\n    A --> C[agent_id]\n    A --> D[app_id]\n    A --> E[run_id]\n    \n    B --> F[User Entity]\n    C --> G[Agent Entity]\n    D --> H[Application Entity]\n    E --> I[Run Entity]\n    \n    F --> J[Project/Organization]\n    G --> J\n    H --> J\n    I --> J\n```\n\n**Entity scope parameters:**\n\n| Parameter | Description | Use Case |\n|-----------|-------------|----------|\n| `user_id` | Identifies the end user | Personal preferences, history |\n| `agent_id` | Identifies the AI agent | Agent behavior patterns, policies |\n| `app_id` | Identifies the application | App-specific configurations |\n| `run_id` | Identifies a session/run | Conversation context within a session |\n\n资料来源：[docs/core-concepts/memory-types.mdx](https://github.com/mem0ai/mem0/blob/main/docs/core-concepts/memory-types.mdx)\n\n## Asynchronous Memory Operations\n\nFor large-scale memory operations that may take extended time to complete, Mem0 provides asynchronous execution modes. 
Async operations return immediately with an event ID that can be used to track progress and retrieve results.\n\n**Async operation support:**\n\n| Operation | Async Support | Return Value |\n|-----------|---------------|--------------|\n| `add` | Yes (bulk adds) | Event ID |\n| `search` | Yes | Event ID |\n| `delete` | Yes (bulk deletes) | Event ID |\n| `update` | No | Updated memory |\n| `get` | No | Memory object |\n| `list` | Yes | Event ID |\n\n资料来源：[docs/open-source/features/async-memory.mdx](https://github.com/mem0ai/mem0/blob/main/docs/open-source/features/async-memory.mdx)\n\n**Event monitoring via CLI:**\n\n```bash\n# List recent background processing events\nmem0 event list\n\n# Check the status of a specific event\nmem0 event status <event-id>\n```\n\n## Memory Types\n\nMem0 supports different memory types that serve distinct purposes in AI agent architectures. Each memory type has specific characteristics optimized for different retrieval patterns and use cases.\n\n**Memory type reference:**\n\n| Type | Purpose | Retrieval Pattern | Use Case |\n|------|---------|-------------------|----------|\n| `semantic` | Store facts and preferences | Semantic similarity search | User preferences, facts |\n| `episodic` | Record events and conversations | Time-based, sequential | Conversation history |\n| `procedural` | Store agent behaviors/actions | Task-based patterns | Agent workflows |\n| `long-term` | Persistent cross-session memory | Multi-dimensional search | User profiles, knowledge |\n\n资料来源：[docs/core-concepts/memory-types.mdx](https://github.com/mem0ai/mem0/blob/main/docs/core-concepts/memory-types.mdx)\n\n## Metadata and Filtering\n\nMem0 supports rich metadata storage and filtering capabilities that enable precise memory retrieval beyond semantic similarity. 
Metadata can include arbitrary key-value pairs that are indexed for efficient filtering.\n\n**Metadata structure example:**\n\n```python\nmemory = {\n    \"id\": \"mem_xxxxx\",\n    \"memory\": \"User prefers dark mode for the interface\",\n    \"metadata\": {\n        \"category\": \"preference\",\n        \"priority\": \"high\",\n        \"source\": \"explicit_feedback\",\n        \"tags\": [\"ui\", \"theme\", \"dark-mode\"]\n    },\n    \"created_at\": \"2025-01-15T10:30:00Z\",\n    \"user_id\": \"alice\"\n}\n```\n\n**Filter operators supported:**\n\n| Operator | Description | Example |\n|----------|-------------|---------|\n| `$eq` | Equals | `{\"priority\": {\"$eq\": \"high\"}}` |\n| `$ne` | Not equals | `{\"status\": {\"$ne\": \"archived\"}}` |\n| `$in` | In array | `{\"category\": {\"$in\": [\"fact\", \"preference\"]}}` |\n| `$nin` | Not in array | `{\"source\": {\"$nin\": [\"deprecated\"]}}` |\n| `$gt`, `$gte` | Greater than (or equal) | `{\"score\": {\"$gt\": 0.8}}` |\n| `$lt`, `$lte` | Less than (or equal) | `{\"priority\": {\"$lte\": 5}}` |\n\n资料来源：[docs/open-source/features/metadata-filtering.mdx](https://github.com/mem0ai/mem0/blob/main/docs/open-source/features/metadata-filtering.mdx)\n\n## Custom Instructions\n\nCustom instructions provide a mechanism to customize how Mem0 processes and interprets memories. These instructions guide the LLM in extracting relevant facts, categorizing information, and determining storage behavior.\n\n**Configuration example:**\n\n```python\nmemory = Memory()\n\n# Set custom instructions for the memory instance\nmemory.configure(\n    custom_instructions=\"Focus on extracting user preferences about product features. \"\n                        \"Categorize memories by product area. 
\"\n                        \"Prioritize recent explicit feedback over implicit observations.\"\n)\n\n# Add memory with custom processing\nresult = memory.add(\n    messages=\"I really love the new dark mode feature in the settings panel\",\n    user_id=\"alice\"\n)\n```\n\n资料来源：[docs/open-source/features/custom-instructions.mdx](https://github.com/mem0ai/mem0/blob/main/docs/open-source/features/custom-instructions.mdx)\n\n## Bulk Import\n\nMem0 supports bulk importing of memories from JSON files, enabling migration from other systems or initial data population.\n\n**Import file format:**\n\n```json\n[\n  {\n    \"memory\": \"User prefers dark mode\",\n    \"user_id\": \"alice\",\n    \"metadata\": {\"category\": \"preference\"}\n  },\n  {\n    \"text\": \"Agent uses fallback strategy when API fails\",\n    \"agent_id\": \"agent-123\",\n    \"metadata\": {\"behavior\": \"error-handling\"}\n  },\n  {\n    \"content\": \"Application has rate limiting enabled\",\n    \"app_id\": \"app-production\",\n    \"metadata\": {\"configuration\": true}\n  }\n]\n```\n\n**CLI import command:**\n\n```bash\nmem0 import data.json --user-id alice\n```\n\n资料来源：[cli/node/README.md](https://github.com/mem0ai/mem0/blob/main/cli/node/README.md)\n\n## Agent Mode\n\nThe CLI supports an agent mode that formats output specifically for AI agent tool loops. This mode returns structured JSON that can be easily parsed by AI systems for decision-making.\n\n**Agent mode usage:**\n\n```bash\nmem0 --agent search \"user preferences\" --user-id alice\nmem0 --agent add \"User prefers dark mode\" --user-id alice\nmem0 --agent list --user-id alice\n```\n\n资料来源：[cli/python/README.md](https://github.com/mem0ai/mem0/blob/main/cli/python/README.md)\n\n## Dashboard Memory Management\n\nThe Mem0 dashboard provides a web-based interface for viewing, searching, and managing memories. 
The memory operations are accessible through a visual interface that includes pagination, detail views, and deletion confirmation modals.\n\n**Dashboard features:**\n\n- Paginated memory listing with navigation controls\n- Memory detail view showing content, ID, timestamps, and metadata\n- Inline deletion with confirmation modal\n- Search functionality within the memories page\n\n资料来源：[server/dashboard/src/app/(root)/dashboard/memories/page.tsx](https://github.com/mem0ai/mem0/blob/main/server/dashboard/src/app/(root)/dashboard/memories/page.tsx)\n\n## Configuration and Status\n\nThe Mem0 CLI provides commands for managing configuration and verifying connectivity.\n\n**Configuration commands:**\n\n```bash\nmem0 config show              # Display current config (secrets redacted)\nmem0 config get api_key       # Get a specific value\nmem0 config set user_id bob   # Set a value\n\nmem0 status                   # Verify API connection and display project\nmem0 version                  # Print CLI version\n```\n\n## Operation Flow Summary\n\n```mermaid\ngraph LR\n    A[Client Request] --> B{Operation Type}\n    \n    B -->|add| C[Process & Store]\n    B -->|search| D[Embed Query & Search]\n    B -->|get| E[Direct Lookup]\n    B -->|update| F[Modify & Re-index]\n    B -->|delete| G[Remove from Store]\n    \n    C --> H[(Vector Store)]\n    D --> H\n    E --> H\n    F --> H\n    G --> H\n    \n    C --> I[Event ID]\n    D --> J[Results]\n    E --> K[Memory Object]\n    F --> K\n    G --> L[Confirmation]\n```\n\n## Error Handling\n\nMemory operations may encounter various error conditions that should be handled appropriately in client applications.\n\n**Common error scenarios:**\n\n| Error | Cause | Resolution |\n|-------|-------|------------|\n| `EntityNotFoundError` | Referenced user/agent/app doesn't exist | Verify entity IDs before operations |\n| `MemoryNotFoundError` | Memory ID doesn't exist | Check memory ID or use search |\n| `ValidationError` | Invalid input 
format | Validate request parameters |\n| `RateLimitError` | API rate limit exceeded | Implement exponential backoff |\n| `ConnectionError` | Network or API endpoint unavailable | Retry with circuit breaker |\n\n## See Also\n\n- [Memory Types](https://github.com/mem0ai/mem0/blob/main/docs/core-concepts/memory-types.mdx) - Understanding semantic, episodic, procedural, and long-term memory\n- [Async Memory](https://github.com/mem0ai/mem0/blob/main/docs/open-source/features/async-memory.mdx) - Large-scale asynchronous operations\n- [Metadata Filtering](https://github.com/mem0ai/mem0/blob/main/docs/open-source/features/metadata-filtering.mdx) - Advanced filtering capabilities\n- [Custom Instructions](https://github.com/mem0ai/mem0/blob/main/docs/open-source/features/custom-instructions.mdx) - Customizing memory processing behavior\n\n---\n\n<a id='page-ai-integration'></a>\n\n## AI Model Integration\n\n### 相关页面\n\n相关主题：[Memory Operations](#page-memory-operations), [Embedding Models](#page-embeddings), [System Architecture](#page-architecture)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [mem0/llms/base.py](https://github.com/mem0ai/mem0/blob/main/mem0/llms/base.py)\n- [mem0/llms/openai.py](https://github.com/mem0ai/mem0/blob/main/mem0/llms/openai.py)\n- [mem0/llms/anthropic.py](https://github.com/mem0ai/mem0/blob/main/mem0/llms/anthropic.py)\n- [mem0/llms/azure_openai.py](https://github.com/mem0ai/mem0/blob/main/mem0/llms/azure_openai.py)\n- [mem0/llms/gemini.py](https://github.com/mem0ai/mem0/blob/main/mem0/llms/gemini.py)\n- [mem0/configs/llms/base.py](https://github.com/mem0ai/mem0/blob/main/mem0/configs/llms/base.py)\n- [mem0/configs/llms/__init__.py](https://github.com/mem0ai/mem0/blob/main/mem0/configs/llms/__init__.py)\n- [docs/components/llms/overview.mdx](https://github.com/mem0ai/mem0/blob/main/docs/components/llms/overview.mdx)\n- 
[docs/components/llms/models/openai.mdx](https://github.com/mem0ai/mem0/blob/main/docs/components/llms/models/openai.mdx)\n- [docs/components/llms/models/anthropic.mdx](https://github.com/mem0ai/mem0/blob/main/docs/components/llms/models/anthropic.mdx)\n</details>\n\n# AI Model Integration\n\n## Overview\n\nThe AI Model Integration module in mem0 provides a unified abstraction layer for interacting with various large language model (LLM) providers. This architecture enables seamless switching between different AI backends while maintaining a consistent interface for memory operations. 资料来源：[docs/components/llms/overview.mdx:1-5](https://github.com/mem0ai/mem0/blob/main/docs/components/llms/overview.mdx#L1-L5)\n\n## Architecture\n\nThe integration follows a **Provider Pattern** with a base class defining the contract and provider-specific implementations extending it.\n\n```mermaid\ngraph TD\n    A[mem0 Core] --> B[LLM Base Interface]\n    B --> C[OpenAI Provider]\n    B --> D[Anthropic Provider]\n    B --> E[Azure OpenAI Provider]\n    B --> F[Gemini Provider]\n    \n    C --> G[OpenAI API]\n    D --> H[Anthropic API]\n    E --> I[Azure Cognitive Services]\n    F --> J[Google AI API]\n```\n\n## Supported Providers\n\n| Provider | Model Class | API Type | Status |\n|----------|-------------|----------|--------|\n| OpenAI | `OpenAILargeLanguageModel` | REST | Production |\n| Anthropic | `AnthropicLargeLanguageModel` | REST | Production |\n| Azure OpenAI | `AzureOpenAILargeLanguageModel` | REST | Production |\n| Google Gemini | `GeminiLargeLanguageModel` | REST | Production |\n\n资料来源：[mem0/llms/base.py:1-20](https://github.com/mem0ai/mem0/blob/main/mem0/llms/base.py#L1-L20)\n\n## Base Interface\n\nAll LLM providers inherit from `LargeLanguageModel` base class which defines the core contract:\n\n```python\nclass LargeLanguageModel(ABC):\n    @abstractmethod\n    def generate_response(self, messages, **kwargs):\n        pass\n    \n    @abstractmethod\n    def get_model_name(self):\n        pass\n```\n\n资料来源：[mem0/llms/base.py:15-30](https://github.com/mem0ai/mem0/blob/main/mem0/llms/base.py#L15-L30)\n\n### Core Methods\n\n| Method | Purpose | Parameters 
|\n|--------|---------|------------|\n| `generate_response` | Generate text completion | `messages`, `**kwargs` |\n| `get_model_name` | Return model identifier | None |\n\n## Provider Implementations\n\n### OpenAI Integration\n\nThe OpenAI provider supports GPT-4, GPT-4 Turbo, and GPT-3.5 Turbo models through the OpenAI API.\n\n```python\nclass OpenAILargeLanguageModel(LargeLanguageModel):\n    def __init__(\n        self,\n        model: str = \"gpt-4\",\n        api_key: str = None,\n        temperature: float = 0.7,\n        max_tokens: int = 2000,\n        **kwargs\n    ):\n```\n\n资料来源：[mem0/llms/openai.py:10-25](https://github.com/mem0ai/mem0/blob/main/mem0/llms/openai.py#L10-L25)\n\n**Configuration Parameters:**\n\n| Parameter | Type | Default | Description |\n|-----------|------|---------|-------------|\n| `model` | `str` | `\"gpt-4\"` | Model identifier |\n| `api_key` | `str` | `None` | OpenAI API key |\n| `temperature` | `float` | `0.7` | Response randomness |\n| `max_tokens` | `int` | `2000` | Maximum response length |\n\n**Environment Variable:** `OPENAI_API_KEY`\n\n资料来源：[docs/components/llms/models/openai.mdx:1-15](https://github.com/mem0ai/mem0/blob/main/docs/components/llms/models/openai.mdx#L1-L15)\n\n### Anthropic Integration\n\nThe Anthropic provider enables access to Claude models through the Anthropic API.\n\n```python\nclass AnthropicLargeLanguageModel(LargeLanguageModel):\n    def __init__(\n        self,\n        model: str = \"claude-3-5-sonnet-20241022\",\n        api_key: str = None,\n        temperature: float = 0.7,\n        max_tokens: int = 2000,\n        **kwargs\n    ):\n```\n\n资料来源：[mem0/llms/anthropic.py:10-25](https://github.com/mem0ai/mem0/blob/main/mem0/llms/anthropic.py#L10-L25)\n\n**Supported Models:**\n\n| Model | Context Window | Best For |\n|-------|----------------|----------|\n| `claude-3-5-sonnet-20241022` | 200K tokens | Balanced performance |\n| `claude-3-opus-20240229` | 200K tokens | Complex reasoning |\n| `claude-3-haiku-20240307` | 200K tokens | Fast, cost-effective |\n\n**Environment Variable:** `ANTHROPIC_API_KEY`\n\n资料来源：[docs/components/llms/models/anthropic.mdx:1-20](https://github.com/mem0ai/mem0/blob/main/docs/components/llms/models/anthropic.mdx#L1-L20)\n\n### Azure OpenAI Integration\n\nAzure OpenAI provides 
enterprise-grade access with compliance features and regional deployment options.\n\n```python\nclass AzureOpenAILargeLanguageModel(LargeLanguageModel):\n    def __init__(\n        self,\n        model: str = \"gpt-4\",\n        api_key: str = None,\n        azure_endpoint: str = None,\n        api_version: str = \"2024-02-01\",\n        temperature: float = 0.7,\n        max_tokens: int = 2000,\n        **kwargs\n    ):\n```\n\n资料来源：[mem0/llms/azure_openai.py:10-30](https://github.com/mem0ai/mem0/blob/main/mem0/llms/azure_openai.py#L10-L30)\n\n**Azure-Specific Parameters:**\n\n| Parameter | Type | Required | Description |\n|-----------|------|----------|-------------|\n| `azure_endpoint` | `str` | Yes | Azure endpoint URL |\n| `api_version` | `str` | Yes | API version string |\n| `azure_deployment` | `str` | No | Deployment name |\n\n**Environment Variables:**\n- `AZURE_OPENAI_API_KEY`\n- `AZURE_OPENAI_ENDPOINT`\n\n### Google Gemini Integration\n\nThe Gemini provider integrates with Google AI's Gemini models for multimodal capabilities.\n\n```python\nclass GeminiLargeLanguageModel(LargeLanguageModel):\n    def __init__(\n        self,\n        model: str = \"gemini-2.0-flash-exp\",\n        api_key: str = None,\n        temperature: float = 0.7,\n        max_tokens: int = 2000,\n        **kwargs\n    ):\n```\n\n资料来源：[mem0/llms/gemini.py:10-25](https://github.com/mem0ai/mem0/blob/main/mem0/llms/gemini.py#L10-L25)\n\n**Supported Models:**\n\n| Model | Context Window | Features |\n|-------|----------------|----------|\n| `gemini-2.0-flash-exp` | 1M tokens | Latest, fastest |\n| `gemini-1.5-pro` | 1M tokens | Long context |\n| `gemini-1.5-flash` | 1M tokens | Balanced |\n\n**Environment Variable:** `GEMINI_API_KEY`\n\n## Configuration System\n\n### Base Configuration\n\nAll LLM configurations inherit from `LLMConfig` using Pydantic for validation:\n\n```python\nclass LLMConfig(BaseModel):\n    provider: str\n    model: str\n    temperature: float = 0.7\n    max_tokens: int = 2000\n    extra_params: dict = {}\n```\n\n资料来源：[mem0/configs/llms/base.py:1-20](https://github.com/mem0ai/mem0/blob/main/mem0/configs/llms/base.py#L1-L20)\n\n### Configuration Factory\n\nThe `LLMConfigs` 
class provides a centralized configuration registry:\n\n```python\nclass LLMConfigs:\n    @staticmethod\n    def get_config(provider: str) -> LLMConfig:\n        # Returns provider-specific configuration\n        pass\n```\n\n资料来源：[mem0/configs/llms/__init__.py:1-30]()\n\n## Usage Patterns\n\n### Direct Instantiation\n\n```python\nfrom mem0.llms.openai import OpenAILargeLanguageModel\n\nllm = OpenAILargeLanguageModel(\n    model=\"gpt-4\",\n    temperature=0.3,\n    max_tokens=1000\n)\n\nresponse = llm.generate_response(messages=[\n    {\"role\": \"user\", \"content\": \"Summarize my notes\"}\n])\n```\n\n### Configuration-Based\n\n```python\nfrom mem0.configs.llms import LLMConfigs\n\nconfig = LLMConfigs.get_config(\"openai\")\nllm = config.initialize()\n```\n\n## Message Format\n\nAll providers accept a standardized message format:\n\n```python\nmessages = [\n    {\"role\": \"system\", \"content\": \"You are a helpful assistant\"},\n    {\"role\": \"user\", \"content\": \"What is mem0?\"},\n    {\"role\": \"assistant\", \"content\": \"Mem0 is a memory system...\"},\n    {\"role\": \"user\", \"content\": \"Tell me more\"}\n]\n```\n\n| Role | Description |\n|------|-------------|\n| `system` | System-level instructions |\n| `user` | User input messages |\n| `assistant` | Model responses |\n\n## Error Handling\n\nAll LLM providers implement consistent error handling:\n\n```python\ntry:\n    response = llm.generate_response(messages)\nexcept AuthenticationError:\n    # Handle invalid API key\n    pass\nexcept RateLimitError:\n    # Handle rate limiting\n    pass\nexcept APIConnectionError:\n    # Handle connection issues\n    pass\n```\n\n## Extending the Framework\n\nTo add a new LLM provider:\n\n1. Create a new class inheriting from `LargeLanguageModel`\n2. Implement `generate_response()` and `get_model_name()` methods\n3. Add provider-specific configuration in `mem0/configs/llms/`\n4. 
Register the provider in the configuration factory\n\n```python\nclass CustomLLM(LargeLanguageModel):\n    def __init__(self, model: str = \"custom-model\", **kwargs):\n        self.model = model\n    \n    def generate_response(self, messages, **kwargs):\n        # Implementation\n        pass\n    \n    def get_model_name(self):\n        return self.model\n```\n\n## Security Considerations\n\n- API keys should be provided via environment variables, not hardcoded\n- Rate limiting is handled by the underlying provider APIs\n- Azure OpenAI supports managed identity for enterprise deployments\n- Gemini supports API key restrictions in Google Cloud Console\n\n---\n\n<a id='page-vector-stores'></a>\n\n## Vector Stores and Storage\n\n### 相关页面\n\n相关主题：[System Architecture](#page-architecture), [Embedding Models](#page-embeddings), [Memory Operations](#page-memory-operations)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [mem0/vector_stores/base.py](https://github.com/mem0ai/mem0/blob/main/mem0/vector_stores/base.py)\n- [mem0/vector_stores/pinecone.py](https://github.com/mem0ai/mem0/blob/main/mem0/vector_stores/pinecone.py)\n- [mem0/vector_stores/qdrant.py](https://github.com/mem0ai/mem0/blob/main/mem0/vector_stores/qdrant.py)\n- [mem0/vector_stores/chroma.py](https://github.com/mem0ai/mem0/blob/main/mem0/vector_stores/chroma.py)\n- [mem0/vector_stores/pgvector.py](https://github.com/mem0ai/mem0/blob/main/mem0/vector_stores/pgvector.py)\n- [mem0/vector_stores/weaviate.py](https://github.com/mem0ai/mem0/blob/main/mem0/vector_stores/weaviate.py)\n- [mem0/vector_stores/redis.py](https://github.com/mem0ai/mem0/blob/main/mem0/vector_stores/redis.py)\n- [mem0/vector_stores/configs.py](https://github.com/mem0ai/mem0/blob/main/mem0/vector_stores/configs.py)\n- [docs/components/vectordbs/overview.mdx](https://github.com/mem0ai/mem0/blob/main/docs/components/vectordbs/overview.mdx)\n- 
[docs/components/vectordbs/config.mdx](https://github.com/mem0ai/mem0/blob/main/docs/components/vectordbs/config.mdx)\n</details>\n\n# Vector Stores and Storage\n\n## Overview\n\nVector stores in Mem0 provide the foundational persistence layer for semantic memory storage and retrieval. Mem0 supports multiple vector database backends, allowing users to choose the storage solution that best fits their infrastructure requirements, scale needs, and operational constraints.\n\nThe vector store system enables:\n\n- **Semantic Search**: Store memory embeddings and retrieve relevant memories based on cosine similarity\n- **Multi-Provider Support**: Integrate with popular vector databases including Pinecone, Qdrant, Chroma, PGVector, Weaviate, and Redis\n- **Unified Interface**: Consistent API across all providers through an abstract base class\n- **Metadata Filtering**: Filter memories by user_id, agent_id, run_id, and custom metadata\n- **Scalability**: Support for both local development (Chroma) and production-scale deployments (Pinecone, Qdrant)\n\n资料来源：[docs/components/vectordbs/overview.mdx]()\n\n## Architecture\n\nMem0 implements a provider-based architecture for vector stores. The system consists of:\n\n1. **Base Vector Store Interface**: Abstract class defining the contract all providers must implement\n2. **Provider Implementations**: Concrete implementations for each supported vector database\n3. **Configuration System**: Provider-specific configuration management\n4. 
**Factory Pattern**: Dynamic instantiation based on provider selection\n\n```mermaid\ngraph TD\n    A[Mem0 Memory Core] --> B[VectorStoreFactory]\n    B --> C[BaseVectorStore]\n    C --> D[Pinecone]\n    C --> E[Qdrant]\n    C --> F[Chroma]\n    C --> G[PGVector]\n    C --> H[Weaviate]\n    C --> I[Redis]\n    \n    J[Embedding Service] --> K[Vector Store]\n    K --> L[Semantic Search Results]\n```\n\n资料来源：[mem0/vector_stores/base.py]()\n\n## Base Vector Store Interface\n\nAll vector store providers inherit from `BaseVectorStore`, which defines the core operations required for memory storage and retrieval.\n\n### Core Methods\n\n| Method | Description |\n|--------|-------------|\n| `add` | Insert vectors with associated metadata into the store |\n| `search` | Query vectors by semantic similarity with optional filters |\n| `get` | Retrieve specific vector entries by ID |\n| `delete` | Remove vectors from the store |\n| `update` | Modify existing vector entries |\n| `list` | List all vectors with optional pagination and filters |\n\n资料来源：[mem0/vector_stores/base.py]()\n\n### Data Model\n\nEach vector entry in the store contains:\n\n```python\n{\n    \"id\": str,           # Unique identifier (UUID)\n    \"vector\": List[float],  # Embedding vector\n    \"data\": str,         # Original text content\n    \"metadata\": {\n        \"user_id\": str,\n        \"agent_id\": Optional[str],\n        \"run_id\": Optional[str],\n        \"event\": Optional[str],\n        \"created_at\": str,\n        \"memory_type\": Optional[str]\n    }\n}\n```\n\n资料来源：[mem0/vector_stores/base.py]()\n\n## Supported Providers\n\n### Provider Comparison\n\n| Provider | Type | Deployment | Scalability | Use Case |\n|----------|------|------------|-------------|----------|\n| **Chroma** | Local/Embedded | In-process | Low | Development, prototyping |\n| **Pinecone** | Cloud/Managed | Hosted | Very High | Production at scale |\n| **Qdrant** | Self-hosted/Cloud | Docker/K8s | High | Self-hosted 
production |\n| **PGVector** | Self-hosted | PostgreSQL extension | High | Existing Postgres infra |\n| **Weaviate** | Self-hosted/Cloud | Docker/K8s | High | Knowledge graphs |\n| **Redis** | Self-hosted/Cloud | Redis Stack | Medium | Cache + vector hybrid |\n\n资料来源：[docs/components/vectordbs/overview.mdx]()\n\n### Chroma (Development)\n\nChroma is the default vector store for local development and testing. It runs as an embedded database within the application process.\n\n**Characteristics:**\n- Zero-configuration setup\n- In-process operation\n- File-based persistence\n- Best for development and evaluation\n\n资料来源：[mem0/vector_stores/chroma.py]()\n\n### Pinecone (Cloud)\n\nPinecone is a managed vector database service offering serverless and pod-based deployments.\n\n**Configuration:**\n```python\n{\n    \"vector_store\": {\n        \"provider\": \"pinecone\",\n        \"config\": {\n            \"api_key\": \"your-api-key\",\n            \"index_name\": \"mem0-memory\",\n            \"environment\": \"gcp-starter\"\n        }\n    }\n}\n```\n\n资料来源：[mem0/vector_stores/pinecone.py](), [docs/components/vectordbs/config.mdx]()\n\n### Qdrant (Self-hosted/Cloud)\n\nQdrant is an open-source vector search engine with both self-hosted and cloud options.\n\n**Configuration:**\n```python\n{\n    \"vector_store\": {\n        \"provider\": \"qdrant\",\n        \"config\": {\n            \"host\": \"localhost\",\n            \"port\": 6333,\n            \"collection_name\": \"mem0\"\n        }\n    }\n}\n```\n\n资料来源：[mem0/vector_stores/qdrant.py]()\n\n### PGVector (PostgreSQL)\n\nPGVector extends PostgreSQL with vector similarity search capabilities, ideal for applications already using PostgreSQL.\n\n**Configuration:**\n```python\n{\n    \"vector_store\": {\n        \"provider\": \"pgvector\",\n        \"config\": {\n            \"host\": \"localhost\",\n            \"port\": 5432,\n            \"dbname\": \"mem0\",\n            \"user\": \"postgres\",\n            
\"password\": \"password\"\n        }\n    }\n}\n```\n\n资料来源：[mem0/vector_stores/pgvector.py]()\n\n### Weaviate\n\nWeaviate is an open-source vector database with built-in support for hybrid search and knowledge graphs.\n\n**Configuration:**\n```python\n{\n    \"vector_store\": {\n        \"provider\": \"weaviate\",\n        \"config\": {\n            \"url\": \"http://localhost:8080\",\n            \"api_key\": \"your-api-key\",  # Optional, for cloud\n            \"index_name\": \"Mem0\"\n        }\n    }\n}\n```\n\n资料来源：[mem0/vector_stores/weaviate.py]()\n\n### Redis\n\nRedis Stack provides vector search capabilities built on the popular in-memory data store.\n\n**Configuration:**\n```python\n{\n    \"vector_store\": {\n        \"provider\": \"redis\",\n        \"config\": {\n            \"host\": \"localhost\",\n            \"port\": 6379,\n            \"index_name\": \"mem0\",\n            \"password\": \"password\"  # Optional\n        }\n    }\n}\n```\n\n资料来源：[mem0/vector_stores/redis.py]()\n\n## Configuration System\n\n### Configuration Schema\n\nThe vector store configuration is defined in `configs.py` and follows a structured schema:\n\n```python\n@dataclass\nclass VectorStoreConfig:\n    provider: str                    # Provider name\n    collection_name: str             # Collection/index name\n    embedding_model_dims: int        # Embedding dimension size\n    api_key: Optional[str] = None   # Provider API key\n    # ... 
additional provider-specific fields\n```\n\n资料来源：[mem0/vector_stores/configs.py]()\n\n### Configuration File\n\nVector store settings are typically defined in `config.yaml`:\n\n```yaml\nvector_store:\n  provider: \"chroma\"  # or pinecone, qdrant, pgvector, weaviate, redis\n  collection_name: \"mem0\"\n  embedding_model_dims: 1536\n```\n\n资料来源：[docs/components/vectordbs/config.mdx]()\n\n### Environment Variables\n\nMany providers support configuration via environment variables:\n\n| Variable | Provider | Description |\n|----------|----------|-------------|\n| `PINECONE_API_KEY` | Pinecone | Pinecone API key |\n| `QDRANT_HOST` | Qdrant | Qdrant server host |\n| `REDIS_PASSWORD` | Redis | Redis authentication |\n| `WEAVIATE_API_KEY` | Weaviate | Weaviate cloud API key |\n\n## Search Operations\n\n### Semantic Search\n\nThe primary operation for memory retrieval is semantic search, which finds vectors most similar to a query embedding.\n\n```python\nresults = vector_store.search(\n    query=\"user's preference for morning coffee\",\n    limit=5,\n    filters={\n        \"user_id\": \"user-123\"\n    }\n)\n```\n\n### Search Parameters\n\n| Parameter | Type | Default | Description |\n|-----------|------|---------|-------------|\n| `query` | str | Required | Search query text |\n| `limit` | int | 10 | Maximum results to return |\n| `filters` | dict | None | Metadata filters |\n| `min_score` | float | None | Minimum similarity threshold |\n\n资料来源：[mem0/vector_stores/base.py]()\n\n### Metadata Filtering\n\nMem0 supports filtering search results by various metadata fields:\n\n```python\nfilters = {\n    \"user_id\": \"user-123\",           # Required: filter by user\n    \"agent_id\": \"agent-456\",         # Optional: filter by agent\n    \"run_id\": \"run-789\",             # Optional: filter by session\n    \"memory_type\": \"preference\",     # Optional: filter by type\n    \"created_at\": {\"$gte\": \"2024-01-01\"}  # Optional: time-based\n}\n```\n\n## Memory 
Management\n\n### Adding Memories\n\n```python\nvector_store.add(\n    vectors=embeddings,\n    documents=memory_texts,\n    metadatas=metadata_list\n)\n```\n\n### Updating Memories\n\n```python\nvector_store.update(\n    id=\"memory-uuid\",\n    vector=new_embedding,\n    data=new_text,\n    metadata=updated_metadata\n)\n```\n\n### Deleting Memories\n\n```python\n# Delete single memory\nvector_store.delete(id=\"memory-uuid\")\n\n# Delete all memories for a user\nvector_store.delete(filters={\"user_id\": \"user-123\"})\n\n# Delete all memories\nvector_store.delete(delete_all=True)\n```\n\n## Embedding Integration\n\nVector stores work in conjunction with Mem0's embedding service to convert text into vector representations.\n\n```mermaid\ngraph LR\n    A[User Message] --> B[Embedding Service]\n    B --> C[Embedding Vector]\n    C --> D[Vector Store]\n    D --> E[Storage / Retrieval]\n    \n    F[Search Query] --> G[Embedding Service]\n    G --> H[Query Vector]\n    H --> D\n    D --> I[Similarity Search]\n    I --> J[Top-K Results]\n```\n\nThe embedding dimension must match the vector store configuration. Mem0 uses 1536 dimensions by default (OpenAI text-embedding-3-small).\n\n资料来源：[docs/components/vectordbs/overview.mdx]()\n\n## Best Practices\n\n### Development vs Production\n\n| Aspect | Development | Production |\n|--------|-------------|------------|\n| **Provider** | Chroma | Pinecone/Qdrant/PGVector |\n| **Deployment** | Local embedded | Managed/self-hosted |\n| **Persistence** | File-based | Cloud/server |\n| **Scaling** | Limited | Horizontal |\n\n### Performance Considerations\n\n1. **Index Management**: Ensure proper indexing is configured for your provider\n2. **Batch Operations**: Use batch inserts when adding multiple memories\n3. **Connection Pooling**: Configure connection pools for high-throughput scenarios\n4. 
**Embedding Cache**: Cache embeddings to avoid redundant computations\n\n### Security\n\n- Store API keys in environment variables, not in configuration files\n- Use TLS/SSL connections for production deployments\n- Implement proper access controls based on user_id filtering\n\n## Troubleshooting\n\n### Common Issues\n\n| Issue | Cause | Solution |\n|-------|-------|----------|\n| `Dimension mismatch` | Embedding model dims != index config | Update `embedding_model_dims` in config |\n| `Connection refused` | Wrong host/port | Verify provider configuration |\n| `Authentication failed` | Invalid API key | Check API key in environment |\n| `Index not found` | Collection doesn't exist | Create index or use auto-creation |\n\n### Debug Mode\n\nEnable verbose logging for vector store operations:\n\n```python\nimport logging\nlogging.getLogger(\"mem0.vector_stores\").setLevel(logging.DEBUG)\n```\n\n## See Also\n\n- [Memory Core](memory-core) - The main memory orchestration layer\n- [Embedding Services](embeddings) - Text vectorization\n- [Configuration Guide](../setup/configuration) - Full configuration reference\n- [Deployment Guide](../deployment) - Production deployment patterns\n\n---\n\n<a id='page-embeddings'></a>\n\n## Embedding Models\n\n### 相关页面\n\n相关主题：[AI Model Integration](#page-ai-integration), [Vector Stores and Storage](#page-vector-stores)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [mem0/embeddings/base.py](https://github.com/mem0ai/mem0/blob/main/mem0/embeddings/base.py)\n- [mem0/embeddings/openai.py](https://github.com/mem0ai/mem0/blob/main/mem0/embeddings/openai.py)\n- [mem0/embeddings/azure_openai.py](https://github.com/mem0ai/mem0/blob/main/mem0/embeddings/azure_openai.py)\n- [mem0/embeddings/huggingface.py](https://github.com/mem0ai/mem0/blob/main/mem0/embeddings/huggingface.py)\n- [mem0/embeddings/ollama.py](https://github.com/mem0ai/mem0/blob/main/mem0/embeddings/ollama.py)\n- 
[mem0/embeddings/configs.py](https://github.com/mem0ai/mem0/blob/main/mem0/embeddings/configs.py)\n- [docs/components/embedders/overview.mdx](https://github.com/mem0ai/mem0/blob/main/docs/components/embedders/overview.mdx)\n- [docs/components/embedders/models/openai.mdx](https://github.com/mem0ai/mem0/blob/main/docs/components/embedders/models/openai.mdx)\n- [docs/components/embedders/models/huggingface.mdx](https://github.com/mem0ai/mem0/blob/main/docs/components/embedders/models/huggingface.mdx)\n- [docs/components/embedders/models/ollama.mdx](https://github.com/mem0ai/mem0/blob/main/docs/components/embedders/models/ollama.mdx)\n</details>\n\n# Embedding Models\n\nEmbedding models are a fundamental component of the mem0 memory system. They transform textual information into dense vector representations (embeddings) that enable semantic search, similarity matching, and efficient memory retrieval. The embedding layer sits at the core of mem0's architecture, bridging raw user interactions with the vector-based storage layer.\n\n## Overview\n\nMem0 provides a flexible, provider-agnostic embedding abstraction that supports multiple embedding backends while maintaining a consistent interface. This design allows users to choose embedding providers based on their requirements for cost, latency, privacy, or quality.\n\nThe embedding system in mem0 is built around an abstract base class that defines the contract for all concrete implementations. 
Each provider implementation handles the specifics of API communication, response parsing, and error handling while conforming to the unified interface.\n\n**Key characteristics of mem0's embedding layer:**\n\n- Provider-agnostic abstraction with consistent API across implementations\n- Support for both cloud-based and local embedding models\n- Configuration-driven provider selection\n- Seamless integration with the vector storage layer\n- Extensible architecture for adding custom embedding providers\n\n## Architecture\n\n```mermaid\ngraph TD\n    A[User Input] --> B[Memory Layer]\n    B --> C[Embedding Module]\n    C --> D[Vector Store]\n    \n    C --> E[OpenAI Embedder]\n    C --> F[Azure OpenAI Embedder]\n    C --> G[HuggingFace Embedder]\n    C --> H[Ollama Embedder]\n    \n    E --> I[text-embedding-3-small]\n    F --> J[Azure OpenAI Models]\n    G --> K[HF Sentence Transformers]\n    H --> L[Local Ollama Models]\n    \n    D --> M[Semantic Search]\n    D --> N[Memory Retrieval]\n    D --> O[Similarity Matching]\n```\n\n## Supported Providers\n\nMem0 supports multiple embedding providers to accommodate various deployment scenarios. Each provider implements the same abstract interface, allowing transparent switching between backends.\n\n### Provider Comparison\n\n| Provider | Type | Default Model | API Key Required | Local Model Support |\n|----------|------|---------------|------------------|---------------------|\n| OpenAI | Cloud | `text-embedding-3-small` | Yes | No |\n| Azure OpenAI | Cloud | Configurable | Yes | No |\n| HuggingFace | Cloud/Self-hosted | Various sentence-transformers | Optional | Yes |\n| Ollama | Local | `nomic-embed-text` | No | Yes |\n\n## Configuration\n\nEmbedding models are configured through the mem0 configuration system. 
Each provider has its own configuration parameters, but all share a common structure.\n\n### Basic Configuration\n\n```python\nfrom mem0 import Memory\n\nconfig = {\n    \"embedder\": {\n        \"provider\": \"openai\",\n        \"config\": {\n            \"model\": \"text-embedding-3-small\",\n            \"api_key\": \"sk-...\"\n        }\n    }\n}\n\nmemory = Memory.from_config(config)\n```\n\n### Environment Variable Configuration\n\nMany configuration parameters can be set via environment variables, simplifying deployment and reducing boilerplate code:\n\n| Environment Variable | Description | Provider |\n|---------------------|-------------|----------|\n| `OPENAI_API_KEY` | OpenAI API key for embeddings | OpenAI |\n| `AZURE_OPENAI_API_KEY` | Azure OpenAI API key | Azure OpenAI |\n| `HF_TOKEN` | HuggingFace API token | HuggingFace |\n| `OLLAMA_BASE_URL` | Ollama server URL | Ollama |\n\n## OpenAI Embeddings\n\nThe OpenAI embedder provides access to OpenAI's embedding models through the official API. 
It is the default provider in mem0 and offers a balance of quality, cost, and ease of use.\n\n### Supported Models\n\n| Model | Dimensions | Output Format | Use Case |\n|-------|------------|---------------|----------|\n| `text-embedding-3-small` | 1536 | Float32 | General purpose, recommended |\n| `text-embedding-3-large` | 3072 | Float32 | Higher quality, larger vectors |\n| `text-embedding-ada-002` | 1536 | Float32 | Legacy model, compatible |\n\n### Configuration Options\n\n```python\n{\n    \"provider\": \"openai\",\n    \"config\": {\n        \"model\": \"text-embedding-3-small\",  # Optional, defaults to text-embedding-3-small\n        \"api_key\": \"sk-...\",                 # Optional if OPENAI_API_KEY is set\n        \"base_url\": \"https://api.openai.com/v1\",  # Optional, for proxies\n        \"timeout\": 60,                       # Optional, request timeout in seconds\n        \"max_retries\": 3                     # Optional, number of retries on failure\n    }\n}\n```\n\n## Azure OpenAI Embeddings\n\nAzure OpenAI embeddings provide the same model quality as OpenAI with enterprise-grade security, compliance, and regional availability. This is the preferred option for organizations requiring Azure infrastructure.\n\n### Configuration Options\n\n```python\n{\n    \"provider\": \"azure_openai\",\n    \"config\": {\n        \"model\": \"text-embedding-3-small\",    # Model deployment name\n        \"api_key\": \"your-azure-api-key\",\n        \"azure_endpoint\": \"https://your-resource.openai.azure.com\",\n        \"azure_deployment\": \"your-deployment-name\",\n        \"api_version\": \"2024-02-01\"           # Optional, Azure API version\n    }\n}\n```\n\n## HuggingFace Embeddings\n\nThe HuggingFace embedder supports both cloud-based inference and local models from the HuggingFace ecosystem. 
This provides flexibility for privacy-sensitive applications or cost optimization.\n\n### Supported Model Families\n\n| Model Type | Examples | Description |\n|------------|----------|-------------|\n| Sentence Transformers | `all-MiniLM-L6-v2`, `BAAI/bge-large` | Optimized for sentence-level embeddings |\n| Generic Transformers | `bert-base-uncased` | General-purpose transformer models |\n\n### Configuration Options\n\n```python\n{\n    \"provider\": \"huggingface\",\n    \"config\": {\n        \"model\": \"sentence-transformers/all-MiniLM-L6-v2\",  # Model identifier\n        \"token\": \"hf_...\",           # Optional, for gated models\n        \"device\": \"cpu\",             # Optional, cpu/cuda/mps\n        \"encode_kwargs\": {          # Optional, encoding parameters\n            \"normalize_embeddings\": True\n        }\n    }\n}\n```\n\n## Ollama Embeddings\n\nOllama enables running embedding models entirely locally, providing complete data privacy and no API costs. This is ideal for development, testing, or production environments with strict data residency requirements.\n\n### Supported Models\n\n| Model | Dimensions | Description |\n|-------|------------|-------------|\n| `nomic-embed-text` | 768 | High-quality, efficient embeddings |\n| `mxbai-embed-large` | 1024 | Larger model for higher quality |\n| Custom Ollama models | Variable | Any embedding model available in Ollama |\n\n### Configuration Options\n\n```python\n{\n    \"provider\": \"ollama\",\n    \"config\": {\n        \"model\": \"nomic-embed-text\",        # Model name\n        \"base_url\": \"http://localhost:11434\" # Ollama server URL\n    }\n}\n```\n\n## Base Interface\n\nAll embedding providers inherit from the abstract base class that defines the standard interface:\n\n```python\nclass EmbedderBase(ABC):\n    @abstractmethod\n    def embed(self, text: str) -> List[float]:\n        \"\"\"Generate embedding vector for a single text.\"\"\"\n        pass\n    \n    @abstractmethod\n    def 
embed_batch(self, texts: List[str]) -> List[List[float]]:\n        \"\"\"Generate embedding vectors for multiple texts.\"\"\"\n        pass\n    \n    @abstractmethod\n    def get_vector_size(self) -> int:\n        \"\"\"Return the dimensionality of embedding vectors.\"\"\"\n        pass\n```\n\n## Usage Patterns\n\n### Single Text Embedding\n\n```python\nfrom mem0 import Memory\n\nmemory = Memory()\nresult = memory.add(\"User prefers dark mode theme\", user_id=\"user123\")\n```\n\n### Batch Embedding\n\n```python\nfrom mem0 import Memory\n\nmemory = Memory()\nmessages = [\n    \"User lives in San Francisco\",\n    \"Prefers coffee over tea\",\n    \"Works as a software engineer\"\n]\nresult = memory.add_batch(messages, user_id=\"user123\")\n```\n\n### Semantic Search with Custom Embedder\n\n```python\nfrom mem0 import Memory\n\nconfig = {\n    \"embedder\": {\n        \"provider\": \"ollama\",\n        \"config\": {\n            \"model\": \"nomic-embed-text\",\n            \"base_url\": \"http://localhost:11434\"\n        }\n    }\n}\n\nmemory = Memory.from_config(config)\nresults = memory.search(\"What are the user's preferences?\", user_id=\"user123\")\n```\n\n## Extending with Custom Providers\n\nTo add a new embedding provider, implement the `EmbedderBase` abstract class:\n\n```python\nfrom mem0.embeddings.base import EmbedderBase\n\nclass CustomEmbedder(EmbedderBase):\n    def __init__(self, config: dict):\n        self.config = config\n        # Initialize your embedding client\n    \n    def embed(self, text: str) -> List[float]:\n        # Implement single text embedding\n        pass\n    \n    def embed_batch(self, texts: List[str]) -> List[List[float]]:\n        # Implement batch embedding\n        pass\n    \n    def get_vector_size(self) -> int:\n        # Return embedding dimensions\n        pass\n```\n\n## Best Practices\n\n1. 
**Model Selection**: Choose `text-embedding-3-small` for general use cases as it offers the best balance of quality and cost. Use `text-embedding-3-large` when higher accuracy is required.\n\n2. **Local Deployment**: For privacy-sensitive applications, use Ollama with `nomic-embed-text` to keep all data local.\n\n3. **Consistent Embedding Dimensions**: Ensure all memories use the same embedding model and configuration for proper similarity calculations.\n\n4. **API Key Management**: Use environment variables for API keys in production environments rather than hardcoding credentials.\n\n5. **Error Handling**: Implement appropriate retry logic and timeout settings, especially when using cloud-based embedding providers.\n\n## Related Components\n\n- **Vector Store**: The embedding layer feeds into the vector storage system for efficient similarity search\n- **Memory Layer**: High-level memory operations use embeddings for storage and retrieval\n- **Configuration System**: Centralized configuration management for all embedding providers\n\n---\n\n<a id='page-python-sdk'></a>\n\n## Python SDK\n\n### 相关页面\n\n相关主题：[Memory Operations](#page-memory-operations), [TypeScript/Node.js SDK](#page-typescript-sdk)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [mem0/__init__.py](https://github.com/mem0ai/mem0/blob/main/mem0/__init__.py)\n- [mem0/memory/main.py](https://github.com/mem0ai/mem0/blob/main/mem0/memory/main.py)\n- [mem0/client/main.py](https://github.com/mem0ai/mem0/blob/main/mem0/client/main.py)\n- [mem0/configs/base.py](https://github.com/mem0ai/mem0/blob/main/mem0/configs/base.py)\n- [mem0/exceptions.py](https://github.com/mem0ai/mem0/blob/main/mem0/exceptions.py)\n- [docs/open-source/python-quickstart.mdx](https://github.com/mem0ai/mem0/blob/main/docs/open-source/python-quickstart.mdx)\n- [docs/api-reference.mdx](https://github.com/mem0ai/mem0/blob/main/docs/api-reference.mdx)\n</details>\n\n# Python SDK\n\n## Overview\n\nThe mem0 Python SDK 
provides a programmatic interface for integrating memory management capabilities into AI applications. It enables developers to store, retrieve, search, and manage persistent memory across AI agent interactions, supporting both self-hosted deployments and managed cloud services.\n\n资料来源：[mem0/__init__.py:1-50]()\n\n## Architecture\n\nThe SDK is structured around three core components that handle different aspects of memory operations:\n\n```mermaid\ngraph TD\n    A[Client Layer] --> B[Memory Layer]\n    A --> C[Configuration]\n    B --> D[Vector Store]\n    B --> E[LLM Integration]\n    C --> F[BaseConfig]\n    C --> G[LLMConfig]\n    C --> H[VectorStoreConfig]\n```\n\n### Core Components\n\n| Component | File | Purpose |\n|-----------|------|---------|\n| Client | `mem0/client/main.py` | High-level API for cloud and self-hosted deployments |\n| Memory | `mem0/memory/main.py` | Core memory operations engine |\n| Configs | `mem0/configs/base.py` | Configuration management for providers |\n\n资料来源：[mem0/client/main.py:1-30]()\n\n## Installation\n\nInstall the mem0 package along with required dependencies:\n\n```bash\npip install mem0ai\n```\n\nFor specific LLM and vector store backends, install additional packages:\n\n```bash\n# OpenAI + Qdrant\npip install mem0ai[openai,qdrant]\n\n# Azure OpenAI + Chroma\npip install mem0ai[azure-openai,chromadb]\n```\n\n资料来源：[docs/open-source/python-quickstart.mdx:1-50]()\n\n## Quick Start\n\n### Basic Memory Operations\n\n```python\nfrom mem0 import Memory\n\n# Initialize memory instance\nmemory = Memory()\n\n# Add memories\nresult = memory.add(\n    messages=[\n        {\"role\": \"user\", \"content\": \"I'm planning to visit Tokyo next month.\"},\n        {\"role\": \"assistant\", \"content\": \"That's exciting! 
Tokyo has great places to visit.\"}\n    ],\n    user_id=\"user_123\"\n)\n\n# Search memories\nresults = memory.search(\n    query=\"What are my travel plans?\",\n    user_id=\"user_123\"\n)\n\n# Get all memories for a user\nall_memories = memory.get_all(user_id=\"user_123\")\n\n# Update a memory\nmemory.update(memory_id=\"mem_xxx\", data=\"Updated content here\")\n\n# Delete a memory\nmemory.delete(memory_id=\"mem_xxx\")\n```\n\n资料来源：[docs/open-source/python-quickstart.mdx:50-100]()\n\n## Configuration\n\n### Configuration Parameters\n\n| Parameter | Type | Description | Default |\n|-----------|------|-------------|---------|\n| `llm` | dict | LLM provider configuration | Required |\n| `vector_store` | dict | Vector store provider configuration | Required |\n| `embedder` | dict | Embedding model configuration | Optional |\n| `memory_history_limit` | int | Number of conversation turns to retain | 20 |\n\n资料来源：[mem0/configs/base.py:1-80]()\n\n### LLM Configuration\n\n```python\nfrom mem0 import Memory\nfrom mem0.configs.base import LLMConfig\n\nconfig = LLMConfig(\n    provider=\"openai\",\n    model=\"gpt-4o\",\n    api_key=\"your-api-key\"\n)\n\nmemory = Memory.from_config(llm_config=config)\n```\n\n### Vector Store Configuration\n\n```python\nfrom mem0.configs.base import VectorStoreConfig\n\nvector_config = VectorStoreConfig(\n    provider=\"qdrant\",\n    host=\"localhost\",\n    port=6333,\n    collection_name=\"memories\"\n)\n```\n\n资料来源：[mem0/configs/base.py:80-150]()\n\n## Memory Operations API\n\n### Adding Memories\n\nThe `add()` method stores new memories from conversation messages:\n\n```python\nmemory.add(\n    messages=[{\"role\": \"user\", \"content\": \"User message\"}],\n    user_id=\"user_123\",\n    session_id=\"session_456\",\n    metadata={\"source\": \"chat\"}\n)\n```\n\n| Parameter | Type | Required | Description |\n|-----------|------|----------|-------------|\n| `messages` | list[dict] | Yes | List of message objects with role and content 
|\n| `user_id` | str | Yes | Unique identifier for the user |\n| `session_id` | str | No | Session or conversation identifier |\n| `metadata` | dict | No | Additional metadata to attach |\n\n资料来源：[mem0/memory/main.py:100-180]()\n\n### Searching Memories\n\n```python\nresults = memory.search(\n    query=\"Find information about...\",\n    user_id=\"user_123\",\n    limit=5,\n    rerank=True\n)\n```\n\n| Parameter | Type | Required | Description |\n|-----------|------|----------|-------------|\n| `query` | str | Yes | Search query text |\n| `user_id` | str | No | Filter by user |\n| `limit` | int | No | Maximum results (default: 10) |\n| `rerank` | bool | No | Apply reranking to results |\n\n资料来源：[mem0/memory/main.py:180-250]()\n\n### Retrieving Memories\n\n```python\n# Get all memories for a user\nall_memories = memory.get_all(user_id=\"user_123\")\n\n# Get specific memory by ID\nmemory_item = memory.get(memory_id=\"mem_xxx\")\n```\n\n### Updating Memories\n\n```python\nmemory.update(\n    memory_id=\"mem_xxx\",\n    data=\"Updated memory content\",\n    metadata={\"key\": \"value\"}\n)\n```\n\n### Deleting Memories\n\n```python\n# Delete specific memory\nmemory.delete(memory_id=\"mem_xxx\")\n\n# Delete all memories for a user\nmemory.delete_all(user_id=\"user_123\")\n```\n\n资料来源：[mem0/memory/main.py:250-350]()\n\n## Client Interface\n\nThe `Mem0` client provides a unified interface for interacting with mem0 services:\n\n```python\nfrom mem0 import Mem0\n\n# Initialize client\nclient = Mem0(api_key=\"your-api-key\", app_id=\"your-app-id\")\n\n# Add memories via client\nresult = client.add(\n    messages=[{\"role\": \"user\", \"content\": \"Hello\"}],\n    user_id=\"user_123\"\n)\n```\n\n资料来源：[mem0/client/main.py:1-100]()\n\n## Exception Handling\n\nThe SDK defines custom exceptions for error handling:\n\n| Exception | Description |\n|-----------|-------------|\n| `Mem0Exception` | Base exception class for all mem0 errors |\n| `ValidationError` | Invalid input 
parameters |\n| `AuthenticationError` | Invalid or missing API credentials |\n| `RateLimitError` | API rate limit exceeded |\n| `NotFoundError` | Requested resource not found |\n\n资料来源：[mem0/exceptions.py:1-50]()\n\n### Handling Exceptions\n\n```python\nfrom mem0.exceptions import Mem0Exception, ValidationError\n\ntry:\n    memory.add(messages=[], user_id=\"user_123\")\nexcept ValidationError as e:\n    print(f\"Invalid input: {e}\")\nexcept Mem0Exception as e:\n    print(f\"Memory operation failed: {e}\")\n```\n\n## Data Flow\n\n```mermaid\nsequenceDiagram\n    participant App as Application\n    participant SDK as Python SDK\n    participant Memory as Memory Engine\n    participant Vector as Vector Store\n    participant LLM as LLM Provider\n\n    App->>SDK: memory.add(messages)\n    SDK->>Memory: process_messages()\n    Memory->>LLM: extract_and_summarize()\n    LLM-->>Memory: structured_memories\n    Memory->>Vector: store(memories)\n    Vector-->>Memory: confirm\n    Memory-->>SDK: result\n    SDK-->>App: MemoryResult\n```\n\n## Supported Providers\n\n### LLM Providers\n\n| Provider | Package | Configuration Key |\n|----------|---------|-------------------|\n| OpenAI | `openai` | `openai` |\n| Azure OpenAI | `azure-openai` | `azure_openai` |\n| Anthropic | `anthropic` | `anthropic` |\n| Groq | `groq` | `groq` |\n| Ollama | `ollama` | `ollama` |\n| LM Studio | `lmstudio` | `lmstudio` |\n\n### Vector Store Providers\n\n| Provider | Package | Configuration Key |\n|----------|---------|-------------------|\n| Qdrant | `qdrant-client` | `qdrant` |\n| Chroma | `chromadb` | `chroma` |\n| Weaviate | `weaviate-client` | `weaviate` |\n| Milvus | `pymilvus` | `milvus` |\n| Pinecone | `pinecone-client` | `pinecone` |\n\n资料来源：[mem0/configs/base.py:150-250]()\n\n## Advanced Configuration\n\n### Self-Hosted Deployment\n\n```python\nfrom mem0 import Memory\n\nmemory = Memory()\n\n# Configure with custom providers\nmemory.configure(\n    llm={\n        \"provider\": 
\"ollama\",\n        \"model\": \"llama3.1\",\n        \"api_base\": \"http://localhost:11434\"\n    },\n    vector_store={\n        \"provider\": \"qdrant\",\n        \"host\": \"localhost\",\n        \"port\": 6333\n    }\n)\n```\n\n资料来源：[docs/open-source/python-quickstart.mdx:100-150]()\n\n### Embedder Configuration\n\n```python\nmemory.configure(\n    embedder={\n        \"provider\": \"openai\",\n        \"model\": \"text-embedding-3-small\",\n        \"dimension\": 1536\n    }\n)\n```\n\n## Best Practices\n\n1. **User Identification**: Always provide unique `user_id` for each user to maintain proper memory isolation\n2. **Session Management**: Use `session_id` to organize memories within conversation threads\n3. **Metadata**: Attach relevant metadata for better searchability and filtering\n4. **Error Handling**: Implement proper exception handling for production applications\n5. **Configuration**: Store API keys securely using environment variables\n\n## See Also\n\n- [API Reference Documentation](https://github.com/mem0ai/mem0/blob/main/docs/api-reference.mdx)\n- [Open Source Quickstart](https://github.com/mem0ai/mem0/blob/main/docs/open-source/python-quickstart.mdx)\n- [GitHub Repository](https://github.com/mem0ai/mem0)\n\n---\n\n<a id='page-typescript-sdk'></a>\n\n## TypeScript/Node.js SDK\n\n### 相关页面\n\n相关主题：[Python SDK](#page-python-sdk)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [mem0-ts/src/client/mem0.ts](https://github.com/mem0ai/mem0/blob/main/mem0-ts/src/client/mem0.ts)\n- [mem0-ts/src/client/index.ts](https://github.com/mem0ai/mem0/blob/main/mem0-ts/src/client/index.ts)\n- [mem0-ts/src/client/mem0.types.ts](https://github.com/mem0ai/mem0/blob/main/mem0-ts/src/client/mem0.types.ts)\n- [mem0-ts/src/client/config.ts](https://github.com/mem0ai/mem0/blob/main/mem0-ts/src/client/config.ts)\n- [mem0-ts/src/oss/src/memory/index.ts](https://github.com/mem0ai/mem0/blob/main/mem0-ts/src/oss/src/memory/index.ts)\n- 
[mem0-ts/src/oss/src/types/index.ts](https://github.com/mem0ai/mem0/blob/main/mem0-ts/src/oss/src/types/index.ts)\n- [mem0-ts/README.md](https://github.com/mem0ai/mem0/blob/main/mem0-ts/README.md)\n- [mem0-ts/package.json](https://github.com/mem0ai/mem0/blob/main/mem0-ts/package.json)\n</details>\n\n# TypeScript/Node.js SDK\n\nThe mem0 TypeScript/Node.js SDK provides a robust client library for integrating memory management capabilities into JavaScript and TypeScript applications. It enables developers to store, retrieve, search, and manage persistent memory across user interactions and AI agent workflows.\n\n## Overview\n\nThe SDK offers two primary deployment modes:\n\n| Mode | Description | Use Case |\n|------|-------------|----------|\n| **Hosted (mem0ai)** | Cloud-hosted memory service with API key authentication | Production applications requiring managed infrastructure |\n| **Open Source (OSS)** | Self-hosted memory implementation running entirely within the application | Privacy-sensitive applications, on-premise deployments, custom infrastructure |\n\n资料来源：[mem0-ts/README.md](https://github.com/mem0ai/mem0/blob/main/mem0-ts/README.md)\n\n## Architecture\n\n```mermaid\ngraph TD\n    A[Application] --> B[Mem0Client]\n    B --> C{Deployment Mode}\n    C -->|Hosted| D[mem0ai Cloud API]\n    C -->|OSS| E[Local Memory Store]\n    D --> F[Vector Database]\n    E --> G[SQLite/Vector Store]\n    \n    H[Mem0Config] --> B\n    I[API Key] --> B\n```\n\nThe SDK architecture separates configuration management, client initialization, and memory operations into distinct modules. 
The `Mem0Client` class serves as the primary interface, accepting a `Mem0Config` object during instantiation to determine deployment mode and connection parameters.\n\n资料来源：[mem0-ts/src/client/mem0.ts](https://github.com/mem0ai/mem0/blob/main/mem0-ts/src/client/mem0.ts)\n\n## Installation\n\nInstall the SDK using npm, yarn, or pnpm:\n\n```bash\nnpm install mem0ai\n# or\nyarn add mem0ai\n# or\npnpm add mem0ai\n```\n\nThe package name is `mem0ai` on npm, supporting both CommonJS and ESM module formats.\n\n资料来源：[mem0-ts/package.json](https://github.com/mem0ai/mem0/blob/main/mem0-ts/package.json)\n\n## Configuration\n\n### Mem0Config Parameters\n\n| Parameter | Type | Required | Default | Description |\n|-----------|------|----------|---------|-------------|\n| `apiKey` | `string` | Conditional | - | API key for hosted mem0ai service. Required when `orgId` or `projectId` is provided |\n| `orgId` | `string` | No | - | Organization ID for hosted deployment |\n| `projectId` | `string` | No | - | Project ID for hosted deployment |\n| `host` | `string` | No | `\"https://api.mem0.ai\"` | Base URL for hosted API endpoint |\n\n### OSS Configuration\n\n| Parameter | Type | Required | Default | Description |\n|-----------|------|----------|---------|-------------|\n| `embedder` | `Embedder` | Yes | - | Embedding model configuration for vectorization |\n| `vectorStore` | `VectorStore` | Yes | - | Vector storage backend (Chroma, Qdrant, or in-memory) |\n| `db` | `Database` | Yes | - | SQLite database for structured data |\n| `version` | `string` | No | `\"v1.0\"` | Memory schema version |\n\n资料来源：[mem0-ts/src/client/config.ts](https://github.com/mem0ai/mem0/blob/main/mem0-ts/src/client/config.ts)\n\n## Client Initialization\n\n### Hosted Mode\n\n```typescript\nimport { Mem0Client } from \"mem0ai\";\n\nconst client = new Mem0Client({\n  apiKey: \"your-api-key\",\n  orgId: \"your-org-id\",    // optional\n  projectId: \"your-project-id\"  // optional\n});\n```\n\n### Open Source 
Mode\n\n```typescript\nimport { Mem0Client } from \"mem0ai\";\n\nconst client = new Mem0Client({\n  embedder: {\n    provider: \"openai\",\n    config: {\n      api_key: \"your-openai-key\",\n      model: \"text-embedding-3-small\"\n    }\n  },\n  vectorStore: {\n    provider: \"chroma\",\n    config: {\n      collection_name: \"memory\"\n    }\n  },\n  db: {\n    provider: \"sqlite\"\n  }\n});\n```\n\n资料来源：[mem0-ts/src/client/index.ts](https://github.com/mem0ai/mem0/blob/main/mem0-ts/src/client/index.ts)\n\n## Core API Methods\n\n### Memory Operations\n\n| Method | Parameters | Return Type | Description |\n|--------|------------|-------------|-------------|\n| `add` | `messages`, `userId`, `metadata`, `filters` | `Promise<MemoryResult[]>` | Store new memories |\n| `search` | `query`, `userId`, `filters`, `limit` | `Promise<MemoryResult[]>` | Semantic search across memories |\n| `getAll` | `userId`, `filters` | `Promise<MemoryResult[]>` | Retrieve all memories for a user |\n| `get` | `memoryId` | `Promise<MemoryResult>` | Fetch a specific memory by ID |\n| `update` | `memoryId`, `data`, `metadata` | `Promise<MemoryResult>` | Modify existing memory content |\n| `delete` | `memoryId` | `Promise<void>` | Remove a memory entry |\n| `reset` | `userId` | `Promise<void>` | Delete all memories for a user |\n\n资料来源：[mem0-ts/src/client/mem0.ts](https://github.com/mem0ai/mem0/blob/main/mem0-ts/src/client/mem0.ts)\n\n### MemoryResult Data Model\n\n```typescript\ninterface MemoryResult {\n  id: string;           // Unique memory identifier\n  memory: string;       // Memory content text\n  event: string;        // Event type (e.g., \"memory\", \"preference\", \"fact\")\n  created_at: string;   // ISO timestamp\n  updated_at: string;   // ISO timestamp\n  metadata?: {          // Optional metadata object\n    category?: string;\n    source?: string;\n    [key: string]: any;\n  
};\n}\n```\n\n资料来源：[mem0-ts/src/client/mem0.types.ts](https://github.com/mem0ai/mem0/blob/main/mem0-ts/src/client/mem0.types.ts)\n\n### Add Memories\n\n```typescript\n// Add a single memory\nconst memories = await client.add({\n  messages: [\n    { role: \"user\", content: \"I prefer dark mode in my IDE\" },\n    { role: \"assistant\", content: \"I'll remember that you prefer dark mode\" }\n  ],\n  userId: \"user-123\"\n});\n\n// With metadata\nconst memories = await client.add({\n  messages: [\n    { role: \"user\", content: \"Book a flight to Tokyo next month\" }\n  ],\n  userId: \"user-123\",\n  metadata: {\n    category: \"travel\",\n    priority: \"high\"\n  }\n});\n```\n\n### Search Memories\n\n```typescript\nconst results = await client.search({\n  query: \"What are my IDE preferences?\",\n  userId: \"user-123\",\n  limit: 5\n});\n\nresults.forEach(memory => {\n  console.log(`${memory.id}: ${memory.memory}`);\n  console.log(`Category: ${memory.metadata?.category}`);\n});\n```\n\n### Get All Memories\n\n```typescript\nconst allMemories = await client.getAll({\n  userId: \"user-123\"\n});\n```\n\n### Update Memory\n\n```typescript\nawait client.update({\n  memoryId: \"memory-uuid-here\",\n  data: \"Updated memory content here\",\n  metadata: {\n    category: \"updated-category\"\n  }\n});\n```\n\n### Delete Memory\n\n```typescript\nawait client.delete({\n  memoryId: \"memory-uuid-here\"\n});\n```\n\n### Reset User Memories\n\n```typescript\nawait client.reset({\n  userId: \"user-123\"\n});\n```\n\n资料来源：[mem0-ts/README.md](https://github.com/mem0ai/mem0/blob/main/mem0-ts/README.md)\n\n## Open Source Module Structure\n\nThe OSS implementation follows a modular architecture with separate concerns for memory management, embedding, and storage.\n\n```mermaid\ngraph LR\n    A[Mem0Client] --> B[Memory Class]\n    B --> C[Embedding]\n    B --> D[Vector Store]\n    B --> E[SQLite DB]\n    C --> F[OpenAI Embeddings]\n    D --> G[Chroma/Qdrant/In-Memory]\n```\n\n### 
Memory Class\n\nThe `Memory` class orchestrates the OSS memory operations, coordinating between the embedding service, vector store, and SQLite database.\n\n| Method | Description |\n|--------|-------------|\n| `add` | Process and store new memories with embeddings |\n| `search` | Perform vector similarity search |\n| `get` | Retrieve memories by ID |\n| `delete` | Remove memory from all stores |\n| `reset` | Clear all user memories |\n\n资料来源：[mem0-ts/src/oss/src/memory/index.ts](https://github.com/mem0ai/mem0/blob/main/mem0-ts/src/oss/src/memory/index.ts)\n\n## Message Format\n\nThe SDK uses a standardized message format for conversation history:\n\n```typescript\ninterface Message {\n  role: \"system\" | \"user\" | \"assistant\";\n  content: string;\n}\n```\n\nMessages are processed to extract semantic meaning and stored as discrete memory entries with associated event types.\n\n资料来源：[mem0-ts/src/oss/src/types/index.ts](https://github.com/mem0ai/mem0/blob/main/mem0-ts/src/oss/src/types/index.ts)\n\n## Supported Embedders\n\n| Provider | Model Options | Configuration |\n|----------|---------------|---------------|\n| OpenAI | `text-embedding-3-small`, `text-embedding-3-large`, `text-embedding-ada-002` | `api_key` |\n| Local | Custom embedding models | `model_path` |\n\n资料来源：[mem0-ts/src/client/config.ts](https://github.com/mem0ai/mem0/blob/main/mem0-ts/src/client/config.ts)\n\n## Supported Vector Stores\n\n| Provider | Description | Persistence |\n|----------|-------------|-------------|\n| Chroma | Open source vector database | Durable |\n| Qdrant | High-performance vector search | Durable |\n| In-memory | Temporary storage for testing | Volatile |\n\n## Event Types\n\nMemories are categorized by event types for organizational purposes:\n\n| Event Type | Usage |\n|------------|-------|\n| `memory` | General conversation memories |\n| `preference` | User preferences and settings |\n| `fact` | Factual information about users |\n| `knowledge` | Learned domain 
knowledge |\n\n资料来源：[mem0-ts/src/client/mem0.types.ts](https://github.com/mem0ai/mem0/blob/main/mem0-ts/src/client/mem0.types.ts)\n\n## Workflow Diagram\n\n```mermaid\nsequenceDiagram\n    participant App as Application\n    participant Client as Mem0Client\n    participant API as mem0ai API\n    \n    App->>Client: new Mem0Client(config)\n    Note over Client: Initialize with config\n    \n    App->>Client: add({messages, userId})\n    Client->>API: POST /memories\n    API-->>Client: MemoryResult[]\n    Client-->>App: Promise<MemoryResult[]>\n    \n    App->>Client: search({query, userId})\n    Client->>API: POST /memories/search\n    API-->>Client: MemoryResult[]\n    Client-->>App: Promise<MemoryResult[]>\n    \n    App->>Client: getAll({userId})\n    Client->>API: GET /memories\n    API-->>Client: MemoryResult[]\n    Client-->>App: Promise<MemoryResult[]>\n```\n\n## Error Handling\n\nThe SDK uses standard JavaScript error handling patterns:\n\n```typescript\ntry {\n  const memories = await client.search({\n    query: \"test query\",\n    userId: \"user-123\"\n  });\n} catch (error) {\n  if (error.status === 401) {\n    console.error(\"Invalid API key\");\n  } else if (error.status === 404) {\n    console.error(\"Resource not found\");\n  } else {\n    console.error(\"Request failed:\", error.message);\n  }\n}\n```\n\n## Environment Variables\n\nWhile not required, the SDK supports environment-based configuration:\n\n```bash\nexport MEM0_API_KEY=\"your-api-key\"\nexport OPENAI_API_KEY=\"your-openai-key\"\n```\n\n## TypeScript Support\n\nThe SDK is written in TypeScript and provides full type definitions out of the box. 
No additional `@types` packages are required.\n\n```typescript\nimport { Mem0Client, Mem0Config, MemoryResult, Message } from \"mem0ai\";\n```\n\nAll exported types are available from the main package entry point.\n\n资料来源：[mem0-ts/src/client/index.ts](https://github.com/mem0ai/mem0/blob/main/mem0-ts/src/client/index.ts)\n\n## Quick Reference\n\n### Minimal Hosted Example\n\n```typescript\nimport { Mem0Client } from \"mem0ai\";\n\nconst client = new Mem0Client({ apiKey: \"your-key\" });\nconst memories = await client.add({\n  messages: [{ role: \"user\", content: \"Hello\" }],\n  userId: \"user-1\"\n});\n```\n\n### Minimal OSS Example\n\n```typescript\nimport { Mem0Client } from \"mem0ai\";\n\nconst client = new Mem0Client({\n  embedder: { provider: \"openai\", config: { api_key: \"key\", model: \"text-embedding-3-small\" } },\n  vectorStore: { provider: \"chroma\", config: { collection_name: \"memories\" } },\n  db: { provider: \"sqlite\" }\n});\n```\n\n资料来源：[mem0-ts/README.md](https://github.com/mem0ai/mem0/blob/main/mem0-ts/README.md)\n\n---\n\n---\n\n## Doramagic 踩坑日志\n\n项目：mem0ai/mem0\n\n摘要：发现 6 个潜在踩坑项，其中 0 个为 high/blocking；最高优先级：能力坑 - 能力判断依赖假设。\n\n## 1. 能力坑 · 能力判断依赖假设\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：README/documentation is current enough for a first validation pass.\n- 对用户的影响：假设不成立时，用户拿不到承诺的能力。\n- 建议检查：将假设转成下游验证清单。\n- 防护动作：假设必须转成验证项；没有验证结果前不能写成事实。\n- 证据：capability.assumptions | github_repo:656099147 | https://github.com/mem0ai/mem0 | README/documentation is current enough for a first validation pass.\n\n## 2. 维护坑 · 维护活跃度未知\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：未记录 last_activity_observed。\n- 对用户的影响：新项目、停更项目和活跃项目会被混在一起，推荐信任度下降。\n- 建议检查：补 GitHub 最近 commit、release、issue/PR 响应信号。\n- 防护动作：维护活跃度未知时，推荐强度不能标为高信任。\n- 证据：evidence.maintainer_signals | github_repo:656099147 | https://github.com/mem0ai/mem0 | last_activity_observed missing\n\n## 3. 
安全/权限坑 · 下游验证发现风险项\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：no_demo\n- 对用户的影响：下游已经要求复核，不能在页面中弱化。\n- 建议检查：进入安全/权限治理复核队列。\n- 防护动作：下游风险存在时必须保持 review/recommendation 降级。\n- 证据：downstream_validation.risk_items | github_repo:656099147 | https://github.com/mem0ai/mem0 | no_demo; severity=medium\n\n## 4. 安全/权限坑 · 存在评分风险\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：no_demo\n- 对用户的影响：风险会影响是否适合普通用户安装。\n- 建议检查：把风险写入边界卡，并确认是否需要人工复核。\n- 防护动作：评分风险必须进入边界卡，不能只作为内部分数。\n- 证据：risks.scoring_risks | github_repo:656099147 | https://github.com/mem0ai/mem0 | no_demo; severity=medium\n\n## 5. 维护坑 · issue/PR 响应质量未知\n\n- 严重度：low\n- 证据强度：source_linked\n- 发现：issue_or_pr_quality=unknown。\n- 对用户的影响：用户无法判断遇到问题后是否有人维护。\n- 建议检查：抽样最近 issue/PR，判断是否长期无人处理。\n- 防护动作：issue/PR 响应未知时，必须提示维护风险。\n- 证据：evidence.maintainer_signals | github_repo:656099147 | https://github.com/mem0ai/mem0 | issue_or_pr_quality=unknown\n\n## 6. 维护坑 · 发布节奏不明确\n\n- 严重度：low\n- 证据强度：source_linked\n- 发现：release_recency=unknown。\n- 对用户的影响：安装命令和文档可能落后于代码，用户踩坑概率升高。\n- 建议检查：确认最近 release/tag 和 README 安装命令是否一致。\n- 防护动作：发布节奏未知或过期时，安装说明必须标注可能漂移。\n- 证据：evidence.maintainer_signals | github_repo:656099147 | https://github.com/mem0ai/mem0 | release_recency=unknown\n\n<!-- canonical_name: mem0ai/mem0; human_manual_source: deepwiki_human_wiki -->\n",
      "markdown_key": "mem0",
      "pages": "draft",
      "source_refs": [
        {
          "evidence_id": "github_repo:656099147",
          "kind": "repo",
          "supports_claim_ids": [
            "claim_identity",
            "claim_distribution",
            "claim_capability"
          ],
          "url": "https://github.com/mem0ai/mem0"
        },
        {
          "evidence_id": "art_cca59e4451e14706bc74c915a1ecbf6e",
          "kind": "docs",
          "supports_claim_ids": [
            "claim_identity",
            "claim_distribution",
            "claim_capability"
          ],
          "url": "https://github.com/mem0ai/mem0#readme"
        }
      ],
      "summary": "DeepWiki/Human Wiki 完整输出，末尾追加 Discovery Agent 踩坑日志。",
      "title": "mem0 说明书",
      "toc": [
        "https://github.com/mem0ai/mem0 项目说明书",
        "目录",
        "Introduction to Mem0",
        "Core Concepts",
        "Architecture Overview",
        "Deployment Options",
        "Recommended bootstrap command",
        "Manual start",
        "Doramagic 踩坑日志"
      ]
    }
  },
  "quality_gate": {
    "blocking_gaps": [],
    "category_confidence": "medium",
    "compile_status": "ready_for_review",
    "five_assets_present": true,
    "install_sandbox_verified": true,
    "missing_evidence": [],
    "next_action": "publish to Doramagic.ai project surfaces",
    "prompt_preview_boundary_ok": true,
    "publish_status": "publishable",
    "quick_start_verified": true,
    "repo_clone_verified": true,
    "repo_commit": "219b1a6f3d376989e76fc68362bb577cb189ec31",
    "repo_inspection_error": null,
    "repo_inspection_files": [
      "pyproject.toml",
      "README.md",
      "docs/integrations.mdx",
      "docs/openapi.json",
      "docs/README.md",
      "docs/api-reference.mdx",
      "docs/docs.json",
      "docs/vibecoding.mdx",
      "docs/introduction.mdx",
      "docs/core-concepts/memory-evaluation.mdx",
      "docs/core-concepts/memory-types.mdx",
      "docs/migration/oss-v2-to-v3.mdx",
      "docs/migration/api-changes.mdx",
      "docs/migration/oss-to-platform.mdx",
      "docs/migration/platform-v2-to-v3.mdx",
      "docs/api-reference/organizations-projects.mdx",
      "docs/platform/contribute.mdx",
      "docs/platform/cli.mdx",
      "docs/platform/faqs.mdx",
      "docs/platform/platform-vs-oss.mdx",
      "docs/platform/mem0-mcp.mdx",
      "docs/platform/quickstart.mdx",
      "docs/platform/overview.mdx",
      "docs/platform/advanced-memory-operations.mdx",
      "docs/contributing/development.mdx",
      "docs/contributing/documentation.mdx",
      "docs/cookbooks/overview.mdx",
      "docs/templates/release_notes_template.mdx",
      "docs/templates/cookbook_template.mdx",
      "docs/templates/migration_guide_template.mdx",
      "docs/templates/concept_guide_template.mdx",
      "docs/templates/parameters_reference_template.mdx",
      "docs/templates/integration_guide_template.mdx",
      "docs/templates/api_reference_template.mdx",
      "docs/templates/operation_guide_template.mdx",
      "docs/templates/feature_guide_template.mdx",
      "docs/templates/troubleshooting_playbook_template.mdx",
      "docs/templates/quickstart_template.mdx",
      "docs/templates/section_overview_template.mdx",
      "docs/changelog/openclaw.mdx"
    ],
    "repo_inspection_verified": true,
    "review_reasons": [],
    "tag_count_ok": true,
    "unsupported_claims": []
  },
  "schema_version": "0.1",
  "user_assets": {
    "ai_context_pack": {
      "asset_id": "ai_context_pack",
      "filename": "AI_CONTEXT_PACK.md",
      "markdown": "# @mem0/cli - Doramagic AI Context Pack\n\n> 定位：安装前体验与判断资产。它帮助宿主 AI 有一个好的开始，但不代表已经安装、执行或验证目标项目。\n\n## 充分原则\n\n- **充分原则，不是压缩原则**：AI Context Pack 应该充分到让宿主 AI 在开工前理解项目价值、能力边界、使用入口、风险和证据来源；它可以分层组织，但不以最短摘要为目标。\n- **压缩策略**：只压缩噪声和重复内容，不压缩会影响判断和开工质量的上下文。\n\n## 给宿主 AI 的使用方式\n\n你正在读取 Doramagic 为 @mem0/cli 编译的 AI Context Pack。请把它当作开工前上下文：帮助用户理解适合谁、能做什么、如何开始、哪些必须安装后验证、风险在哪里。不要声称你已经安装、运行或执行了目标项目。\n\n## Claim 消费规则\n\n- **事实来源**：Repo Evidence + Claim/Evidence Graph；Human Wiki 只提供显著性、术语和叙事结构。\n- **事实最低状态**：`supported`\n- `supported`：可以作为项目事实使用，但回答中必须引用 claim_id 和证据路径。\n- `weak`：只能作为低置信度线索，必须要求用户继续核实。\n- `inferred`：只能用于风险提示或待确认问题，不能包装成项目事实。\n- `unverified`：不得作为事实使用，应明确说证据不足。\n- `contradicted`：必须展示冲突来源，不得替用户强行选择一个版本。\n\n## 它最适合谁\n\n- **AI 研究者或研究型 Agent 构建者**：README 明确围绕研究、实验或论文工作流展开。 证据：`README.md` Claim：`clm_0004` supported 0.86\n- **正在使用 Claude/Codex/Cursor/Gemini 等宿主 AI 的开发者**：README 或插件配置提到多个宿主 AI。 证据：`README.md` Claim：`clm_0005` supported 0.86\n- **希望把专业流程带进宿主 AI 的用户**：仓库包含 Skill 文档。 证据：`mem0-plugin/skills/mem0/SKILL.md`, `mem0-plugin/skills/mem0-mcp/SKILL.md`, `openclaw/skills/memory-dream/SKILL.md`, `openclaw/skills/memory-triage/SKILL.md` 等 Claim：`clm_0006` supported 0.86\n\n## 它能做什么\n\n- **AI Skill / Agent 指令资产库**（可做安装前预览）：项目包含可被宿主 AI 读取的 Skill 或 Agent 指令文件，可用于把专业流程带入 Claude、Codex、Cursor 等宿主。 证据：`mem0-plugin/skills/mem0/SKILL.md`, `mem0-plugin/skills/mem0-mcp/SKILL.md`, `openclaw/skills/memory-dream/SKILL.md`, `openclaw/skills/memory-triage/SKILL.md` 等 Claim：`clm_0001` supported 0.86\n- **多宿主安装与分发**（需要安装后验证）：项目包含插件或 marketplace 配置，说明它面向一个或多个 AI 宿主的安装和分发。 证据：`.agents/plugins/marketplace.json`, `.claude-plugin/marketplace.json`, `.cursor-plugin/marketplace.json`, `mem0-plugin/.claude-plugin/plugin.json` 等 Claim：`clm_0002` supported 0.86\n- **命令行启动或安装流程**（需要安装后验证）：项目文档中存在可执行命令，真实使用需要在本地或宿主环境中运行这些命令。 证据：`AGENTS.md`, `README.md`, `examples/nemoclaw/quickstart.md`, `mem0-plugin/skills/mem0/references/quickstart.md` 等 Claim：`clm_0003` supported 0.86\n\n## 
怎么开始\n\n- `pip install mem0ai` 证据：`README.md` Claim：`clm_0007` supported 0.86, `clm_0008` supported 0.86\n- `pip install mem0ai[nlp]` 证据：`README.md` Claim：`clm_0008` supported 0.86\n- `npm install mem0ai` 证据：`README.md` Claim：`clm_0009` supported 0.86\n- `npm install -g @mem0/cli   # or: pip install mem0-cli` 证据：`README.md` Claim：`clm_0010` supported 0.86\n- `npx skills add https://github.com/mem0ai/mem0 --skill mem0` 证据：`README.md` Claim：`clm_0011` supported 0.86, `clm_0012` supported 0.86, `clm_0013` supported 0.86, `clm_0014` supported 0.86 等\n- `npx skills add https://github.com/mem0ai/mem0 --skill mem0-cli` 证据：`README.md` Claim：`clm_0012` supported 0.86\n- `npx skills add https://github.com/mem0ai/mem0 --skill mem0-vercel-ai-sdk` 证据：`README.md` Claim：`clm_0013` supported 0.86\n- `npx skills add https://github.com/mem0ai/mem0 --skill mem0-integrate` 证据：`README.md` Claim：`clm_0014` supported 0.86\n- `npx skills add https://github.com/mem0ai/mem0 --skill mem0-test-integration` 证据：`README.md` Claim：`clm_0015` supported 0.86\n- `curl -fsSL https://raw.githubusercontent.com/mem0ai/mem0/main/examples/nemoclaw/setup-mem0-nemoclaw.sh -o setup-mem0-nemoclaw.sh` 证据：`examples/nemoclaw/quickstart.md` Claim：`clm_0016` unverified 0.25\n\n## 继续前判断卡\n\n- **当前建议**：需要管理员/安全审批\n- **为什么**：继续前可能涉及密钥、账号、外部服务或敏感上下文，建议先经过管理员或安全审批。\n\n### 30 秒判断\n\n- **现在怎么做**：需要管理员/安全审批\n- **最小安全下一步**：先跑 Prompt Preview；若涉及凭证或企业环境，先审批再试装\n- **先别相信**：工具权限边界不能在安装前相信。\n- **继续会触碰**：命令执行、宿主 AI 配置、本地环境或项目文件\n\n### 现在可以相信\n\n- **适合人群线索：AI 研究者或研究型 Agent 构建者**（supported）：有 supported claim 或项目证据支撑，但仍不等于真实安装效果。 证据：`README.md` Claim：`clm_0004` supported 0.86\n- **适合人群线索：正在使用 Claude/Codex/Cursor/Gemini 等宿主 AI 的开发者**（supported）：有 supported claim 或项目证据支撑，但仍不等于真实安装效果。 证据：`README.md` Claim：`clm_0005` supported 0.86\n- **适合人群线索：希望把专业流程带进宿主 AI 的用户**（supported）：有 supported claim 或项目证据支撑，但仍不等于真实安装效果。 证据：`mem0-plugin/skills/mem0/SKILL.md`, `mem0-plugin/skills/mem0-mcp/SKILL.md`, `openclaw/skills/memory-dream/SKILL.md`, 
`openclaw/skills/memory-triage/SKILL.md` 等 Claim：`clm_0006` supported 0.86\n- **能力存在：AI Skill / Agent 指令资产库**（supported）：可以相信项目包含这类能力线索；是否适合你的具体任务仍要试用或安装后验证。 证据：`mem0-plugin/skills/mem0/SKILL.md`, `mem0-plugin/skills/mem0-mcp/SKILL.md`, `openclaw/skills/memory-dream/SKILL.md`, `openclaw/skills/memory-triage/SKILL.md` 等 Claim：`clm_0001` supported 0.86\n- **能力存在：多宿主安装与分发**（supported）：可以相信项目包含这类能力线索；是否适合你的具体任务仍要试用或安装后验证。 证据：`.agents/plugins/marketplace.json`, `.claude-plugin/marketplace.json`, `.cursor-plugin/marketplace.json`, `mem0-plugin/.claude-plugin/plugin.json` 等 Claim：`clm_0002` supported 0.86\n- **能力存在：命令行启动或安装流程**（supported）：可以相信项目包含这类能力线索；是否适合你的具体任务仍要试用或安装后验证。 证据：`AGENTS.md`, `README.md`, `examples/nemoclaw/quickstart.md`, `mem0-plugin/skills/mem0/references/quickstart.md` 等 Claim：`clm_0003` supported 0.86\n\n### 现在还不能相信\n\n- **工具权限边界不能在安装前相信。**（unverified）：MCP/tool 类项目通常会触碰文件、网络、浏览器或外部 API，必须真实检查权限和日志。\n- **真实输出质量不能在安装前相信。**（unverified）：Prompt Preview 只能展示引导方式，不能证明真实项目中的结果质量。\n- **宿主 AI 版本兼容性不能在安装前相信。**（unverified）：Claude、Cursor、Codex、Gemini 等宿主加载规则和版本差异必须在真实环境验证。\n- **不会污染现有宿主 AI 行为，不能直接相信。**（inferred）：Skill、plugin、AGENTS/CLAUDE/GEMINI 指令可能改变宿主 AI 的默认行为。 证据：`.agents/plugins/marketplace.json`, `.claude-plugin/marketplace.json`, `.cursor-plugin/marketplace.json`, `AGENTS.md` 等\n- **可安全回滚不能默认相信。**（unverified）：除非项目明确提供卸载和恢复说明，否则必须先在隔离环境验证。\n- **真实安装后是否与用户当前宿主 AI 版本兼容？**（unverified）：兼容性只能通过实际宿主环境验证。 证据：`.agents/plugins/marketplace.json`, `.claude-plugin/marketplace.json`, `.cursor-plugin/marketplace.json`, `mem0-plugin/.claude-plugin/plugin.json` 等\n- **项目输出质量是否满足用户具体任务？**（unverified）：安装前预览只能展示流程和边界，不能替代真实评测。\n- **安装命令是否需要网络、权限或全局写入？**（unverified）：这影响企业环境和个人环境的安装风险。 证据：`README.md`\n\n### 继续会触碰什么\n\n- **命令执行**：包管理器、网络下载、本地插件目录、项目配置或用户主目录。 原因：运行第一条命令就可能产生环境改动；必须先判断是否值得跑。 证据：`AGENTS.md`, `README.md`, `examples/nemoclaw/quickstart.md`, `mem0-plugin/skills/mem0/references/quickstart.md` 等\n- **宿主 AI 配置**：Claude/Codex/Cursor/Gemini/OpenCode 等宿主的 plugin、Skill 
或规则加载配置。 原因：宿主配置会改变 AI 后续工作方式，可能和用户已有规则冲突。 证据：`.agents/plugins/marketplace.json`, `.claude-plugin/marketplace.json`, `.cursor-plugin/marketplace.json`, `AGENTS.md` 等\n- **本地环境或项目文件**：安装结果、插件缓存、项目配置或本地依赖目录。 原因：安装前无法证明写入范围和回滚方式，需要隔离验证。 证据：`.agents/plugins/marketplace.json`, `.claude-plugin/marketplace.json`, `.cursor-plugin/marketplace.json`, `AGENTS.md` 等\n- **环境变量 / API Key**：项目入口文档明确出现 API key、token、secret 或账号凭证配置。 原因：如果真实安装需要凭证，应先使用测试凭证并经过权限/合规判断。 证据：`README.md`, `server/auth.py`, `server/main.py`, `tests/test_server_auth.py` 等\n- **宿主 AI 上下文**：AI Context Pack、Prompt Preview、Skill 路由、风险规则和项目事实。 原因：导入上下文会影响宿主 AI 后续判断，必须避免把未验证项包装成事实。\n\n### 最小安全下一步\n\n- **先跑 Prompt Preview**：用安装前交互式试用判断工作方式是否匹配，不需要授权或改环境。（适用：任何项目都适用，尤其是输出质量未知时。）\n- **只在隔离目录或测试账号试装**：避免安装命令污染主力宿主 AI、真实项目或用户主目录。（适用：存在命令执行、插件配置或本地写入线索时。）\n- **先备份宿主 AI 配置**：Skill、plugin、规则文件可能改变 Claude/Cursor/Codex 的默认行为。（适用：存在插件 manifest、Skill 或宿主规则入口时。）\n- **不要使用真实生产凭证**：环境变量/API key 一旦进入宿主或工具链，可能产生账号和合规风险。（适用：出现 API、TOKEN、KEY、SECRET 等环境线索时。）\n- **安装后只验证一个最小任务**：先验证加载、兼容、输出质量和回滚，再决定是否深用。（适用：准备从试用进入真实工作流时。）\n\n### 退出方式\n\n- **保留安装前状态**：记录原始宿主配置和项目状态，后续才能判断是否可恢复。\n- **准备移除宿主 plugin / Skill / 规则入口**：如果试装后行为异常，可以把宿主 AI 恢复到试装前状态。\n- **记录安装命令和写入路径**：没有明确卸载说明时，至少要知道哪些目录或配置需要手动清理。\n- **准备撤销测试 API key 或 token**：测试凭证泄露或误用时，可以快速止损。\n- **如果没有回滚路径，不进入主力环境**：不可回滚是继续前阻断项，不应靠信任或运气继续。\n\n## 哪些只能预览\n\n- 解释项目适合谁和能做什么\n- 基于项目文档演示典型对话流程\n- 帮助用户判断是否值得安装或继续研究\n\n## 哪些必须安装后验证\n\n- 真实安装 Skill、插件或 CLI\n- 执行脚本、修改本地文件或访问外部服务\n- 验证真实输出质量、性能和兼容性\n\n## 边界与风险判断卡\n\n- **把安装前预览误认为真实运行**：用户可能高估项目已经完成的配置、权限和兼容性验证。 处理方式：明确区分 prompt_preview_can_do 与 runtime_required。 Claim：`clm_0024` inferred 0.45\n- **宿主 AI 插件或 Skill 规则冲突**：新规则可能改变用户现有宿主 AI 的工作方式。 处理方式：安装前先检查插件 manifest 和 Skill 文件，必要时隔离测试。 证据：`.agents/plugins/marketplace.json`, `.claude-plugin/marketplace.json`, `.cursor-plugin/marketplace.json`, `mem0-plugin/.claude-plugin/plugin.json` 等 Claim：`clm_0025` supported 0.86\n- **命令执行会修改本地环境**：安装命令可能写入用户主目录、宿主插件目录或项目配置。 处理方式：先在隔离环境或测试账号中运行。 证据：`AGENTS.md`, 
`README.md`, `examples/nemoclaw/quickstart.md`, `mem0-plugin/skills/mem0/references/quickstart.md` 等 Claim：`clm_0026` supported 0.86\n- **待确认**：真实安装后是否与用户当前宿主 AI 版本兼容？。原因：兼容性只能通过实际宿主环境验证。\n- **待确认**：项目输出质量是否满足用户具体任务？。原因：安装前预览只能展示流程和边界，不能替代真实评测。\n- **待确认**：安装命令是否需要网络、权限或全局写入？。原因：这影响企业环境和个人环境的安装风险。\n\n## 开工前工作上下文\n\n### 加载顺序\n\n- 先读取 how_to_use.host_ai_instruction，建立安装前判断资产的边界。\n- 读取 claim_graph_summary，确认事实来自 Claim/Evidence Graph，而不是 Human Wiki 叙事。\n- 再读取 intended_users、capabilities 和 quick_start_candidates，判断用户是否匹配。\n- 需要执行具体任务时，优先查 role_skill_index，再查 evidence_index。\n- 遇到真实安装、文件修改、网络访问、性能或兼容性问题时，转入 risk_card 和 boundaries.runtime_required。\n\n### 任务路由\n\n- **AI Skill / Agent 指令资产库**：先基于 role_skill_index / evidence_index 帮用户挑选可用角色、Skill 或工作流。 边界：可做安装前 Prompt 体验。 证据：`mem0-plugin/skills/mem0/SKILL.md`, `mem0-plugin/skills/mem0-mcp/SKILL.md`, `openclaw/skills/memory-dream/SKILL.md`, `openclaw/skills/memory-triage/SKILL.md` 等 Claim：`clm_0001` supported 0.86\n- **多宿主安装与分发**：先说明这是安装后验证能力，再给出安装前检查清单。 边界：必须真实安装或运行后验证。 证据：`.agents/plugins/marketplace.json`, `.claude-plugin/marketplace.json`, `.cursor-plugin/marketplace.json`, `mem0-plugin/.claude-plugin/plugin.json` 等 Claim：`clm_0002` supported 0.86\n- **命令行启动或安装流程**：先说明这是安装后验证能力，再给出安装前检查清单。 边界：必须真实安装或运行后验证。 证据：`AGENTS.md`, `README.md`, `examples/nemoclaw/quickstart.md`, `mem0-plugin/skills/mem0/references/quickstart.md` 等 Claim：`clm_0003` supported 0.86\n\n### 上下文规模\n\n- 文件总数：1964\n- 重要文件覆盖：40/1964\n- 证据索引条目：80\n- 角色 / Skill 条目：9\n\n### 证据不足时的处理\n\n- **missing_evidence**：说明证据不足，要求用户提供目标文件、README 段落或安装后验证记录；不要补全事实。\n- **out_of_scope_request**：说明该任务超出当前 AI Context Pack 证据范围，并建议用户先查看 Human Manual 或真实安装后验证。\n- **runtime_request**：给出安装前检查清单和命令来源，但不要替用户执行命令或声称已执行。\n- **source_conflict**：同时展示冲突来源，标记为待核实，不要强行选择一个版本。\n\n## Prompt Recipes\n\n### 适配判断\n\n- 目标：判断这个项目是否适合用户当前任务。\n- 预期输出：适配结论、关键理由、证据引用、安装前可预览内容、必须安装后验证内容、下一步建议。\n\n```text\n请基于 @mem0/cli 的 AI Context Pack，先问我 3 
个必要问题，然后判断它是否适合我的任务。回答必须包含：适合谁、能做什么、不能做什么、是否值得安装、证据来自哪里。所有项目事实必须引用 evidence_refs、source_paths 或 claim_id。\n```\n\n### 安装前体验\n\n- 目标：让用户在安装前感受核心工作流，同时避免把预览包装成真实能力或营销承诺。\n- 预期输出：一段带边界标签的体验剧本、安装后验证清单和谨慎建议；不含真实运行承诺或强营销表述。\n\n```text\n请把 @mem0/cli 当作安装前体验资产，而不是已安装工具或真实运行环境。\n\n请严格输出四段：\n1. 先问我 3 个必要问题。\n2. 给出一段“体验剧本”：用 [安装前可预览]、[必须安装后验证]、[证据不足] 三种标签展示它可能如何引导工作流。\n3. 给出安装后验证清单：列出哪些能力只有真实安装、真实宿主加载、真实项目运行后才能确认。\n4. 给出谨慎建议：只能说“值得继续研究/试装”“先补充信息后再判断”或“不建议继续”，不得替项目背书。\n\n硬性边界：\n- 不要声称已经安装、运行、执行测试、修改文件或产生真实结果。\n- 不要写“自动适配”“确保通过”“完美适配”“强烈建议安装”等承诺性表达。\n- 如果描述安装后的工作方式，必须使用“如果安装成功且宿主正确加载 Skill，它可能会……”这种条件句。\n- 体验剧本只能写成“示例台词/假设流程”：使用“可能会询问/可能会建议/可能会展示”，不要写“已写入、已生成、已通过、正在运行、正在生成”。\n- Prompt Preview 不负责给安装命令；如用户准备试装，只能提示先阅读 Quick Start 和 Risk Card，并在隔离环境验证。\n- 所有项目事实必须来自 supported claim、evidence_refs 或 source_paths；inferred/unverified 只能作风险或待确认项。\n\n```\n\n### 角色 / Skill 选择\n\n- 目标：从项目里的角色或 Skill 中挑选最匹配的资产。\n- 预期输出：候选角色或 Skill 列表，每项包含适用场景、证据路径、风险边界和是否需要安装后验证。\n\n```text\n请读取 role_skill_index，根据我的目标任务推荐 3-5 个最相关的角色或 Skill。每个推荐都要说明适用场景、可能输出、风险边界和 evidence_refs。\n```\n\n### 风险预检\n\n- 目标：安装或引入前识别环境、权限、规则冲突和质量风险。\n- 预期输出：环境、权限、依赖、许可、宿主冲突、质量风险和未知项的检查清单。\n\n```text\n请基于 risk_card、boundaries 和 quick_start_candidates，给我一份安装前风险预检清单。不要替我执行命令，只说明我应该检查什么、为什么检查、失败会有什么影响。\n```\n\n### 宿主 AI 开工指令\n\n- 目标：把项目上下文转成一次对话开始前的宿主 AI 指令。\n- 预期输出：一段边界明确、证据引用明确、适合复制给宿主 AI 的开工前指令。\n\n```text\n请基于 @mem0/cli 的 AI Context Pack，生成一段我可以粘贴给宿主 AI 的开工前指令。这段指令必须遵守 not_runtime=true，不能声称项目已经安装、运行或产生真实结果。\n```\n\n\n## 角色 / Skill 索引\n\n- 共索引 9 个角色 / Skill / 项目文档条目。\n\n- **mem0-mcp**（skill）： 激活提示：当用户任务与“mem0-mcp”描述的流程高度相关时，先用它做安装前体验，再决定是否安装。 证据：`mem0-plugin/skills/mem0-mcp/SKILL.md`\n- **mem0**（skill）： 激活提示：当用户任务与“mem0”描述的流程高度相关时，先用它做安装前体验，再决定是否安装。 证据：`mem0-plugin/skills/mem0/SKILL.md`\n- **memory-dream**（skill）： 激活提示：当用户任务与“memory-dream”描述的流程高度相关时，先用它做安装前体验，再决定是否安装。 证据：`openclaw/skills/memory-dream/SKILL.md`\n- **memory-triage**（skill）： 激活提示：当用户任务与“memory-triage”描述的流程高度相关时，先用它做安装前体验，再决定是否安装。 
证据：`openclaw/skills/memory-triage/SKILL.md`\n- **mem0-cli**（skill）： 激活提示：当用户任务与“mem0-cli”描述的流程高度相关时，先用它做安装前体验，再决定是否安装。 证据：`skills/mem0-cli/SKILL.md`\n- **mem0-integrate**（skill）： 激活提示：当用户任务与“mem0-integrate”描述的流程高度相关时，先用它做安装前体验，再决定是否安装。 证据：`skills/mem0-integrate/SKILL.md`\n- **mem0-test-integration**（skill）： 激活提示：当用户任务与“mem0-test-integration”描述的流程高度相关时，先用它做安装前体验，再决定是否安装。 证据：`skills/mem0-test-integration/SKILL.md`\n- **mem0-vercel-ai-sdk**（skill）： 激活提示：当用户任务与“mem0-vercel-ai-sdk”描述的流程高度相关时，先用它做安装前体验，再决定是否安装。 证据：`skills/mem0-vercel-ai-sdk/SKILL.md`\n- **mem0**（skill）： 激活提示：当用户任务与“mem0”描述的流程高度相关时，先用它做安装前体验，再决定是否安装。 证据：`skills/mem0/SKILL.md`\n\n## 证据索引\n\n- 共索引 80 条证据。\n\n- **Mintlify Starter Kit**（documentation）：Click on Use this template to copy the Mintlify starter kit. The starter kit contains examples including 证据：`docs/README.md`\n- **Contributing to embedchain docs**（documentation）：Install the Mintlify CLI https://www.npmjs.com/package/mintlify to preview the documentation changes locally. To install, use the following command 证据：`embedchain/docs/README.md`\n- **AGENTS.md**（documentation）：This file provides context for AI coding assistants Claude Code, Cursor, GitHub Copilot, Codex, etc. working with the Mem0 repository. 证据：`AGENTS.md`\n- **New Memory Algorithm April 2026**（documentation）：📄 Benchmarking Mem0's token-efficient memory algorithm → 证据：`README.md`\n- **mem0 CLI**（documentation）：The official command-line interface for mem0 https://mem0.ai — the memory layer for AI agents. Works with the Mem0 Platform API. Available in Python and Node.js. 证据：`cli/README.md`\n- **What is Embedchain?**（documentation）：Embedchain is an Open Source Framework for personalizing LLM responses. It makes it easy to create and deploy personalized AI apps. At its core, Embedchain follows the design principle of being \"Conventional but Configurable\" to serve both software engineers and machine learning engineers. 
证据：`embedchain/README.md`\n- **Mem0: Building Production‑Ready AI Agents with Scalable Long‑Term Memory**（documentation）：Mem0: Building Production‑Ready AI Agents with Scalable Long‑Term Memory 证据：`evaluation/README.md`\n- **Mem0 Plugin for Claude Code, Claude Cowork, Cursor & Codex**（documentation）：Mem0 Plugin for Claude Code, Claude Cowork, Cursor & Codex 证据：`mem0-plugin/README.md`\n- **Mem0 - The Memory Layer for Your AI Apps**（documentation）：Mem0 - The Memory Layer for Your AI Apps 证据：`mem0-ts/README.md`\n- **@mem0/openclaw-mem0**（documentation）：Long-term memory for OpenClaw https://github.com/openclaw/openclaw agents, powered by Mem0 https://mem0.ai . 证据：`openclaw/README.md`\n- **OpenMemory**（documentation）：⚠️ Sunsetting Notice: OpenMemory is being sunset. For local self-hosted memory with a dashboard, please use the Mem0 self-hosted server https://docs.mem0.ai/open-source/overview instead. Get started with cd server && make bootstrap . See the self-hosted docs https://docs.mem0.ai/open-source/setup for configuration details. 证据：`openmemory/README.md`\n- **Mem0 Self-Hosted Server**（documentation）：Mem0 ships a self-hosted FastAPI server plus a local dashboard. It is secure by default, supports dashboard login and API keys, and exposes OpenAPI docs at /docs . 证据：`server/README.md`\n- **Mem0 Skills for AI Coding Assistants**（documentation）：Mem0 Skills for AI Coding Assistants 证据：`skills/README.md`\n- **Mem0 AI SDK Provider**（documentation）：The Mem0 AI SDK Provider is a community-maintained library developed by Mem0 https://mem0.ai/ to integrate with the Vercel AI SDK. This library brings enhanced AI interaction capabilities to your applications by introducing persistent memory functionality. With Mem0, language model conversations gain memory, enabling more contextualized and personalized responses based on past interactions. 
证据：`vercel-ai-sdk/README.md`\n- **mem0 CLI Node.js**（documentation）：The official command-line interface for mem0 https://mem0.ai — the memory layer for AI agents. TypeScript implementation. 证据：`cli/node/README.md`\n- **mem0 CLI Python**（documentation）：The official command-line interface for mem0 https://mem0.ai — the memory layer for AI agents. Python implementation. 证据：`cli/python/README.md`\n- **API Server**（documentation）：This is a docker template to create your own API Server using the embedchain package. To know more about the API Server and how to use it, go here https://docs.embedchain.ai/examples/api server . 证据：`embedchain/examples/api_server/README.md`\n- **Chainlit + Embedchain Demo**（documentation）：In this example, we will learn how to use Chainlit and Embedchain together 证据：`embedchain/examples/chainlit/README.md`\n- **Embedchain Chat with PDF App**（documentation）：You can easily create and deploy your own Chat-with-PDF App using Embedchain. 证据：`embedchain/examples/chat-pdf/README.md`\n- **Discord Bot**（documentation）：This is a docker template to create your own Discord bot using the embedchain package. To know more about the bot and how to use it, go here https://docs.embedchain.ai/examples/discord bot . 证据：`embedchain/examples/discord_bot/README.md`\n- **Streamlit Chat bot App Embedchain + Mistral**（documentation）：Streamlit Chat bot App Embedchain + Mistral 证据：`embedchain/examples/mistral-streamlit/README.md`\n- **Installation**（documentation）：Fork this repo on Github https://github.com/embedchain/embedchain to create your own NextJS discord and slack bot powered by Embedchain app. 证据：`embedchain/examples/nextjs/README.md`\n- **Private AI**（documentation）：In this example, we will create a private AI using embedchain. 
证据：`embedchain/examples/private-ai/README.md`\n- **Single command to rule them all,**（documentation）：Single command to rule them all, To run the app locally, Using docker locally , 证据：`embedchain/examples/rest-api/README.md`\n- **Config directory**（documentation）：Here, all the YAML files will get stored. 证据：`embedchain/examples/rest-api/configs/README.md`\n- **Sadhguru AI**（documentation）：This directory contains the code used to implement Sadhguru AI https://sadhguru-ai.streamlit.app/ using Embedchain. It is built on 3K+ videos and 1K+ articles of Sadhguru. You can find the full list of data sources here https://gist.github.com/deshraj/50b0597157e04829bbbb7bc418be6ccb . 证据：`embedchain/examples/sadhguru-ai/README.md`\n- **Telegram Bot**（documentation）：This is a replit template to create your own Telegram bot using the embedchain package. To know more about the bot and how to use it, go here https://docs.embedchain.ai/examples/telegram bot . 证据：`embedchain/examples/telegram_bot/README.md`\n- **Unacademy UPSC AI**（documentation）：This directory contains the code used to implement Unacademy UPSC AI https://unacademy-ai.streamlit.app/ using Embedchain. It is built on 16K+ youtube videos and 800+ course pages from Unacademy website. You can find the full list of data sources here https://gist.github.com/deshraj/7714feadccca13cefe574951652fa9b2 . 证据：`embedchain/examples/unacademy-ai/README.md`\n- **WhatsApp Bot**（documentation）：This is a replit template to create your own WhatsApp bot using the embedchain package. To know more about the bot and how to use it, go here https://docs.embedchain.ai/examples/whatsapp bot . 证据：`embedchain/examples/whatsapp_bot/README.md`\n- **Mem0 Assistant Chrome Extension**（documentation）：A powerful Chrome extension that combines AI chat with your personal knowledge base through mem0. Get instant, personalized answers about video content while leveraging your own knowledge and memories - all without leaving the page. 
证据：`examples/yt-assistant-chrome/README.md`\n- **Mem0 Skill for Claude**（documentation）：Add persistent memory to any AI application in minutes using Mem0 Platform https://app.mem0.ai?utm source=oss&utm medium=mem0-plugin-skill-readme . 证据：`mem0-plugin/skills/mem0/README.md`\n- **mem0-ts**（documentation）：A TypeScript implementation of the mem0 memory system, using OpenAI for embeddings and completions. 证据：`mem0-ts/src/oss/README.md`\n- **OpenMemory API**（documentation）：This directory contains the backend API for OpenMemory, built with FastAPI and SQLAlchemy. This also runs the Mem0 MCP Server that you can use with MCP clients to remember things. 证据：`openmemory/api/README.md`\n- **Mem0 CLI Skill for Claude**（documentation）：Manage memories from the terminal using the Mem0 CLI https://docs.mem0.ai/cli . This skill teaches Claude how to use every mem0 command, flag, and output mode -- for both the Node.js and Python implementations. 证据：`skills/mem0-cli/README.md`\n- **mem0-integrate — Pipeline Skill**（documentation）：Wire Mem0 https://mem0.ai into an existing repository end-to-end, using a goal-driven, test-first pipeline. 证据：`skills/mem0-integrate/README.md`\n- **mem0-test-integration — Pipeline Skill**（documentation）：mem0-test-integration — Pipeline Skill 证据：`skills/mem0-test-integration/README.md`\n- **Mem0 Vercel AI SDK Skill for Claude**（documentation）：Mem0 Vercel AI SDK Skill for Claude 证据：`skills/mem0-vercel-ai-sdk/README.md`\n- **Mem0 Skill for Claude**（documentation）：Add persistent memory to any AI application in minutes using Mem0 Platform https://app.mem0.ai?utm source=oss&utm medium=skill-mem0-readme or the open-source self-hosted SDK. 
证据：`skills/mem0/README.md`\n- **Package**（package_manifest）：{ \"name\": \"mem0ai\", \"version\": \"3.0.3\", \"description\": \"The Memory Layer For Your AI Apps\", \"main\": \"./dist/index.js\", \"module\": \"./dist/index.mjs\", \"types\": \"./dist/index.d.ts\", \"typesVersions\": { \" \": { \" \": \"./dist/index.d.ts\" , \"oss\": \"./dist/oss/index.d.ts\" } }, \"exports\": { \".\": { \"types\": \"./dist/index.d.ts\", \"require\": \"./dist/index.js\", \"import\": \"./dist/index.mjs\" }, \"./oss\": { \"types\": \"./dist/oss/index.d.ts\", \"require\": \"./dist/oss/index.js\", \"import\": \"./dist/oss/index.mjs\" } }, \"files\": \"dist\" , \"scripts\": { \"clean\": \"rimraf dist\", \"build\": \"npm run clean && npx prettier --check . && npx tsup\", \"dev\": \"npx nodemon\", \"start\": \"pnpm run example memory\", \"example\": \"ts-node src/o… 证据：`mem0-ts/package.json`\n- **Package**（package_manifest）：{ \"name\": \"@mem0/openclaw-mem0\", \"version\": \"1.0.11\", \"type\": \"module\", \"description\": \"Mem0 memory backend for OpenClaw — platform or self-hosted open-source\", \"license\": \"Apache-2.0\", \"repository\": { \"type\": \"git\", \"url\": \"https://github.com/mem0ai/mem0\", \"directory\": \"openclaw\" }, \"keywords\": \"openclaw\", \"plugin\", \"memory\", \"mem0\", \"long-term-memory\" , \"main\": \"./dist/index.js\", \"types\": \"./dist/index.d.ts\", \"exports\": { \".\": { \"types\": \"./dist/index.d.ts\", \"import\": \"./dist/index.js\" } }, \"files\": \"dist\", \"openclaw.plugin.json\", \"skills\" , \"scripts\": { \"build\": \"tsup\", \"test\": \"vitest run\" }, \"dependencies\": { \"@sinclair/typebox\": \"0.34.47\", \"mem0ai\": \"3.0.2\" }, \"openclaw\": { \"extension… 证据：`openclaw/package.json`\n- **Package**（package_manifest）：{ \"name\": \"@mem0/vercel-ai-provider\", \"version\": \"2.0.5\", \"description\": \"Vercel AI Provider for providing memory to LLMs\", \"main\": \"./dist/index.js\", \"module\": \"./dist/index.mjs\", \"types\": 
\"./dist/index.d.ts\", \"files\": \"dist/ / \" , \"scripts\": { \"build\": \"tsup\", \"clean\": \"rm -rf dist\", \"dev\": \"nodemon\", \"lint\": \"eslint \\\"./ / .ts \\\"\", \"type-check\": \"tsc --noEmit\", \"prettier-check\": \"prettier --check \\\"./ / .ts \\\"\", \"test\": \"jest\", \"test:edge\": \"vitest --config vitest.edge.config.js --run\", \"test:node\": \"vitest --config vitest.node.config.js --run\" }, \"repository\": { \"type\": \"git\", \"url\": \"https://github.com/mem0ai/mem0\", \"directory\": \"vercel-ai-sdk\" }, \"keywords\": \"ai\", \"vercel-ai\" , \"autho… 证据：`vercel-ai-sdk/package.json`\n- **Contributing to mem0**（documentation）：Let us make contribution easy, collaborative and fun. 证据：`CONTRIBUTING.md`\n- **Package**（package_manifest）：{ \"name\": \"@mem0/cli\", \"version\": \"0.2.5\", \"description\": \"The official CLI for mem0 — the memory layer for AI agents\", \"type\": \"module\", \"bin\": { \"mem0\": \"./dist/index.js\" }, \"scripts\": { \"build\": \"tsup\", \"dev\": \"tsx src/index.ts\", \"test\": \"vitest run\", \"test:watch\": \"vitest\", \"lint\": \"biome check src/\", \"lint:fix\": \"biome check --write src/\", \"typecheck\": \"tsc --noEmit\" }, \"engines\": { \"node\": \" =18.0.0\" }, \"license\": \"Apache-2.0\", \"author\": \"mem0.ai \", \"repository\": { \"type\": \"git\", \"url\": \"https://github.com/mem0ai/mem0\", \"directory\": \"cli/node\" }, \"keywords\": \"mem0\", \"memory\", \"ai\", \"agents\", \"cli\" , \"publishConfig\": { \"access\": \"public\" }, \"dependencies\": { \"commander\": \"^12.0.0\", \"ch… 证据：`cli/node/package.json`\n- **Contributing to embedchain**（documentation）：Let us make contribution easy, collaborative and fun. 
证据：`embedchain/CONTRIBUTING.md`\n- **Package**（package_manifest）：{ \"name\": \"mem0-demo\", \"version\": \"0.1.0\", \"private\": true, \"scripts\": { \"dev\": \"next dev --turbopack\", \"build\": \"next build\", \"start\": \"next start\", \"lint\": \"next lint\" }, \"dependencies\": { \"@ai-sdk/openai\": \"^1.1.15\", \"@assistant-ui/react\": \"^0.8.2\", \"@assistant-ui/react-ai-sdk\": \"^0.8.0\", \"@assistant-ui/react-markdown\": \"^0.8.0\", \"@mem0/vercel-ai-provider\": \"^1.0.4\", \"@radix-ui/react-alert-dialog\": \"^1.1.6\", \"@radix-ui/react-avatar\": \"^1.1.3\", \"@radix-ui/react-popover\": \"^1.1.6\", \"@radix-ui/react-scroll-area\": \"^1.2.3\", \"@radix-ui/react-slot\": \"^1.1.2\", \"@radix-ui/react-tooltip\": \"^1.1.8\", \"@types/js-cookie\": \"^3.0.6\", \"@types/react-syntax-highlighter\": \"^15.5.13\", \"@types/uuid\": \"^10.0.… 证据：`examples/mem0-demo/package.json`\n- **Package**（package_manifest）：{ \"name\": \"mem0-sdk-chat-bot\", \"private\": true, \"version\": \"0.0.0\", \"type\": \"module\", \"scripts\": { \"dev\": \"vite\", \"build\": \"tsc -b && vite build\", \"lint\": \"eslint .\", \"preview\": \"vite preview\" }, \"dependencies\": { \"@mem0/vercel-ai-provider\": \"0.0.12\", \"@radix-ui/react-avatar\": \"^1.1.1\", \"@radix-ui/react-dialog\": \"^1.1.2\", \"@radix-ui/react-icons\": \"^1.3.1\", \"@radix-ui/react-label\": \"^2.1.0\", \"@radix-ui/react-scroll-area\": \"^1.2.0\", \"@radix-ui/react-select\": \"^2.1.2\", \"@radix-ui/react-slot\": \"^1.1.0\", \"ai\": \"4.1.42\", \"buffer\": \"^6.0.3\", \"class-variance-authority\": \"^0.7.0\", \"clsx\": \"^2.1.1\", \"framer-motion\": \"^11.11.11\", \"lucide-react\": \"^0.454.0\", \"openai\": \"^4.86.2\", \"react\": \"^18.3.1\", \"re… 证据：`examples/multimodal-demo/package.json`\n- **Package**（package_manifest）：{ \"name\": \"openai-inbuilt-tools\", \"version\": \"1.0.0\", \"description\": \"\", \"license\": \"ISC\", \"author\": \"\", \"type\": \"module\", \"main\": \"index.js\", \"scripts\": { \"test\": \"echo 
\\\"Error: no test specified\\\" && exit 1\", \"start\": \"node index.js\" }, \"packageManager\": \"pnpm@10.5.2+sha512.da9dc28cd3ff40d0592188235ab25d3202add8a207afbedc682220e4a0029ffbff4562102b9e6e46b4e3f9e8bd53e6d05de48544b0c57d4b0179e22c76d1199b\", \"dependencies\": { \"mem0ai\": \"^2.1.2\", \"openai\": \"^4.87.2\", \"zod\": \"^3.24.2\" } } 证据：`examples/openai-inbuilt-tools/package.json`\n- **Package**（package_manifest）：{ \"name\": \"mem0-sdk-chat-bot\", \"private\": true, \"version\": \"0.0.0\", \"type\": \"module\", \"scripts\": { \"dev\": \"vite\", \"build\": \"tsc -b && vite build\", \"lint\": \"eslint .\", \"preview\": \"vite preview\" }, \"dependencies\": { \"@mem0/vercel-ai-provider\": \"0.0.12\", \"@radix-ui/react-avatar\": \"^1.1.1\", \"@radix-ui/react-dialog\": \"^1.1.2\", \"@radix-ui/react-icons\": \"^1.3.1\", \"@radix-ui/react-label\": \"^2.1.0\", \"@radix-ui/react-scroll-area\": \"^1.2.0\", \"@radix-ui/react-select\": \"^2.1.2\", \"@radix-ui/react-slot\": \"^1.1.0\", \"ai\": \"4.1.42\", \"buffer\": \"^6.0.3\", \"class-variance-authority\": \"^0.7.0\", \"clsx\": \"^2.1.1\", \"framer-motion\": \"^11.11.11\", \"lucide-react\": \"^0.454.0\", \"openai\": \"^4.86.2\", \"react\": \"^18.3.1\", \"re… 证据：`examples/vercel-ai-sdk-chat-app/package.json`\n- **Package**（package_manifest）：{ \"name\": \"mem0-assistant\", \"version\": \"1.0.0\", \"description\": \"A Chrome extension that integrates AI chat functionality directly into YouTube and other sites. 
Get instant answers about video content without leaving the page.\", \"main\": \"background.js\", \"scripts\": { \"build\": \"webpack --config webpack.config.js\", \"watch\": \"webpack --config webpack.config.js --watch\" }, \"keywords\": , \"author\": \"\", \"license\": \"ISC\", \"devDependencies\": { \"@babel/core\": \"^7.22.0\", \"@babel/preset-env\": \"^7.22.0\", \"babel-loader\": \"^9.1.2\", \"css-loader\": \"^7.1.2\", \"style-loader\": \"^4.0.0\", \"webpack\": \"^5.85.0\", \"webpack-cli\": \"^5.1.1\", \"youtube-transcript\": \"^1.0.6\" }, \"dependencies\": { \"mem0ai\": \"^2.1.15\" } } 证据：`examples/yt-assistant-chrome/package.json`\n- **Package**（package_manifest）：{ \"name\": \"@mem0/community\", \"version\": \"0.0.1\", \"description\": \"Community features for Mem0\", \"main\": \"./dist/index.js\", \"module\": \"./dist/index.mjs\", \"types\": \"./dist/index.d.ts\", \"exports\": { \".\": { \"types\": \"./dist/index.d.ts\", \"require\": \"./dist/index.js\", \"import\": \"./dist/index.mjs\" }, \"./langchain\": { \"types\": \"./dist/integrations/langchain/index.d.ts\", \"require\": \"./dist/integrations/langchain/index.js\", \"import\": \"./dist/integrations/langchain/index.mjs\" } }, \"files\": \"dist\" , \"scripts\": { \"clean\": \"rimraf dist\", \"build\": \"npm run clean && npx prettier --check . 
&& npx tsup\", \"dev\": \"npx nodemon\", \"test\": \"jest\", \"test:ts\": \"jest --config jest.config.js\", \"test:watch\": \"jest --con… 证据：`mem0-ts/src/community/package.json`\n- **Package**（package_manifest）：{ \"name\": \"mem0ai-oss\", \"version\": \"1.0.0\", \"description\": \"TypeScript implementation of mem0 memory system\", \"main\": \"dist/index.js\", \"types\": \"dist/index.d.ts\", \"scripts\": { \"build\": \"tsc\", \"test\": \"jest\", \"start\": \"pnpm run example memory\", \"example\": \"ts-node examples/vector-stores/index.ts\", \"clean\": \"rimraf dist\", \"prepare\": \"npm run build\" }, \"dependencies\": { \"@anthropic-ai/sdk\": \"^0.18.0\", \"@google/genai\": \"^0.7.0\", \"@qdrant/js-client-rest\": \"^1.13.0\", \"@types/node\": \"^20.11.19\", \"@types/pg\": \"^8.11.0\", \"@types/redis\": \"^4.0.10\", \"@types/uuid\": \"^9.0.8\", \"cloudflare\": \"^4.2.0\", \"dotenv\": \"^16.4.4\", \"groq-sdk\": \"^0.3.0\", \"openai\": \"^4.28.0\", \"pg\": \"^8.11.3\", \"redis\": \"^4.7.0\", \"bett… 证据：`mem0-ts/src/oss/package.json`\n- **Contributing to OpenMemory**（documentation）：We are a team of developers passionate about the future of AI and open-source software. With years of experience in both fields, we believe in the power of community-driven development and are excited to build tools that make AI more accessible and personalized. 
证据：`openmemory/CONTRIBUTING.md`\n- **Package**（package_manifest）：{ \"name\": \"my-v0-project\", \"version\": \"0.1.0\", \"private\": true, \"scripts\": { \"dev\": \"next dev\", \"build\": \"next build\", \"start\": \"next start\", \"lint\": \"next lint\" }, \"dependencies\": { \"@hookform/resolvers\": \"^3.9.1\", \"@radix-ui/react-accordion\": \"^1.2.2\", \"@radix-ui/react-alert-dialog\": \"^1.1.4\", \"@radix-ui/react-aspect-ratio\": \"^1.1.1\", \"@radix-ui/react-avatar\": \"^1.1.2\", \"@radix-ui/react-checkbox\": \"^1.1.3\", \"@radix-ui/react-collapsible\": \"^1.1.2\", \"@radix-ui/react-context-menu\": \"^2.2.4\", \"@radix-ui/react-dialog\": \"^1.1.4\", \"@radix-ui/react-dropdown-menu\": \"^2.1.4\", \"@radix-ui/react-hover-card\": \"^1.1.4\", \"@radix-ui/react-label\": \"^2.1.1\", \"@radix-ui/react-menubar\": \"^1.1.4\", \"@radix-ui/r… 证据：`openmemory/ui/package.json`\n- **Package**（package_manifest）：{ \"name\": \"mem0-dashboard\", \"version\": \"0.1.0\", \"private\": true, \"scripts\": { \"dev\": \"next dev\", \"build\": \"next build\", \"start\": \"next start\", \"lint\": \"prettier --check .\", \"format\": \"prettier --write .\", \"typecheck\": \"tsc --noEmit\" }, \"dependencies\": { \"@hookform/resolvers\": \"^3.6.0\", \"@radix-ui/react-accordion\": \"^1.2.0\", \"@radix-ui/react-alert-dialog\": \"^1.1.4\", \"@radix-ui/react-avatar\": \"^1.0.4\", \"@radix-ui/react-checkbox\": \"^1.0.4\", \"@radix-ui/react-collapsible\": \"^1.1.3\", \"@radix-ui/react-dialog\": \"^1.1.1\", \"@radix-ui/react-dropdown-menu\": \"^2.0.6\", \"@radix-ui/react-hover-card\": \"^1.1.1\", \"@radix-ui/react-icons\": \"^1.3.0\", \"@radix-ui/react-label\": \"^2.1.0\", \"@radix-ui/react-popover\":… 证据：`server/dashboard/package.json`\n- **Mem0 MCP Memory Protocol**（skill_instruction）：You have access to persistent memory via the mem0 MCP tools. Follow this protocol to maintain context across sessions. 
证据：`mem0-plugin/skills/mem0-mcp/SKILL.md`\n- **Mem0 Platform Integration**（skill_instruction）：Skill Graph: This skill is part of the Mem0 skill graph: - mem0 this skill -- Platform Client SDK + OSS Python + TypeScript - mem0-cli https://github.com/mem0ai/mem0/tree/main/skills/mem0-cli -- Command-line interface - mem0-vercel-ai-sdk https://github.com/mem0ai/mem0/tree/main/skills/mem0-vercel-ai-sdk -- Vercel AI SDK provider 证据：`mem0-plugin/skills/mem0/SKILL.md`\n- **Memory Consolidation**（skill_instruction）：You are performing a memory consolidation pass. Your goal is to review all stored memories for this user and improve their overall quality. Think of this as compressing raw observations into clean, durable knowledge. 证据：`openclaw/skills/memory-dream/SKILL.md`\n- **Memory Protocol**（skill_instruction）：You have persistent long-term memory powered by mem0. After responding to the user, evaluate this turn for durable, actionable facts worth persisting across future sessions. 证据：`openclaw/skills/memory-triage/SKILL.md`\n- **Mem0 CLI**（skill_instruction）：The official command-line interface for the Mem0 memory platform. Add, search, list, update, and delete memories from the terminal -- for developers, AI agents, and CI/CD pipelines. 证据：`skills/mem0-cli/SKILL.md`\n- **mem0-integrate**（skill_instruction）：Wire Mem0 into an existing repo with a goal-driven, test-first pipeline. Pairs with mem0-test-integration for verification. 
证据：`skills/mem0-integrate/SKILL.md`\n- 其余 20 条证据见 `AI_CONTEXT_PACK.json` 或 `EVIDENCE_INDEX.json`。\n\n## 宿主 AI 必须遵守的规则\n\n- **把本资产当作开工前上下文，而不是运行环境。**：AI Context Pack 只包含证据化项目理解，不包含目标项目的可执行状态。 证据：`docs/README.md`, `embedchain/docs/README.md`, `AGENTS.md`\n- **回答用户时区分可预览内容与必须安装后才能验证的内容。**：安装前体验的消费者价值来自降低误装和误判，而不是伪装成真实运行。 证据：`docs/README.md`, `embedchain/docs/README.md`, `AGENTS.md`\n\n## 用户开工前应该回答的问题\n\n- 你准备在哪个宿主 AI 或本地环境中使用它？\n- 你只是想先体验工作流，还是准备真实安装？\n- 你最在意的是安装成本、输出质量、还是和现有规则的冲突？\n\n## 验收标准\n\n- 所有能力声明都能回指到 evidence_refs 中的文件路径。\n- AI_CONTEXT_PACK.md 没有把预览包装成真实运行。\n- 用户能在 3 分钟内看懂适合谁、能做什么、如何开始和风险边界。\n\n---\n\n## Doramagic Context Augmentation\n\n下面内容用于强化 Repomix/AI Context Pack 主体。Human Manual 只提供阅读骨架；踩坑日志会被转成宿主 AI 必须遵守的工作约束。\n\n## Human Manual 骨架\n\n使用规则：这里只是项目阅读路线和显著性信号，不是事实权威。具体事实仍必须回到 repo evidence / Claim Graph。\n\n宿主 AI 硬性规则：\n- 不得把页标题、章节顺序、摘要或 importance 当作项目事实证据。\n- 解释 Human Manual 骨架时，必须明确说它只是阅读路线/显著性信号。\n- 能力、安装、兼容性、运行状态和风险判断必须引用 repo evidence、source path 或 Claim Graph。\n\n- **Introduction to Mem0**：importance `high`\n  - source_paths: README.md, docs/introduction.mdx, docs/open-source/overview.mdx, docs/platform/overview.mdx, docs/platform/platform-vs-oss.mdx\n- **Quick Start Guide**：importance `high`\n  - source_paths: docs/open-source/python-quickstart.mdx, docs/open-source/node-quickstart.mdx, docs/platform/quickstart.mdx, pyproject.toml, mem0-ts/package.json\n- **Use Cases and Applications**：importance `medium`\n  - source_paths: cookbooks/customer-support-chatbot.ipynb, docs/cookbooks/overview.mdx, docs/cookbooks/operations/support-inbox.mdx, docs/cookbooks/operations/email-automation.mdx, examples/misc/healthcare_assistant_google_adk.py\n- **System Architecture**：importance `high`\n  - source_paths: mem0/memory/main.py, mem0/memory/base.py, mem0/memory/storage.py, server/main.py, server/routers/__init__.py\n- **Memory Operations**：importance `high`\n  - source_paths: mem0/memory/main.py, docs/core-concepts/memory-operations/add.mdx, 
docs/core-concepts/memory-operations/search.mdx, docs/core-concepts/memory-operations/update.mdx, docs/core-concepts/memory-operations/delete.mdx\n- **AI Model Integration**：importance `high`\n  - source_paths: mem0/llms/base.py, mem0/llms/openai.py, mem0/llms/anthropic.py, mem0/llms/azure_openai.py, mem0/llms/gemini.py\n- **Vector Stores and Storage**：importance `high`\n  - source_paths: mem0/vector_stores/base.py, mem0/vector_stores/pinecone.py, mem0/vector_stores/qdrant.py, mem0/vector_stores/chroma.py, mem0/vector_stores/pgvector.py\n- **Embedding Models**：importance `high`\n  - source_paths: mem0/embeddings/base.py, mem0/embeddings/openai.py, mem0/embeddings/azure_openai.py, mem0/embeddings/huggingface.py, mem0/embeddings/ollama.py\n\n## Repo Inspection Evidence / 源码检查证据\n\n- repo_clone_verified: true\n- repo_inspection_verified: true\n- repo_commit: `219b1a6f3d376989e76fc68362bb577cb189ec31`\n- inspected_files: `pyproject.toml`, `README.md`, `docs/integrations.mdx`, `docs/openapi.json`, `docs/README.md`, `docs/api-reference.mdx`, `docs/docs.json`, `docs/vibecoding.mdx`, `docs/introduction.mdx`, `docs/core-concepts/memory-evaluation.mdx`, `docs/core-concepts/memory-types.mdx`, `docs/migration/oss-v2-to-v3.mdx`, `docs/migration/api-changes.mdx`, `docs/migration/oss-to-platform.mdx`, `docs/migration/platform-v2-to-v3.mdx`, `docs/api-reference/organizations-projects.mdx`, `docs/platform/contribute.mdx`, `docs/platform/cli.mdx`, `docs/platform/faqs.mdx`, `docs/platform/platform-vs-oss.mdx`\n\n宿主 AI 硬性规则：\n- 没有 repo_clone_verified=true 时，不得声称已经读过源码。\n- 没有 repo_inspection_verified=true 时，不得把 README/docs/package 文件判断写成事实。\n- 没有 quick_start_verified=true 时，不得声称 Quick Start 已跑通。\n\n## Doramagic Pitfall Constraints / 踩坑约束\n\n这些规则来自 Doramagic 发现、验证或编译过程中的项目专属坑点。宿主 AI 必须把它们当作工作约束，而不是普通说明文字。\n\n### Constraint 1: 能力判断依赖假设\n\n- Trigger: README/documentation is current enough for a first validation pass.\n- Host AI rule: 将假设转成下游验证清单。\n- Why it matters: 假设不成立时，用户拿不到承诺的能力。\n- 
Evidence: capability.assumptions | github_repo:656099147 | https://github.com/mem0ai/mem0 | README/documentation is current enough for a first validation pass.\n- Hard boundary: 不要把这个坑点包装成已解决、已验证或可忽略，除非后续验证证据明确证明它已经关闭。\n\n### Constraint 2: 维护活跃度未知\n\n- Trigger: 未记录 last_activity_observed。\n- Host AI rule: 补 GitHub 最近 commit、release、issue/PR 响应信号。\n- Why it matters: 新项目、停更项目和活跃项目会被混在一起，推荐信任度下降。\n- Evidence: evidence.maintainer_signals | github_repo:656099147 | https://github.com/mem0ai/mem0 | last_activity_observed missing\n- Hard boundary: 不要把这个坑点包装成已解决、已验证或可忽略，除非后续验证证据明确证明它已经关闭。\n\n### Constraint 3: 下游验证发现风险项\n\n- Trigger: no_demo\n- Host AI rule: 进入安全/权限治理复核队列。\n- Why it matters: 下游已经要求复核，不能在页面中弱化。\n- Evidence: downstream_validation.risk_items | github_repo:656099147 | https://github.com/mem0ai/mem0 | no_demo; severity=medium\n- Hard boundary: 不要把这个坑点包装成已解决、已验证或可忽略，除非后续验证证据明确证明它已经关闭。\n\n### Constraint 4: 存在评分风险\n\n- Trigger: no_demo\n- Host AI rule: 把风险写入边界卡，并确认是否需要人工复核。\n- Why it matters: 风险会影响是否适合普通用户安装。\n- Evidence: risks.scoring_risks | github_repo:656099147 | https://github.com/mem0ai/mem0 | no_demo; severity=medium\n- Hard boundary: 不要把这个坑点包装成已解决、已验证或可忽略，除非后续验证证据明确证明它已经关闭。\n\n### Constraint 5: issue/PR 响应质量未知\n\n- Trigger: issue_or_pr_quality=unknown。\n- Host AI rule: 抽样最近 issue/PR，判断是否长期无人处理。\n- Why it matters: 用户无法判断遇到问题后是否有人维护。\n- Evidence: evidence.maintainer_signals | github_repo:656099147 | https://github.com/mem0ai/mem0 | issue_or_pr_quality=unknown\n- Hard boundary: 不要把这个坑点包装成已解决、已验证或可忽略，除非后续验证证据明确证明它已经关闭。\n\n### Constraint 6: 发布节奏不明确\n\n- Trigger: release_recency=unknown。\n- Host AI rule: 确认最近 release/tag 和 README 安装命令是否一致。\n- Why it matters: 安装命令和文档可能落后于代码，用户踩坑概率升高。\n- Evidence: evidence.maintainer_signals | github_repo:656099147 | https://github.com/mem0ai/mem0 | release_recency=unknown\n- Hard boundary: 不要把这个坑点包装成已解决、已验证或可忽略，除非后续验证证据明确证明它已经关闭。\n",
      "summary": "给宿主 AI 的上下文和工作边界。",
      "title": "AI Context Pack / 带给我的 AI"
    },
    "boundary_risk_card": {
      "asset_id": "boundary_risk_card",
      "filename": "BOUNDARY_RISK_CARD.md",
      "markdown": "# Boundary & Risk Card / 安装前决策卡\n\n项目：mem0ai/mem0\n\n## Doramagic 试用结论\n\n当前结论：可以进入发布前推荐检查；首次使用仍应从最小权限、临时目录和可回滚配置开始。\n\n## 用户现在可以做\n\n- 可以先阅读 Human Manual，理解项目目的和主要工作流。\n- 可以复制 Prompt Preview 做安装前体验；这只验证交互感，不代表真实运行。\n- 可以把官方 Quick Start 命令放到隔离环境中验证，不要直接进主力环境。\n\n## 现在不要做\n\n- 不要把 Prompt Preview 当成项目实际运行结果。\n- 不要把 metadata-only validation 当成沙箱安装验证。\n- 不要把未验证能力写成“已支持、已跑通、可放心安装”。\n- 不要在首次试用时交出生产数据、私人文件、真实密钥或主力配置目录。\n\n## 安装前检查\n\n- 宿主 AI 是否匹配：chatgpt\n- 官方安装入口状态：已发现官方入口\n- 是否在临时目录、临时宿主或容器中验证：必须是\n- 是否能回滚配置改动：必须能\n- 是否需要 API Key、网络访问、读写文件或修改宿主配置：未确认前按高风险处理\n- 是否记录了安装命令、实际输出和失败日志：必须记录\n\n## 当前阻塞项\n\n- 无阻塞项。\n\n## 项目专属踩坑\n\n- 能力判断依赖假设（medium）：假设不成立时，用户拿不到承诺的能力。 建议检查：将假设转成下游验证清单。\n- 维护活跃度未知（medium）：新项目、停更项目和活跃项目会被混在一起，推荐信任度下降。 建议检查：补 GitHub 最近 commit、release、issue/PR 响应信号。\n- 下游验证发现风险项（medium）：下游已经要求复核，不能在页面中弱化。 建议检查：进入安全/权限治理复核队列。\n- 存在评分风险（medium）：风险会影响是否适合普通用户安装。 建议检查：把风险写入边界卡，并确认是否需要人工复核。\n- issue/PR 响应质量未知（low）：用户无法判断遇到问题后是否有人维护。 建议检查：抽样最近 issue/PR，判断是否长期无人处理。\n\n## 风险与权限提示\n\n- no_demo: medium\n\n## 证据缺口\n\n- 暂未发现结构化证据缺口。\n",
      "summary": "安装、权限、验证和推荐前风险。",
      "title": "Boundary & Risk Card / 边界与风险卡"
    },
    "human_manual": {
      "asset_id": "human_manual",
      "filename": "HUMAN_MANUAL.md",
      "markdown": "# https://github.com/mem0ai/mem0 项目说明书\n\n生成时间：2026-05-16 07:03:34 UTC\n\n## 目录\n\n- [Introduction to Mem0](#page-introduction)\n- [Quick Start Guide](#page-quickstart)\n- [Use Cases and Applications](#page-use-cases)\n- [System Architecture](#page-architecture)\n- [Memory Operations](#page-memory-operations)\n- [AI Model Integration](#page-ai-integration)\n- [Vector Stores and Storage](#page-vector-stores)\n- [Embedding Models](#page-embeddings)\n- [Python SDK](#page-python-sdk)\n- [TypeScript/Node.js SDK](#page-typescript-sdk)\n\n<a id='page-introduction'></a>\n\n## Introduction to Mem0\n\n### 相关页面\n\n相关主题：[System Architecture](#page-architecture), [Quick Start Guide](#page-quickstart), [Use Cases and Applications](#page-use-cases)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [README.md](https://github.com/mem0ai/mem0/blob/main/README.md)\n- [evaluation/README.md](https://github.com/mem0ai/mem0/blob/main/evaluation/README.md)\n- [cli/node/README.md](https://github.com/mem0ai/mem0/blob/main/cli/node/README.md)\n- [cli/python/README.md](https://github.com/mem0ai/mem0/blob/main/cli/python/README.md)\n- [mem0-ts/src/oss/README.md](https://github.com/mem0ai/mem0/blob/main/mem0-ts/src/oss/README.md)\n- [skills/README.md](https://github.com/mem0ai/mem0/blob/main/skills/README.md)\n</details>\n\n# Introduction to Mem0\n\nMem0 is an open-source memory infrastructure designed specifically for AI agents and applications. It provides intelligent, persistent memory management that enables AI systems to retain, retrieve, and utilize information across conversations and sessions. 
Unlike traditional retrieval-augmented generation (RAG) approaches that treat all context equally, Mem0 implements a hierarchical memory system that automatically prioritizes and maintains relevant information over time.\n\nThe platform addresses one of the most significant challenges in AI development: creating systems that can remember user preferences, conversation context, and learned facts in a way that feels natural and improves over time. Mem0 serves as a foundational layer for building production-ready AI agents with scalable long-term memory capabilities.\n\n## Core Concepts\n\n### Memory Hierarchy\n\nMem0 organizes memory into multiple scopes, enabling fine-grained control over information retention and retrieval. The system distinguishes between user-level, agent-level, and session-level memories, allowing developers to choose the appropriate context for different types of information.\n\n| Scope Level | Description | Use Case |\n|-------------|-------------|----------|\n| **User** | Global preferences and facts about a specific user | User preferences, historical context |\n| **Agent** | Information relevant to a specific AI agent instance | Agent-specific learning, personality traits |\n| **Session** | Temporary context within a single conversation | Current discussion topics, immediate context |\n| **Run** | Information specific to a particular execution context | Workflow-specific state |\n\n### Memory Operations\n\nThe memory system supports four fundamental operations that form the backbone of all interactions:\n\n**Add** - Stores new information in the memory system with automatic entity extraction and deduplication. The system intelligently parses input to identify key facts, relationships, and metadata.\n\n**Search** - Retrieves relevant memories using vector similarity search combined with semantic understanding. 
The search operation supports hybrid queries that combine keyword matching with semantic similarity.\n\n**Update** - Modifies existing memories when new information supersedes or refines previously stored facts. The system maintains version history for audit purposes.\n\n**Delete** - Removes specific memories or bulk deletes based on scope filters. Supports soft deletes and hard deletes depending on compliance requirements.\n\n## Architecture Overview\n\nMem0's architecture is designed with modularity and extensibility in mind. The system consists of several interconnected components that work together to provide seamless memory management.\n\n```mermaid\ngraph TD\n    A[AI Agent / Application] --> B[Mem0 API Layer]\n    B --> C[Memory Core Engine]\n    C --> D[Vector Store]\n    C --> E[Graph Store]\n    C --> F[SQLite / Database]\n    D --> G[Embedding Models]\n    E --> H[Entity Extraction]\n    F --> I[Metadata Storage]\n    B --> J[LLM Integration]\n    J --> K[Fact Extraction]\n    J --> L[Memory Synthesis]\n```\n\n### Key Components\n\n| Component | Function | Extensible |\n|-----------|----------|------------|\n| **API Layer** | REST interface for memory operations | Yes - custom endpoints |\n| **Memory Core** | Orchestrates memory operations | Yes - custom strategies |\n| **Vector Store** | Stores embeddings for semantic search | Yes - multiple backends |\n| **Graph Store** | Manages entity relationships | Yes - Neo4j, in-memory |\n| **LLM Integration** | Powers extraction and synthesis | Yes - OpenAI, Anthropic, local |\n| **Embedding Service** | Generates vector representations | Yes - OpenAI, HuggingFace |\n\n## Deployment Options\n\nMem0 offers multiple deployment options to meet different organizational requirements and use cases.\n\n### Cloud Platform\n\nThe managed Mem0 Platform provides a fully hosted solution with zero infrastructure management. 
Users can sign up at app.mem0.ai and immediately begin using the memory infrastructure via SDK or API keys. The cloud platform includes built-in monitoring, automatic scaling, and enterprise-grade security features.\n\n### Self-Hosted Server\n\nFor organizations requiring on-premise deployment or data sovereignty, Mem0 provides a self-hosted option using Docker Compose. The server includes a web-based dashboard for configuration and management.\n\n```bash\n# Recommended bootstrap command\ncd server && make bootstrap\n\n# Manual start\ncd server && docker compose up -d\n```\n\nSelf-hosted deployments support authentication out of the box, with options to configure admin accounts and API keys through a setup wizard or environment variables. The `ADMIN_API_KEY` environment variable enables programmatic admin creation for automated deployments.\n\n### Python SDK\n\nThe primary Python SDK provides the most comprehensive feature set for Python-based applications:\n\n```bash\npip install mem0ai\n```\n\nFor NLP-enhanced features including BM25 keyword matching and entity extraction:\n\n```bash\npip install mem0ai[nlp]\npython -m spacy download en_core_web_sm\n```\n\n### TypeScript/JavaScript SDK\n\nThe official npm package provides TypeScript-first support for JavaScript and TypeScript applications:\n\n```bash\nnpm install mem0ai\n```\n\nThe TypeScript implementation (`mem0-ts`) offers an alternative open-source option using OpenAI for embeddings and completions, with SQLite-based history tracking and optional graph-based memory relationships.\n\n### CLI Tools\n\nCommand-line interfaces are available for both Python and Node.js environments:\n\n```bash\n# Python CLI\npip install mem0-cli\n\n# Node.js CLI\nnpm install -g @mem0/cli\n```\n\n## Key Features\n\n### Intelligent Memory Extraction\n\nMem0 automatically extracts entities, facts, and relationships from conversational input. 
The extraction process uses large language models to understand context and identify meaningful information that should be stored. This reduces the burden on developers to explicitly specify what to remember.\n\n### Hybrid Search Capabilities\n\nMemory retrieval combines multiple search techniques for optimal results:\n\n- **Vector similarity search** - Finds semantically similar memories using embeddings\n- **BM25 keyword matching** - Ensures exact keyword matches are captured\n- **Entity extraction** - Identifies specific entities for targeted retrieval\n\n### Graph-Based Memory (Mem0+)\n\nAn enhanced version called Mem0+ adds graph-based relationship tracking, enabling the system to understand connections between entities and facts. This is particularly useful for complex reasoning tasks that require understanding relationships between different pieces of information.\n\n### Custom Instructions\n\nMem0 supports custom extraction instructions that guide the memory system to prioritize specific types of information based on use case requirements. 
The platform can auto-generate these instructions based on a description of the application domain.\n\n## Configuration and Customization\n\n### Embedding Models\n\nMem0 supports multiple embedding providers and models:\n\n| Provider | Default Model | Custom Model Support |\n|----------|--------------|---------------------|\n| OpenAI | text-embedding-3-small | Yes |\n| HuggingFace | Various sentence-transformers | Yes |\n| Azure OpenAI | text-embedding-3-small | Yes |\n\n### LLM Configuration\n\nLanguage model settings control fact extraction and memory synthesis:\n\n- Provider selection (OpenAI, Anthropic, local models)\n- Model selection per operation type\n- API key management and key rotation\n- Temperature and generation parameters\n\n### Memory Storage\n\nConfigurable storage backends adapt to different deployment requirements:\n\n```mermaid\ngraph LR\n    A[Memory Write] --> B{Storage Backend}\n    B --> C[In-Memory]\n    B --> D[SQLite]\n    B --> E[PostgreSQL + pgvector]\n    B --> F[Qdrant]\n    B --> G[ChromaDB]\n    B --> H[Weaviate]\n```\n\n## Use Cases\n\nMem0 supports a wide range of applications where persistent memory is valuable:\n\n**Personal AI Assistants** - Maintain user preferences, conversation history, and learned habits across sessions to provide increasingly personalized experiences.\n\n**Customer Service Bots** - Remember customer context across multiple support interactions, eliminating the need for customers to repeat information.\n\n**Developer Tools** - Enable AI coding assistants to learn team conventions, project-specific patterns, and individual developer preferences.\n\n**Healthcare Applications** - Maintain patient history and context across appointments while ensuring data privacy and compliance.\n\n**Educational Platforms** - Track student progress, learning preferences, and knowledge gaps to provide personalized tutoring experiences.\n\n## Evaluation Framework\n\nMem0 includes a comprehensive evaluation framework for 
assessing memory system performance across different scenarios. The framework supports comparison between multiple memory techniques including base Mem0, Mem0+, RAG implementations, and LangMem.\n\n| Command | Purpose | Description |\n|-----------|---------|-------------|\n| `run-mem0-add` | Add memories using Mem0 | Standard memory addition |\n| `run-mem0-search` | Search memories using Mem0 | Standard memory retrieval |\n| `run-mem0-plus-add` | Add memories using Mem0+ | Graph-enhanced addition |\n| `run-mem0-plus-search` | Search memories using Mem0+ | Graph-enhanced retrieval |\n| `run-rag` | RAG with chunk size 500 | Baseline RAG comparison |\n\nThe evaluation framework uses Makefile commands for standardized testing and supports custom parameter configuration via command-line arguments.\n\n## Citation\n\nIf you use Mem0 in your research or development, please cite the following paper:\n\n```bibtex\n@article{mem0,\n  title={Mem0: Building Production-Ready AI Agents with Scalable Long-Term Memory},\n  author={Chhikara, Prateek and Khant, Dev and Aryan, Saket and Singh, Taranjeet and Yadav, Deshraj},\n  journal={arXiv preprint arXiv:2504.19413},\n  year={2025}\n}\n```\n\n## License\n\nMem0 is released under the Apache 2.0 license, enabling both commercial and open-source usage with minimal restrictions. 
The permissive license allows integration into proprietary applications while requiring attribution and preservation of copyright notices.\n\n---\n\n<a id='page-quickstart'></a>\n\n## Quick Start Guide\n\n### 相关页面\n\n相关主题：[Introduction to Mem0](#page-introduction), [Python SDK](#page-python-sdk), [TypeScript/Node.js SDK](#page-typescript-sdk)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [README.md](https://github.com/mem0ai/mem0/blob/main/README.md)\n- [evaluation/README.md](https://github.com/mem0ai/mem0/blob/main/evaluation/README.md)\n- [cli/node/README.md](https://github.com/mem0ai/mem0/blob/main/cli/node/README.md)\n- [openmemory/api/README.md](https://github.com/mem0ai/mem0/blob/main/openmemory/api/README.md)\n- [server/dashboard/src/app/setup/page.tsx](https://github.com/mem0ai/mem0/blob/main/server/dashboard/src/app/setup/page.tsx)\n- [server/dashboard/src/app/(root)/dashboard/configuration/page.tsx](https://github.com/mem0ai/mem0/blob/main/server/dashboard/src/app/(root)/dashboard/configuration/page.tsx)\n- [server/dashboard/src/app/(root)/dashboard/api-keys/page.tsx](https://github.com/mem0ai/mem0/blob/main/server/dashboard/src/app/(root)/dashboard/api-keys/page.tsx)\n</details>\n\n# Quick Start Guide\n\nMem0 provides a comprehensive memory infrastructure for AI applications, enabling persistent, personalized, and adaptive AI experiences. This guide covers all deployment options to get you up and running quickly.\n\n## Overview\n\nMem0 is a production-ready memory layer for AI agents that handles memory management including storing, retrieving, and updating user/agent memories across interactions. 
The platform supports multiple deployment options: cloud-hosted, self-hosted server, and local SDK integration.\n\n**Key Features:**\n- Multi-level memory (user, agent, session, app)\n- Hybrid search with semantic and keyword matching\n- Entity extraction and relationship tracking\n- Cloud, self-hosted, and SDK deployment options\n- Cross-platform SDK support (Python, Node.js, CLI)\n\n资料来源：[README.md:1-30]()\n\n## Installation Methods\n\nMem0 supports multiple installation pathways depending on your use case and deployment preference.\n\n### Python SDK\n\nInstall the core Mem0 package via pip:\n\n```bash\npip install mem0ai\n```\n\nFor enhanced search capabilities with NLP support (BM25 keyword matching and entity extraction):\n\n```bash\npip install mem0ai[nlp]\npython -m spacy download en_core_web_sm\n```\n\n资料来源：[README.md:12-16]()\n\n### Node.js SDK\n\nFor JavaScript/TypeScript environments:\n\n```bash\nnpm install mem0ai\n```\n\n资料来源：[README.md:20-22]()\n\n### CLI Tool\n\nInstall the Mem0 CLI for terminal-based memory management:\n\n```bash\nnpm install -g @mem0/cli   # or: pip install mem0-cli\n```\n\n资料来源：[README.md:24-26]()\n\n## Deployment Options\n\nMem0 offers three deployment models to fit different infrastructure requirements.\n\n```mermaid\ngraph TD\n    A[Mem0 Deployment Options] --> B[Cloud Platform]\n    A --> C[Self-Hosted Server]\n    A --> D[Local SDK Integration]\n    \n    B --> B1[app.mem0.ai]\n    B --> B2[API Key Required]\n    \n    C --> C1[Docker Compose]\n    C --> C2[Custom Configuration]\n    \n    D --> D1[Python SDK]\n    D --> D2[Node.js SDK]\n```\n\n资料来源：[README.md:1-30]()\n\n### Cloud Platform\n\nThe quickest path to production memory infrastructure:\n\n1. Sign up at [Mem0 Platform](https://app.mem0.ai?utm_source=oss&utm_medium=readme)\n2. Embed the memory layer via SDK or API keys\n3. 
Start using memory operations immediately\n\n资料来源：[README.md:28-32]()\n\n### Self-Hosted Server\n\nFor organizations requiring full control over their infrastructure.\n\n#### Quick Bootstrap (Recommended)\n\n```bash\ncd server && make bootstrap\n```\n\nThis single command starts the Docker stack, creates an admin account, and issues your first API key.\n\n#### Manual Setup\n\n```bash\ncd server && docker compose up -d\n```\n\nAccess the setup wizard at `http://localhost:3000`.\n\n> **Note:** Self-hosted authentication is enabled by default. If upgrading from a pre-auth build, set `ADMIN_API_KEY`, register an admin through the wizard, or use `AUTH_DISABLED=true` for local development only.\n\n资料来源：[README.md:17-19]()\n\n**Configuration Requirements:**\nFor detailed configuration options, refer to the [self-hosted documentation](https://docs.mem0.ai/open-source/overview).\n\n资料来源：[README.md:18-19]()\n\n## Initial Setup Workflow\n\n```mermaid\ngraph LR\n    A[Initialize Mem0] --> B[Configure Provider]\n    B --> C[Set API Keys]\n    C --> D[Add Memories]\n    D --> E[Search/Retrieve]\n    \n    F[CLI: mem0 init] --> A\n    G[SDK: Mem0() config] --> A\n```\n\n### Web Dashboard Setup\n\nThe self-hosted server includes a guided setup wizard with the following steps:\n\n| Step | Title | Description |\n|------|-------|-------------|\n| 0 | Create Admin Account | Set up initial admin credentials (name, email, password) |\n| 1 | Configure Provider | Select LLM provider and enter API credentials |\n| 2 | Select Use Case | Choose preset or enter custom use case for instruction generation |\n| 3 | Generate Instructions | Auto-generate custom memory extraction instructions |\n| 4 | Test Setup | Verify configuration with a test API call |\n\n资料来源：[server/dashboard/src/app/setup/page.tsx](https://github.com/mem0ai/mem0/blob/main/server/dashboard/src/app/setup/page.tsx)\n\n**Setup Commands Example:**\n\n```bash\ncurl -X POST ${apiUrl}/memories \\\n  -H \"X-API-Key: ${apiKey}\" \\\n 
 -H \"Content-Type: application/json\" \\\n  -d '{\"messages\": [{\"role\": \"user\", \"content\": \"${testMessage}\"}], \"user_id\": \"setup-test\"}'\n```\n\n资料来源：[server/dashboard/src/app/setup/page.tsx](https://github.com/mem0ai/mem0/blob/main/server/dashboard/src/app/setup/page.tsx)\n\n### CLI Setup\n\nInitialize the CLI with your credentials:\n\n```bash\nmem0 init\nmem0 add \"Prefers dark mode and vim keybindings\" --user-id alice\nmem0 search \"What does Alice prefer?\" --user-id alice\n```\n\n资料来源：[README.md:26-27]()\n\n### Provider Configuration\n\nConfigure your LLM and embedding providers:\n\n| Setting | Description | Example Value |\n|---------|-------------|---------------|\n| LLM Provider | Language model provider | OpenAI, Anthropic, Azure OpenAI |\n| LLM Model | Specific model identifier | gpt-4o, claude-3-5-sonnet-20240620 |\n| Embedder Provider | Embedding model provider | OpenAI, Azure OpenAI |\n| Embedder Model | Embedding model identifier | text-embedding-3-small |\n| API Key | Provider authentication key | sk-... 
 |\n\n资料来源：[server/dashboard/src/app/(root)/dashboard/configuration/page.tsx](https://github.com/mem0ai/mem0/blob/main/server/dashboard/src/app/(root)/dashboard/configuration/page.tsx)\n\n## Core Operations\n\n### Adding Memories\n\nMemories can be added through various interfaces:\n\n**CLI:**\n```bash\nmem0 add \"User prefers dark mode\" --user-id alice\nmem0 add \"Agent configuration\" --agent-id bot-123\n```\n\n**SDK (Python):**\n```python\nfrom mem0 import Memory\n\nclient = Memory()\nclient.add(\"User prefers dark mode\", user_id=\"alice\")\n```\n\n### Searching Memories\n\n```bash\nmem0 search \"What are user preferences?\" --user-id alice\n```\n\n### Bulk Import\n\nImport memories from a JSON file:\n\n```bash\nmem0 import data.json --user-id alice\n```\n\nJSON file format:\n```json\n[\n  {\n    \"memory\": \"User prefers dark mode\",\n    \"user_id\": \"alice\",\n    \"metadata\": {\"source\": \"survey\"}\n  }\n]\n```\n\nEach item can include `memory` (or `text` or `content`), optional `user_id`, `agent_id`, and `metadata` fields.\n\n资料来源：[cli/node/README.md](https://github.com/mem0ai/mem0/blob/main/cli/node/README.md)\n\n### Entity Management\n\n```bash\n# List entities\nmem0 entity list users\nmem0 entity list agents --output json\n\n# Delete entities\nmem0 entity delete --user-id alice --force\n```\n\n资料来源：[cli/node/README.md](https://github.com/mem0ai/mem0/blob/main/cli/node/README.md)\n\n## API Key Management\n\n### Creating API Keys\n\n1. Navigate to **Dashboard → API Keys**\n2. Click **Create API Key**\n3. 
Save the generated key securely\n\n> **Important:** Save your API key immediately after creation — it will not be displayed again.\n\n资料来源：[server/dashboard/src/app/(root)/dashboard/api-keys/page.tsx](https://github.com/mem0ai/mem0/blob/main/server/dashboard/src/app/(root)/dashboard/api-keys/page.tsx)\n\n### Key Limitations\n\n| Plan | Key Limit | Notes |\n|------|-----------|-------|\n| Free | 3 keys | Consider Cloud for multiple applications |\n| Cloud | Multiple | Project-based isolation available |\n\nA warning banner appears when you reach the 3-key limit on self-hosted deployments.\n\n资料来源：[server/dashboard/src/app/(root)/dashboard/api-keys/page.tsx](https://github.com/mem0ai/mem0/blob/main/server/dashboard/src/app/(root)/dashboard/api-keys/page.tsx)\n\n## CLI Commands Reference\n\n| Command | Description |\n|---------|-------------|\n| `mem0 init` | Initialize CLI with credentials |\n| `mem0 add <text>` | Add a memory |\n| `mem0 search <query>` | Search memories |\n| `mem0 import <file>` | Bulk import from JSON |\n| `mem0 config show` | Display current config |\n| `mem0 config get <key>` | Get specific config value |\n| `mem0 config set <key> <value>` | Set a config value |\n| `mem0 entity list <type>` | List entities (users/agents/apps/runs) |\n| `mem0 entity delete` | Delete an entity |\n| `mem0 event list` | List background events |\n| `mem0 event status <id>` | Check event status |\n| `mem0 status` | Verify API connection |\n| `mem0 version` | Print CLI version |\n\n**Flags:**\n- `--user-id <id>` — Specify user context\n- `--agent-id <id>` — Specify agent context\n- `--preview` — Preview without deleting (for delete operations)\n- `--force` — Skip confirmation prompt\n- `-o, --output` — Output format (text/json)\n\n资料来源：[cli/node/README.md](https://github.com/mem0ai/mem0/blob/main/cli/node/README.md)\n\n## Docker Development (OpenMemory)\n\nFor local API development using OpenMemory:\n\n```bash\n# Build containers\nmake build\n\n# Create environment 
file\nmake env\n# Then edit api/.env and enter OPENAI_API_KEY\n\n# Start services\nmake up\n```\n\nThe API will be available at `http://localhost:8765`\n\n**Common Commands:**\n```bash\nmake logs      # View container logs\nmake shell     # Open shell in container\nmake migrate   # Run database migrations\nmake test      # Run tests\nmake test-clean # Run tests and clean up\nmake down      # Stop containers\n```\n\nAPI documentation available at:\n- Swagger UI: `http://localhost:8765/docs`\n- ReDoc: `http://localhost:8765/redoc`\n\n资料来源：[openmemory/api/README.md](https://github.com/mem0ai/mem0/blob/main/openmemory/api/README.md)\n\n## Running Experiments\n\nFor evaluation purposes, Mem0 provides experiment scripts:\n\n```bash\n# Memory Techniques\nmake run-mem0-add         # Add memories using Mem0\nmake run-mem0-search      # Search memories using Mem0\nmake run-mem0-plus-add    # Add memories using Mem0+ (graph-based)\nmake run-mem0-plus-search # Search memories using Mem0+\n\n# RAG Experiments\nmake run-rag              # Run RAG with chunk size 500\nmake run-full-context     # Run RAG with full context\n\n# Other Techniques\nmake run-langmem          # Run LangMem experiments\nmake run-zep-add          # Add memories using Zep\nmake run-zep-search       # Search memories using Zep\nmake run-openai           # Run OpenAI experiments\n```\n\n**Custom Parameters:**\n\n| Parameter | Description | Default |\n|-----------|-------------|---------|\n| `--technique_type` | Memory technique (mem0, rag, langmem) | mem0 |\n| `--method` | Method to use (add, search) | add |\n| `--chunk_size` | Chunk size for processing | 1000 |\n| `--top_k` | Number of results to retrieve | varies |\n\nAlternatively, run experiments directly:\n```bash\npython run_experiments.py --technique_type [mem0|rag|langmem] [additional parameters]\n```\n\n资料来源：[evaluation/README.md](https://github.com/mem0ai/mem0/blob/main/evaluation/README.md)\n\n## Next Steps\n\n- **Configuration:** Customize 
provider settings in the dashboard configuration page\n- **API Reference:** Explore the full API at `/docs` when running self-hosted\n- **Documentation:** Visit [docs.mem0.ai](https://docs.mem0.ai) for detailed guides\n- **Examples:** Check the `examples/` directory for integration demos\n- **CLI Help:** Run `mem0 --help` for command options\n\n## Citation\n\nIf you use Mem0 in your research or application, please cite:\n\n```bibtex\n@article{mem0,\n  title={Mem0: Building Production-Ready AI Agents with Scalable Long-Term Memory},\n  author={Chhikara, Prateek and Khant, Dev and Aryan, Saket and Singh, Taranjeet and Yadav, Deshraj},\n  journal={arXiv preprint arXiv:2504.19413},\n  year={2025}\n}\n```\n\n资料来源：[README.md:1-10]()\n\n---\n\n<a id='page-use-cases'></a>\n\n## Use Cases and Applications\n\n### 相关页面\n\n相关主题：[Introduction to Mem0](#page-introduction)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [README.md](https://github.com/mem0ai/mem0/blob/main/README.md)\n- [openclaw/README.md](https://github.com/mem0ai/mem0/blob/main/openclaw/README.md)\n- [evaluation/README.md](https://github.com/mem0ai/mem0/blob/main/evaluation/README.md)\n- [cli/node/README.md](https://github.com/mem0ai/mem0/blob/main/cli/node/README.md)\n- [examples/multimodal-demo/src/components/messages.tsx](https://github.com/mem0ai/mem0/blob/main/examples/multimodal-demo/src/components/messages.tsx)\n</details>\n\n# Use Cases and Applications\n\nMem0 provides a comprehensive memory infrastructure for AI applications, enabling developers to build intelligent systems that maintain context across conversations, users, and sessions. This page documents the primary use cases, application patterns, and real-world scenarios where Mem0 adds significant value.\n\n## Overview\n\nMem0 is designed as a memory layer for AI agents and applications. It addresses the fundamental challenge of maintaining stateful, personalized interactions in AI systems that are inherently stateless. 
The platform supports multiple deployment models including self-hosted servers, cloud platforms, and embedded SDK integrations. 资料来源：[README.md:1-30]()\n\n## Core Use Cases\n\n### Personal AI Assistants\n\nMem0 powers personal AI assistants that learn and remember user preferences, habits, and historical interactions. These assistants can recall past conversations, understand user context, and provide personalized responses based on accumulated knowledge.\n\n```mermaid\ngraph TD\n    A[User Input] --> B[Mem0 Memory Layer]\n    B --> C{Retrieve Relevant Memories}\n    C --> D[User Preferences]\n    C --> E[Conversation History]\n    C --> F[Historical Context]\n    D --> G[AI Response Generation]\n    E --> G\n    F --> G\n    G --> H[Store New Memories]\n    H --> B\n```\n\n**Key Features:**\n- Persistent user profiles across sessions\n- Preference learning and adaptation\n- Context-aware response generation\n- Multi-turn conversation continuity\n\n### Customer Support Chatbots\n\nEnterprise customer support systems benefit from Mem0's ability to maintain conversation history and customer context. Support agents and chatbots can access previous tickets, understand ongoing issues, and provide consistent assistance across multiple interaction channels. 资料来源：[README.md:40-60]()\n\n**Implementation Pattern:**\n```python\n# Typical customer support memory flow\nmemory.add(\n    text=\"Customer reported payment failure on order #12345\",\n    user_id=\"customer_456\",\n    metadata={\"ticket_id\": \"T-789\", \"priority\": \"high\"}\n)\n```\n\n### Healthcare Assistants\n\nAI-powered healthcare applications use Mem0 to maintain patient context, track medical history, and ensure continuity of care across multiple interactions. These systems must handle sensitive data with appropriate privacy considerations while providing valuable clinical insights. 
资料来源：[README.md:50-80]()\n\n**Key Considerations:**\n- HIPAA compliance for patient data\n- Structured memory storage for medical records\n- Temporal context preservation\n- Multi-provider information aggregation\n\n### Enterprise Knowledge Management\n\nOrganizations leverage Mem0 to build knowledge bases that automatically capture, organize, and retrieve institutional knowledge. Unlike static knowledge bases, Mem0-powered systems continuously learn from interactions and user feedback.\n\n| Feature | Description | Benefit |\n|---------|-------------|---------|\n| Semantic Search | Natural language queries across memories | Fast information retrieval |\n| Hybrid Search | BM25 + vector embeddings | Comprehensive results |\n| Entity Extraction | Automatic categorization | Organized knowledge |\n| Temporal Weighting | Recent information prioritized | Relevant responses |\n\n资料来源：[README.md:35-45]()\n\n## Application Architecture Patterns\n\n### Multi-Agent Systems\n\nMem0 supports complex multi-agent architectures where different agents share contextual information through a unified memory layer.\n\n```mermaid\ngraph LR\n    A[Agent A] -->|Read/Write| M[Mem0 Memory]\n    B[Agent B] -->|Read/Write| M\n    C[Agent C] -->|Read/Write| M\n    M --> D[Shared Context]\n    D --> E[Coordinated Actions]\n```\n\n**Multi-Agent Memory Configuration:**\n```python\nfrom mem0 import Memory\n\nmemory = Memory.from_ids(\n    user_id=\"shared_session_123\",\n    agent_id=None,  # Shared across agents\n    run_id=\"workflow_456\"\n)\n```\n\n### Retrieval-Augmented Generation (RAG)\n\nMem0 integrates with RAG pipelines to enhance LLM responses with retrieved memories. The platform supports configurable chunk sizes, embedding models, and hybrid search strategies. 
资料来源：[evaluation/README.md:1-50]()\n\n| RAG Configuration | Parameter | Default Value |\n|-------------------|-----------|---------------|\n| Chunk Size | `chunk_size` | 1000 |\n| Embedding Model | `embedding_model` | text-embedding-3-small |\n| Search Technique | `technique_type` | mem0, rag, langmem |\n| Top-K Results | `top_k` | Configurable |\n\n### Multi-Modal Applications\n\nModern AI applications process multiple input types including text, images, and audio. Mem0 stores and retrieves context from multi-modal conversations, enabling coherent responses across different content types. 资料来源：[examples/multimodal-demo/src/components/messages.tsx:1-60]()\n\n## Deployment Scenarios\n\n### Self-Hosted Server\n\nOrganizations requiring full control over their data can deploy Mem0 as a self-hosted solution. The self-hosted server includes a dashboard for management, API key generation, and configuration options. 资料来源：[README.md:60-80]()\n\n```bash\n# Quick start with bootstrap\ncd server && make bootstrap\n\n# Manual Docker deployment\ncd server && docker compose up -d\n```\n\n**Self-Hosted Features:**\n- Admin account creation via setup wizard\n- API key management through dashboard\n- Configuration for LLM and embedding providers\n- Request logging and analytics\n- Webhook support for event notifications\n\n### Cloud Platform\n\nThe Mem0 cloud platform provides a managed solution with additional features including project-based isolation, SSO/SAML authentication, and enterprise support. 
资料来源：[README.md:50-60]()\n\n### Embedded SDK Integration\n\nFor applications requiring client-side or edge deployment, Mem0 provides lightweight SDKs:\n\n| Platform | Installation | Use Case |\n|----------|--------------|----------|\n| Python | `pip install mem0ai` | Backend services, data processing |\n| JavaScript/TypeScript | `npm install mem0ai` | Web applications, Node.js services |\n| CLI | `npm install -g @mem0/cli` | Local development, debugging |\n\n资料来源：[README.md:25-40]()\n\n## CLI Applications\n\nThe Mem0 CLI enables developers to manage memories directly from the terminal, useful for development, debugging, and automation tasks. 资料来源：[cli/node/README.md:1-80]()\n\n```bash\n# Initialize CLI configuration\nmem0 init\n\n# Add memories\nmem0 add \"User prefers dark mode\" --user-id alice\n\n# Search memories\nmem0 search \"What does Alice prefer?\" --user-id alice\n\n# Manage entities\nmem0 entity list users\nmem0 entity delete --user-id alice --force\n```\n\n**CLI Commands Reference:**\n\n| Command | Description | Key Flags |\n|---------|-------------|-----------|\n| `mem0 add` | Add a memory | `--user-id`, `--agent-id`, `--metadata` |\n| `mem0 search` | Search memories | `--user-id`, `--output` |\n| `mem0 list` | List all memories | `--user-id`, `--limit` |\n| `mem0 delete` | Delete memories | `--user-id`, `--force` |\n| `mem0 import` | Bulk import | JSON file support |\n| `mem0 config` | Manage settings | `show`, `get`, `set` |\n| `mem0 status` | Check connection | Project verification |\n| `mem0 event` | Monitor async events | `list`, `status` |\n\n## Evaluation and Benchmarking\n\nMem0 includes comprehensive evaluation tools for comparing different memory techniques and configurations. The evaluation framework supports multiple approaches including Mem0, Mem0+, RAG, and LangMem. 
资料来源：[evaluation/README.md:50-100]()\n\n```bash\n# Run Mem0 experiments\nmake run-mem0-add\nmake run-mem0-search\n\n# Run Mem0+ with graph-based search\nmake run-mem0-plus-add\nmake run-mem0-plus-search\n\n# Run RAG experiments\nmake run-rag\nmake run-full-context\n\n# Run custom experiments\npython run_experiments.py --technique_type mem0 --method add\n```\n\n**Experiment Parameters:**\n\n| Parameter | Description | Valid Values |\n|-----------|-------------|---------------|\n| `--technique_type` | Memory technique | mem0, rag, langmem |\n| `--method` | Operation type | add, search |\n| `--chunk_size` | Processing chunk size | Integer |\n| `--top_k` | Results to retrieve | Integer |\n\n## Industry-Specific Applications\n\n### OpenClaw Platform Integration\n\nOpenClaw demonstrates how Mem0 integrates with specialized AI platforms for specific domains. The platform supports both hosted API mode and self-hosted open-source mode with configurable memory behaviors. 资料来源：[openclaw/README.md:1-50]()\n\n**Platform Mode Configuration:**\n| Key | Type | Description |\n|-----|------|-------------|\n| `apiKey` | string | Mem0 API key (required) |\n| `customInstructions` | string | Extraction rules |\n| `customCategories` | object | Category definitions |\n\n**Open-Source Mode Defaults:**\n| Component | Default Value |\n|-----------|---------------|\n| Embeddings | text-embedding-3-small |\n| Vector Store | Local SQLite |\n| LLM | gpt-5-mini |\n| Database Path | ~/.mem0/vector_store.db |\n\n### Support Inbox Automation\n\nAutomated support systems use Mem0 to track issue resolution history, maintain customer context across channels, and enable intelligent routing based on historical patterns.\n\n### Email Automation\n\nEmail-based workflows leverage Mem0's ability to maintain conversation context across email threads, automatically categorizing and prioritizing messages based on user history and past interactions.\n\n## Best Practices\n\n### Memory Structuring\n\nOrganize 
memories with appropriate metadata for optimal retrieval:\n\n```python\nmemory.add(\n    text=\"Customer's subscription expired\",\n    user_id=\"customer_123\",\n    metadata={\n        \"category\": \"billing\",\n        \"priority\": \"medium\",\n        \"timestamp\": \"2025-01-15\"\n    }\n)\n```\n\n### Privacy Considerations\n\n- Implement data retention policies\n- Use encryption for sensitive information\n- Leverage user consent mechanisms\n- Enable data export and deletion capabilities\n\n### Performance Optimization\n\n- Configure appropriate embedding models for your use case\n- Use hybrid search combining semantic and keyword matching\n- Implement caching for frequently accessed memories\n- Monitor request latency through the dashboard\n\n## Additional Resources\n\n- [Quick Start Guide](https://docs.mem0.ai/)\n- [API Reference](https://docs.mem0.ai/api-reference/)\n- [Self-Hosted Documentation](https://docs.mem0.ai/open-source/overview)\n- [CLI Reference](https://docs.mem0.ai/platform/cli)\n- [Platform Documentation](https://app.mem0.ai)\n\n---\n\n<a id='page-architecture'></a>\n\n## System Architecture\n\n### 相关页面\n\n相关主题：[Introduction to Mem0](#page-introduction), [Memory Operations](#page-memory-operations), [Python SDK](#page-python-sdk), [Vector Stores and Storage](#page-vector-stores)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [mem0/memory/main.py](https://github.com/mem0ai/mem0/blob/main/mem0/memory/main.py)\n- [mem0/memory/base.py](https://github.com/mem0ai/mem0/blob/main/mem0/memory/base.py)\n- [mem0/memory/storage.py](https://github.com/mem0ai/mem0/blob/main/mem0/memory/storage.py)\n- [server/main.py](https://github.com/mem0ai/mem0/blob/main/server/main.py)\n- [server/routers/__init__.py](https://github.com/mem0ai/mem0/blob/main/server/routers/__init__.py)\n- [mem0-ts/src/client/mem0.ts](https://github.com/mem0ai/mem0/blob/main/mem0-ts/src/client/mem0.ts)\n- 
[server/README.md](https://github.com/mem0ai/mem0/blob/main/server/README.md)\n</details>\n\n# System Architecture\n\n## Overview\n\nMem0 is an intelligent memory layer designed for AI agents and applications. It provides persistent, scalable long-term memory capabilities that enable AI systems to retain, retrieve, and manage information across conversations and sessions.\n\n资料来源：[server/README.md](https://github.com/mem0ai/mem0/blob/main/server/README.md)\n\nThe architecture follows a modular design pattern with distinct layers for memory management, storage, API serving, and client implementations. This separation enables flexibility in deployment options and supports multiple use cases from embedded applications to cloud-based services.\n\n## High-Level Architecture\n\n```mermaid\ngraph TD\n    subgraph Client_Layer[\"Client Layer\"]\n        CLI[\"CLI Application<br/>mem0\"]\n        TS_Client[\"TypeScript Client<br/>mem0-ts\"]\n        Python_SDK[\"Python SDK<br/>mem0ai/mem0\"]\n    end\n    \n    subgraph API_Layer[\"API Layer\"]\n        Server[\"FastAPI Server<br/>server/main.py\"]\n        Routers[\"API Routers<br/>server/routers/\"]\n    end\n    \n    subgraph Memory_Core[\"Memory Core\"]\n        Main[\"Memory Manager<br/>mem0/memory/main.py\"]\n        Base[\"Base Memory<br/>mem0/memory/base.py\"]\n        Storage[\"Storage Engine<br/>mem0/memory/storage.py\"]\n    end\n    \n    subgraph Storage_Backend[\"Storage Backend\"]\n        VectorStore[\"Vector Store\"]\n        DB[\"Database\"]\n    end\n    \n    CLI --> Server\n    TS_Client --> Server\n    Python_SDK --> Main\n    Main --> Base\n    Main --> Storage\n    Storage --> VectorStore\n    Storage --> DB\n    Server --> Main\n```\n\n## Core Components\n\n### Memory Module Architecture\n\nThe memory module is the heart of the Mem0 system, implementing the core memory operations.\n\n资料来源：[mem0/memory/main.py](https://github.com/mem0ai/mem0/blob/main/mem0/memory/main.py)\n\n| Component | File | 
Purpose |\n|-----------|------|---------|\n| MemoryManager | `mem0/memory/main.py` | Orchestrates memory operations |\n| BaseMemory | `mem0/memory/base.py` | Abstract base class defining the memory interface |\n| Storage | `mem0/memory/storage.py` | Handles persistence and retrieval of memory data |\n\n### Base Memory Class\n\nThe base class defines the contract that all memory implementations must follow.\n\n资料来源：[mem0/memory/base.py](https://github.com/mem0ai/mem0/blob/main/mem0/memory/base.py)\n\n```mermaid\nclassDiagram\n    class BaseMemory {\n        <<abstract>>\n        +add() AddMemory\n        +search() SearchMemory\n        +get() GetMemory\n        +update() UpdateMemory\n        +delete() DeleteMemory\n        +list() ListMemories\n    }\n    \n    class MemoryManager {\n        +add()\n        +search()\n        +get()\n        +update()\n        +delete()\n        +list()\n        -storage: Storage\n    }\n    \n    BaseMemory <|-- MemoryManager\n```\n\n### Storage Engine\n\nThe storage layer handles the persistence of memory data using vector embeddings and traditional database storage.\n\n资料来源：[mem0/memory/storage.py](https://github.com/mem0ai/mem0/blob/main/mem0/memory/storage.py)\n\n#### Supported Storage Backends\n\n| Storage Type | Description |\n|--------------|-------------|\n| Vector Store | Embedding-based similarity search |\n| SQL Database | Structured data storage for metadata |\n| Memory | In-memory storage for testing |\n| Graph | Graph-based relationships (Mem0+) |\n\n## API Layer\n\n### Server Architecture\n\nThe server layer is built on FastAPI, providing RESTful endpoints for memory operations.\n\n资料来源：[server/main.py](https://github.com/mem0ai/mem0/blob/main/server/main.py)\n\n```mermaid\ngraph LR\n    subgraph Endpoints[\"API Endpoints\"]\n        A[\"Add Memory\"]\n        S[\"Search Memory\"]\n        G[\"Get Memory\"]\n        U[\"Update Memory\"]\n        D[\"Delete Memory\"]\n        L[\"List Memories\"]\n    end\n    \n    
subgraph Router[\"Router Module\"]\n        R[\"server/routers/__init__.py\"]\n    end\n    \n    A --> R\n    S --> R\n    G --> R\n    U --> R\n    D --> R\n    L --> R\n    R --> MemoryCore[\"Memory Core\"]\n```\n\n### API Configuration\n\nThe system supports various configuration options for deployment flexibility.\n\n资料来源：[server/README.md](https://github.com/mem0ai/mem0/blob/main/server/README.md)\n\n| Parameter | Description | Default |\n|-----------|-------------|---------|\n| `OPENAI_API_KEY` | API key for GPT models and embeddings | Required |\n| `MEM0_API_KEY` | Mem0 API key for cloud features | Optional |\n| `MEM0_PROJECT_ID` | Project identifier | Optional |\n| `MEM0_ORGANIZATION_ID` | Organization identifier | Optional |\n| `MODEL` | LLM model for completions | `gpt-4o-mini` |\n| `EMBEDDING_MODEL` | Embedding model | `text-embedding-3-small` |\n| `ZEP_API_KEY` | Zep service API key | Optional |\n\n## Client Implementations\n\n### Python SDK\n\nThe Python SDK provides the primary interface for integrating Mem0 into applications.\n\n资料来源：[mem0ai/mem0](https://github.com/mem0ai/mem0/blob/main/mem0ai/mem0)\n\n```python\nfrom mem0 import Memory\n\nmemory = Memory()\nmemory.add(\"User prefers dark mode\", user_id=\"alice\")\nresults = memory.search(\"What are user preferences?\", user_id=\"alice\")\n```\n\n### TypeScript Client\n\nThe TypeScript implementation provides memory capabilities for JavaScript/TypeScript environments.\n\n资料来源：[mem0-ts/src/client/mem0.ts](https://github.com/mem0ai/mem0/blob/main/mem0-ts/src/client/mem0.ts)\n\n```typescript\nimport { Memory } from \"mem0-ts\";\n\nconst memory = new Memory({\n  embedder: {\n    provider: \"openai\",\n    config: { apiKey: process.env.OPENAI_API_KEY }\n  }\n});\n```\n\n### CLI Application\n\nThe command-line interface provides direct access to memory operations.\n\n资料来源：[cli/README.md](https://github.com/mem0ai/mem0/blob/main/cli/README.md)\n\n| Command | Description |\n|---------|-------------|\n| 
`mem0 init` | Setup wizard for authentication |\n| `mem0 add` | Add memory from text, JSON, or file |\n| `mem0 search` | Search memories using natural language |\n| `mem0 list` | List memories with filters |\n| `mem0 get` | Retrieve specific memory by ID |\n| `mem0 update` | Update memory text or metadata |\n| `mem0 delete` | Delete memory or entity |\n| `mem0 import` | Bulk import from JSON file |\n\n#### CLI Agent Mode\n\nThe CLI supports agent mode for AI agent tool loops:\n\n```bash\nmem0 --agent search \"user preferences\" --user-id alice\nmem0 --agent add \"User prefers dark mode\" --user-id alice\n```\n\n资料来源：[cli/node/README.md](https://github.com/mem0ai/mem0/blob/main/cli/node/README.md)\n\n## Memory Techniques\n\nMem0 supports multiple memory retrieval techniques for different use cases.\n\n资料来源：[evaluation/README.md](https://github.com/mem0ai/mem0/blob/main/evaluation/README.md)\n\n```mermaid\ngraph TD\n    subgraph Techniques[\"Memory Techniques\"]\n        M0[\"Mem0<br/>Vector-based retrieval\"]\n        M0P[\"Mem0+<br/>Graph-based search\"]\n        RAG[\"RAG<br/>Chunk-based retrieval\"]\n        LM[\"LangMem<br/>Language model memory\"]\n    end\n    \n    subgraph Use_Cases[\"Use Cases\"]\n        UC1[\"Personal assistants\"]\n        UC2[\"Customer support\"]\n        UC3[\"Research tools\"]\n        UC4[\"Enterprise applications\"]\n    end\n    \n    M0 --> UC1\n    M0P --> UC2\n    RAG --> UC3\n    LM --> UC4\n```\n\n### Technique Comparison\n\n| Technique | Description | Best For |\n|-----------|-------------|----------|\n| Mem0 | Vector-based semantic search | General purpose memory |\n| Mem0+ | Graph-enhanced retrieval | Complex relationship queries |\n| RAG | Chunk-based retrieval | Document-heavy applications |\n| LangMem | LLM-native memory | Language model integration |\n\n## Data Models\n\n### Memory Entity Structure\n\n| Field | Type | Description |\n|-------|------|-------------|\n| `id` | string | Unique memory identifier |\n| 
`memory` | string | Memory content text |\n| `user_id` | string | Associated user identifier |\n| `agent_id` | string | Associated agent identifier |\n| `app_id` | string | Associated application identifier |\n| `run_id` | string | Associated run identifier |\n| `metadata` | object | Custom metadata key-value pairs |\n| `created_at` | datetime | Creation timestamp |\n| `updated_at` | datetime | Last update timestamp |\n\n### Evaluation Metrics\n\nThe system tracks multiple metrics for performance evaluation.\n\n资料来源：[evaluation/README.md](https://github.com/mem0ai/mem0/blob/main/evaluation/README.md)\n\n| Metric | Description |\n|--------|-------------|\n| BLEU Score | Text similarity measure |\n| F1 Score | Precision/recall balance |\n| LLM Score | Judge-based evaluation |\n\n## Deployment Options\n\n### Local/Embedded Mode\n\nFor applications requiring local-only memory:\n\n- SQLite-based vector store: `~/.mem0/vector_store.db`\n- History database: `~/.mem0/history.db`\n- Memory consolidation state: `<pluginStateDir>/dream-state.json`\n\n资料来源：[openclaw/README.md](https://github.com/mem0ai/mem0/blob/main/openclaw/README.md)\n\n### Cloud Mode\n\nFor managed Mem0 cloud services:\n\n- Requires `MEM0_API_KEY`\n- Project and organization configuration\n- Scalable vector storage\n\n### Server Deployment\n\nThe FastAPI server can be deployed independently:\n\n```bash\n# Start server\npython server/main.py\n\n# Configure via environment variables\n# - Set API keys\n# - Configure storage backends\n# - Set model preferences\n```\n\n资料来源：[server/main.py](https://github.com/mem0ai/mem0/blob/main/server/main.py)\n\n## Vercel AI SDK Integration\n\nMem0 provides seamless integration with the Vercel AI SDK for streaming responses with memory.\n\n资料来源：[vercel-ai-sdk/README.md](https://github.com/mem0ai/mem0/blob/main/vercel-ai-sdk/README.md)\n\n```typescript\nconst mem0 = createMem0({\n  config: {\n    // Model configuration options\n  }\n});\n```\n\n### Best Practices for Vercel 
Integration\n\n1. **User Identification**: Always provide a unique `user_id` for consistent memory retrieval\n2. **Context Management**: Balance context window sizes with memory requirements\n3. **Error Handling**: Implement proper error handling for memory operations\n4. **Memory Cleanup**: Regularly clean up unused memory contexts\n\n## Evaluation Framework\n\nThe evaluation module provides comprehensive testing capabilities.\n\n资料来源：[evaluation/README.md](https://github.com/mem0ai/mem0/blob/main/evaluation/README.md)\n\n### Running Experiments\n\n```bash\n# Run Mem0 experiments\nmake run-mem0-add\nmake run-mem0-search\n\n# Run Mem0+ experiments\nmake run-mem0-plus-add\nmake run-mem0-plus-search\n\n# Run RAG experiments\nmake run-rag\n```\n\n### Evaluation Command-Line Parameters\n\n| Parameter | Description | Default |\n|-----------|-------------|---------|\n| `--technique_type` | Memory technique | `mem0` |\n| `--method` | Method to use | `add` |\n| `--chunk_size` | Processing chunk size | `1000` |\n| `--top_k` | Top memories to retrieve | `30` |\n| `--is_graph` | Use graph-based search | `False` |\n\n## System Flow Diagrams\n\n### Memory Addition Flow\n\n```mermaid\nsequenceDiagram\n    participant Client\n    participant API\n    participant MemoryManager\n    participant Storage\n    participant VectorStore\n    \n    Client->>API: Add memory request\n    API->>MemoryManager: Process memory\n    MemoryManager->>MemoryManager: Extract facts\n    MemoryManager->>Storage: Store memory\n    Storage->>VectorStore: Generate embeddings\n    VectorStore->>Storage: Store vectors\n    Storage->>MemoryManager: Confirm storage\n    MemoryManager->>API: Return memory ID\n    API->>Client: Success response\n```\n\n### Memory Search Flow\n\n```mermaid\nsequenceDiagram\n    participant Client\n    participant API\n    participant MemoryManager\n    participant Storage\n    participant VectorStore\n    \n    Client->>API: Search request\n    API->>MemoryManager: Process 
query\n    MemoryManager->>VectorStore: Generate query embedding\n    VectorStore->>MemoryManager: Return similar memories\n    MemoryManager->>API: Format results\n    API->>Client: Return search results\n```\n\n## Security Considerations\n\n### API Key Management\n\n- Use environment variables for sensitive credentials\n- Rotate API keys periodically\n- Implement proper access controls for production deployments\n\n### Data Privacy\n\n- User data isolation via `user_id` scoping\n- Support for entity-level deletion\n- Optional metadata encryption for sensitive information\n\n## Extensibility Points\n\nThe architecture supports extension through:\n\n1. **Custom Storage Backends**: Implement the storage interface for new backends\n2. **Custom Embedding Providers**: Add support for alternative embedding models\n3. **Custom Memory Techniques**: Extend base class for specialized retrieval\n4. **Plugin System**: OpenClaw integration for additional capabilities\n\n## References\n\n- Main Repository: [mem0ai/mem0](https://github.com/mem0ai/mem0)\n- Documentation: [docs.mem0.ai](https://docs.mem0.ai)\n- Paper Citation: [arXiv:2504.19413](https://arxiv.org/abs/2504.19413)\n\n---\n\n<a id='page-memory-operations'></a>\n\n## Memory Operations\n\n### 相关页面\n\n相关主题：[System Architecture](#page-architecture), [AI Model Integration](#page-ai-integration), [Vector Stores and Storage](#page-vector-stores), [Python SDK](#page-python-sdk)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [mem0/memory/main.py](https://github.com/mem0ai/mem0/blob/main/mem0/memory/main.py)\n- [docs/core-concepts/memory-operations/add.mdx](https://github.com/mem0ai/mem0/blob/main/docs/core-concepts/memory-operations/add.mdx)\n- [docs/core-concepts/memory-operations/search.mdx](https://github.com/mem0ai/mem0/blob/main/docs/core-concepts/memory-operations/search.mdx)\n- 
[docs/core-concepts/memory-operations/update.mdx](https://github.com/mem0ai/mem0/blob/main/docs/core-concepts/memory-operations/update.mdx)\n- [docs/core-concepts/memory-operations/delete.mdx](https://github.com/mem0ai/mem0/blob/main/docs/core-concepts/memory-operations/delete.mdx)\n- [docs/core-concepts/memory-types.mdx](https://github.com/mem0ai/mem0/blob/main/docs/core-concepts/memory-types.mdx)\n- [docs/open-source/features/async-memory.mdx](https://github.com/mem0ai/mem0/blob/main/docs/open-source/features/async-memory.mdx)\n- [docs/open-source/features/metadata-filtering.mdx](https://github.com/mem0ai/mem0/blob/main/docs/open-source/features/metadata-filtering.mdx)\n- [docs/open-source/features/custom-instructions.mdx](https://github.com/mem0ai/mem0/blob/main/docs/open-source/features/custom-instructions.mdx)\n</details>\n\n# Memory Operations\n\nMemory operations are the core CRUD (Create, Read, Update, Delete) interactions that power the Mem0 memory system. These operations enable AI agents to store, retrieve, modify, and delete persistent memory across user sessions, agent executions, and application contexts. The memory operations layer abstracts the complexity of vector storage, semantic indexing, and multi-entity management into a unified API that supports both synchronous and asynchronous execution patterns.\n\n## Overview\n\nThe Mem0 memory system provides five fundamental operations that form the backbone of persistent memory management. Each operation is designed to work with multiple entity scopes, including user-level, agent-level, application-level, and run-level contexts. 
The operations support rich metadata filtering, custom instructions for memory processing, and both blocking and non-blocking execution modes for handling large-scale memory operations.\n\nMemory operations in Mem0 are built on a layered architecture where the core memory module (`mem0/memory/main.py`) handles the business logic, while underlying vector stores and databases manage persistence. This separation allows Mem0 to support different deployment scenarios from local SQLite-based storage to cloud-hosted vector databases.\n\n## Core Memory Operations\n\n### Add Memory\n\nThe **Add** operation is the primary mechanism for storing new information in the memory system. When a memory is added, Mem0 performs several processing steps including embedding generation, fact extraction, and semantic categorization before storing the data in the appropriate vector store.\n\n**Function signature and parameters:**\n\n```python\ndef add(\n    messages: str | list[dict],\n    user_id: str | None = None,\n    agent_id: str | None = None,\n    app_id: str | None = None,\n    run_id: str | None = None,\n    metadata: dict | None = None,\n    filter_version: str | None = \"v1.0\",\n    prompt: str | None = None,\n    max_items: int | None = None\n) -> dict\n```\n\n资料来源：[mem0/memory/main.py](https://github.com/mem0ai/mem0/blob/main/mem0/memory/main.py)\n\n**Operation workflow:**\n\n```mermaid\ngraph TD\n    A[Input: messages + entity identifiers] --> B[Validate input and entity scope]\n    B --> C[Generate vector embeddings]\n    C --> D[Extract facts using LLM]\n    D --> E[Apply custom instructions if configured]\n    E --> F[Store in vector store with metadata]\n    F --> G[Return memory IDs and stored content]\n```\n\n**Adding memories via CLI:**\n\n```bash\n# Add a simple text memory\nmem0 add \"I prefer dark mode\" --user-id alice\n\n# Add from a JSON messages array\nmem0 add --file conversation.json --user-id alice\n\n# Add from stdin\necho \"Loves hiking on weekends\" | 
mem0 add --user-id alice\n\n# Add with metadata\nmem0 add \"User prefers TypeScript over JavaScript\" --metadata '{\"category\": \"preference\", \"priority\": \"high\"}'\n```\n\n资料来源：[cli/python/README.md](https://github.com/mem0ai/mem0/blob/main/cli/python/README.md)\n\n### Search Memory\n\nThe **Search** operation retrieves relevant memories based on natural language queries. Mem0 converts the query into a vector embedding and performs similarity search against stored memories, returning results ranked by relevance. The search operation supports filtering by entity scope, metadata attributes, and memory types.\n\n**Function signature and parameters:**\n\n```python\ndef search(\n    query: str,\n    user_id: str | None = None,\n    agent_id: str | None = None,\n    app_id: str | None = None,\n    run_id: str | None = None,\n    version: str | None = \"v1.1\",\n    limit: int = 10,\n    category: str | None = None,\n    filter: dict | None = None,\n    rerank: bool = False\n) -> list[dict]\n```\n\n资料来源：[docs/core-concepts/memory-operations/search.mdx](https://github.com/mem0ai/mem0/blob/main/docs/core-concepts/memory-operations/search.mdx)\n\n**Search with metadata filtering:**\n\nMetadata filtering allows precise memory retrieval based on specific attributes stored with each memory. 
This is particularly useful for retrieving memories that match certain criteria without relying solely on semantic similarity.\n\n```python\nresult = memory.search(\n    query=\"user preferences\",\n    user_id=\"alice\",\n    filter={\n        \"category\": \"preference\",\n        \"priority\": {\"$eq\": \"high\"}\n    }\n)\n```\n\n资料来源：[docs/open-source/features/metadata-filtering.mdx](https://github.com/mem0ai/mem0/blob/main/docs/open-source/features/metadata-filtering.mdx)\n\n**CLI search examples:**\n\n```bash\n# Basic semantic search\nmem0 search \"What are Alice's preferences?\" --user-id alice\n\n# Search with output formatting\nmem0 search \"preferences\" --output json --top-k 20\n\n# Search within specific scope\nmem0 search \"agent behavior\" --agent-id agent-123\n```\n\n### Get Memory\n\nThe **Get** operation retrieves a specific memory by its unique identifier. Unlike search which performs semantic similarity, get provides direct access to a known memory record for viewing, editing, or deletion operations.\n\n**CLI usage:**\n\n```bash\n# Retrieve a specific memory by ID\nmem0 get 7b3c1a2e-4d5f-6789-abcd-ef0123456789\n\n# Get memory with JSON output for AI agent processing\nmem0 get 7b3c1a2e-4d5f-6789-abcd-ef0123456789 --output json\n```\n\n资料来源：[cli/node/README.md](https://github.com/mem0ai/mem0/blob/main/cli/node/README.md)\n\n### Update Memory\n\nThe **Update** operation modifies the content or metadata of an existing memory while preserving the memory's history and relationships. 
The update operation preserves the original memory ID and maintains audit trails of modifications.\n\n**Function signature and parameters:**\n\n```python\ndef update(\n    memory_id: str,\n    data: str | None = None,\n    metadata: dict | None = None,\n    user_id: str | None = None\n) -> dict\n```\n\n资料来源：[docs/core-concepts/memory-operations/update.mdx](https://github.com/mem0ai/mem0/blob/main/docs/core-concepts/memory-operations/update.mdx)\n\n**Update operation workflow:**\n\n```mermaid\ngraph TD\n    A[Update request with memory_id] --> B[Locate existing memory record]\n    B --> C[Apply content or metadata changes]\n    C --> D[Update vector embeddings if content changed]\n    D --> E[Preserve modification history]\n    E --> F[Return updated memory object]\n```\n\n**CLI update examples:**\n\n```bash\n# Update memory text\nmem0 update <memory-id> \"Updated preference text\"\n\n# Update metadata only\nmem0 update <memory-id> --metadata '{\"priority\": \"high\"}'\n\n# Update via stdin\necho \"new text\" | mem0 update <memory-id>\n```\n\n### Delete Memory\n\nThe **Delete** operation removes memories from the storage system. 
Mem0 supports multiple deletion strategies including single memory deletion, bulk deletion by scope, and entity-level deletion that removes all associated memories.\n\n**Function signature and parameters:**\n\n```python\ndef delete(\n    memory_id: str | None = None,\n    user_id: str | None = None,\n    agent_id: str | None = None,\n    app_id: str | None = None,\n    run_id: str | None = None,\n    delete_all: bool = False,\n    confirm: bool = False\n) -> dict\n```\n\n资料来源：[docs/core-concepts/memory-operations/delete.mdx](https://github.com/mem0ai/mem0/blob/main/docs/core-concepts/memory-operations/delete.mdx)\n\n**CLI delete examples:**\n\n```bash\n# Delete a single memory\nmem0 delete <memory-id>\n\n# Delete all memories for a user (with confirmation)\nmem0 delete --all --user-id alice\n\n# Delete all memories project-wide\nmem0 delete --all --project --force\n\n# Preview what would be deleted\nmem0 delete --all --user-id alice --dry-run\n```\n\n**Delete flags reference:**\n\n| Flag | Description |\n|------|-------------|\n| `--all` | Delete all memories matching scope filters |\n| `--entity` | Delete the entity and all its memories |\n| `--project` | With `--all`: delete all memories project-wide |\n| `--dry-run` | Preview without deleting |\n| `--force` | Skip confirmation prompt |\n\n资料来源：[cli/node/README.md](https://github.com/mem0ai/mem0/blob/main/cli/node/README.md)\n\n### List Memories\n\nThe **List** operation retrieves memories with optional filters, pagination, and sorting. 
Unlike search which returns semantically relevant results, list provides comprehensive enumeration of stored memories within specified scopes.\n\n**CLI usage:**\n\n```bash\n# List all memories for a user\nmem0 list --user-id alice\n\n# List with pagination\nmem0 list --user-id alice --page 1 --page-size 50\n\n# List in JSON format for agent consumption\nmem0 list --user-id alice --output json\n```\n\n## Entity Scopes\n\nMem0 organizes memories within hierarchical entity scopes that provide logical separation and access control. Each memory belongs to at least one entity identifier, creating an ownership hierarchy.\n\n```mermaid\ngraph TB\n    A[Memory Record] --> B[user_id]\n    A --> C[agent_id]\n    A --> D[app_id]\n    A --> E[run_id]\n    \n    B --> F[User Entity]\n    C --> G[Agent Entity]\n    D --> H[Application Entity]\n    E --> I[Run Entity]\n    \n    F --> J[Project/Organization]\n    G --> J\n    H --> J\n    I --> J\n```\n\n**Entity scope parameters:**\n\n| Parameter | Description | Use Case |\n|-----------|-------------|----------|\n| `user_id` | Identifies the end user | Personal preferences, history |\n| `agent_id` | Identifies the AI agent | Agent behavior patterns, policies |\n| `app_id` | Identifies the application | App-specific configurations |\n| `run_id` | Identifies a session/run | Conversation context within a session |\n\n资料来源：[docs/core-concepts/memory-types.mdx](https://github.com/mem0ai/mem0/blob/main/docs/core-concepts/memory-types.mdx)\n\n## Asynchronous Memory Operations\n\nFor large-scale memory operations that may take extended time to complete, Mem0 provides asynchronous execution modes. 
Async operations return immediately with an event ID that can be used to track progress and retrieve results.\n\n**Async operation support:**\n\n| Operation | Async Support | Return Value |\n|-----------|---------------|--------------|\n| `add` | Yes (bulk adds) | Event ID |\n| `search` | Yes | Event ID |\n| `delete` | Yes (bulk deletes) | Event ID |\n| `update` | No | Updated memory |\n| `get` | No | Memory object |\n| `list` | Yes | Event ID |\n\n资料来源：[docs/open-source/features/async-memory.mdx](https://github.com/mem0ai/mem0/blob/main/docs/open-source/features/async-memory.mdx)\n\n**Event monitoring via CLI:**\n\n```bash\n# List recent background processing events\nmem0 event list\n\n# Check the status of a specific event\nmem0 event status <event-id>\n```\n\n## Memory Types\n\nMem0 supports different memory types that serve distinct purposes in AI agent architectures. Each memory type has specific characteristics optimized for different retrieval patterns and use cases.\n\n**Memory type reference:**\n\n| Type | Purpose | Retrieval Pattern | Use Case |\n|------|---------|-------------------|----------|\n| `semantic` | Store facts and preferences | Semantic similarity search | User preferences, facts |\n| `episodic` | Record events and conversations | Time-based, sequential | Conversation history |\n| `procedural` | Store agent behaviors/actions | Task-based patterns | Agent workflows |\n| `long-term` | Persistent cross-session memory | Multi-dimensional search | User profiles, knowledge |\n\n资料来源：[docs/core-concepts/memory-types.mdx](https://github.com/mem0ai/mem0/blob/main/docs/core-concepts/memory-types.mdx)\n\n## Metadata and Filtering\n\nMem0 supports rich metadata storage and filtering capabilities that enable precise memory retrieval beyond semantic similarity. 
Metadata can include arbitrary key-value pairs that are indexed for efficient filtering.\n\n**Metadata structure example:**\n\n```python\nmemory = {\n    \"id\": \"mem_xxxxx\",\n    \"memory\": \"User prefers dark mode for the interface\",\n    \"metadata\": {\n        \"category\": \"preference\",\n        \"priority\": \"high\",\n        \"source\": \"explicit_feedback\",\n        \"tags\": [\"ui\", \"theme\", \"dark-mode\"]\n    },\n    \"created_at\": \"2025-01-15T10:30:00Z\",\n    \"user_id\": \"alice\"\n}\n```\n\n**Filter operators supported:**\n\n| Operator | Description | Example |\n|----------|-------------|---------|\n| `$eq` | Equals | `{\"priority\": {\"$eq\": \"high\"}}` |\n| `$ne` | Not equals | `{\"status\": {\"$ne\": \"archived\"}}` |\n| `$in` | In array | `{\"category\": {\"$in\": [\"fact\", \"preference\"]}}` |\n| `$nin` | Not in array | `{\"source\": {\"$nin\": [\"deprecated\"]}}` |\n| `$gt`, `$gte` | Greater than (or equal) | `{\"score\": {\"$gt\": 0.8}}` |\n| `$lt`, `$lte` | Less than (or equal) | `{\"priority\": {\"$lte\": 5}}` |\n\n资料来源：[docs/open-source/features/metadata-filtering.mdx](https://github.com/mem0ai/mem0/blob/main/docs/open-source/features/metadata-filtering.mdx)\n\n## Custom Instructions\n\nCustom instructions provide a mechanism to customize how Mem0 processes and interprets memories. These instructions guide the LLM in extracting relevant facts, categorizing information, and determining storage behavior.\n\n**Configuration example:**\n\n```python\nmemory = Memory()\n\n# Set custom instructions for the memory instance\nmemory.configure(\n    custom_instructions=\"Focus on extracting user preferences about product features. \"\n                        \"Categorize memories by product area. 
\"\n                        \"Prioritize recent explicit feedback over implicit observations.\"\n)\n\n# Add memory with custom processing\nresult = memory.add(\n    messages=\"I really love the new dark mode feature in the settings panel\",\n    user_id=\"alice\"\n)\n```\n\n资料来源：[docs/open-source/features/custom-instructions.mdx](https://github.com/mem0ai/mem0/blob/main/docs/open-source/features/custom-instructions.mdx)\n\n## Bulk Import\n\nMem0 supports bulk importing of memories from JSON files, enabling migration from other systems or initial data population.\n\n**Import file format:**\n\n```json\n[\n  {\n    \"memory\": \"User prefers dark mode\",\n    \"user_id\": \"alice\",\n    \"metadata\": {\"category\": \"preference\"}\n  },\n  {\n    \"text\": \"Agent uses fallback strategy when API fails\",\n    \"agent_id\": \"agent-123\",\n    \"metadata\": {\"behavior\": \"error-handling\"}\n  },\n  {\n    \"content\": \"Application has rate limiting enabled\",\n    \"app_id\": \"app-production\",\n    \"metadata\": {\"configuration\": true}\n  }\n]\n```\n\n**CLI import command:**\n\n```bash\nmem0 import data.json --user-id alice\n```\n\n资料来源：[cli/node/README.md](https://github.com/mem0ai/mem0/blob/main/cli/node/README.md)\n\n## Agent Mode\n\nThe CLI supports an agent mode that formats output specifically for AI agent tool loops. This mode returns structured JSON that can be easily parsed by AI systems for decision-making.\n\n**Agent mode usage:**\n\n```bash\nmem0 --agent search \"user preferences\" --user-id alice\nmem0 --agent add \"User prefers dark mode\" --user-id alice\nmem0 --agent list --user-id alice\n```\n\n资料来源：[cli/python/README.md](https://github.com/mem0ai/mem0/blob/main/cli/python/README.md)\n\n## Dashboard Memory Management\n\nThe Mem0 dashboard provides a web-based interface for viewing, searching, and managing memories. 
The memory operations are accessible through a visual interface that includes pagination, detail views, and deletion confirmation modals.\n\n**Dashboard features:**\n\n- Paginated memory listing with navigation controls\n- Memory detail view showing content, ID, timestamps, and metadata\n- Inline deletion with confirmation modal\n- Search functionality within the memories page\n\n资料来源：[server/dashboard/src/app/(root)/dashboard/memories/page.tsx](https://github.com/mem0ai/mem0/blob/main/server/dashboard/src/app/(root)/dashboard/memories/page.tsx)\n\n## Configuration and Status\n\nThe Mem0 CLI provides commands for managing configuration and verifying connectivity.\n\n**Configuration commands:**\n\n```bash\nmem0 config show              # Display current config (secrets redacted)\nmem0 config get api_key       # Get a specific value\nmem0 config set user_id bob   # Set a value\n\nmem0 status                   # Verify API connection and display project\nmem0 version                  # Print CLI version\n```\n\n## Operation Flow Summary\n\n```mermaid\ngraph LR\n    A[Client Request] --> B{Operation Type}\n    \n    B -->|add| C[Process & Store]\n    B -->|search| D[Embed Query & Search]\n    B -->|get| E[Direct Lookup]\n    B -->|update| F[Modify & Re-index]\n    B -->|delete| G[Remove from Store]\n    \n    C --> H[(Vector Store)]\n    D --> H\n    E --> H\n    F --> H\n    G --> H\n    \n    C --> I[Event ID]\n    D --> J[Results]\n    E --> K[Memory Object]\n    F --> K\n    G --> L[Confirmation]\n```\n\n## Error Handling\n\nMemory operations may encounter various error conditions that should be handled appropriately in client applications.\n\n**Common error scenarios:**\n\n| Error | Cause | Resolution |\n|-------|-------|------------|\n| `EntityNotFoundError` | Referenced user/agent/app doesn't exist | Verify entity IDs before operations |\n| `MemoryNotFoundError` | Memory ID doesn't exist | Check memory ID or use search |\n| `ValidationError` | Invalid input 
format | Validate request parameters |\n| `RateLimitError` | API rate limit exceeded | Implement exponential backoff |\n| `ConnectionError` | Network or API endpoint unavailable | Retry with circuit breaker |\n\n## See Also\n\n- [Memory Types](https://github.com/mem0ai/mem0/blob/main/docs/core-concepts/memory-types.mdx) - Understanding semantic, episodic, procedural, and long-term memory\n- [Async Memory](https://github.com/mem0ai/mem0/blob/main/docs/open-source/features/async-memory.mdx) - Large-scale asynchronous operations\n- [Metadata Filtering](https://github.com/mem0ai/mem0/blob/main/docs/open-source/features/metadata-filtering.mdx) - Advanced filtering capabilities\n- [Custom Instructions](https://github.com/mem0ai/mem0/blob/main/docs/open-source/features/custom-instructions.mdx) - Customizing memory processing behavior\n\n---\n\n<a id='page-ai-integration'></a>\n\n## AI Model Integration\n\n### 相关页面\n\n相关主题：[Memory Operations](#page-memory-operations), [Embedding Models](#page-embeddings), [System Architecture](#page-architecture)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [mem0/llms/base.py](https://github.com/mem0ai/mem0/blob/main/mem0/llms/base.py)\n- [mem0/llms/openai.py](https://github.com/mem0ai/mem0/blob/main/mem0/llms/openai.py)\n- [mem0/llms/anthropic.py](https://github.com/mem0ai/mem0/blob/main/mem0/llms/anthropic.py)\n- [mem0/llms/azure_openai.py](https://github.com/mem0ai/mem0/blob/main/mem0/llms/azure_openai.py)\n- [mem0/llms/gemini.py](https://github.com/mem0ai/mem0/blob/main/mem0/llms/gemini.py)\n- [mem0/configs/llms/base.py](https://github.com/mem0ai/mem0/blob/main/mem0/configs/llms/base.py)\n- [mem0/configs/llms/__init__.py](https://github.com/mem0ai/mem0/blob/main/mem0/configs/llms/__init__.py)\n- [docs/components/llms/overview.mdx](https://github.com/mem0ai/mem0/blob/main/docs/components/llms/overview.mdx)\n- 
[docs/components/llms/models/openai.mdx](https://github.com/mem0ai/mem0/blob/main/docs/components/llms/models/openai.mdx)\n- [docs/components/llms/models/anthropic.mdx](https://github.com/mem0ai/mem0/blob/main/docs/components/llms/models/anthropic.mdx)\n</details>\n\n# AI Model Integration\n\n## Overview\n\nThe AI Model Integration module in mem0 provides a unified abstraction layer for interacting with various large language model (LLM) providers. This architecture enables seamless switching between different AI backends while maintaining a consistent interface for memory operations. 资料来源：[docs/components/llms/overview.mdx:1-5]()\n\n## Architecture\n\nThe integration follows a **Provider Pattern** with a base class defining the contract and provider-specific implementations extending it.\n\n```mermaid\ngraph TD\n    A[mem0 Core] --> B[LLM Base Interface]\n    B --> C[OpenAI Provider]\n    B --> D[Anthropic Provider]\n    B --> E[Azure OpenAI Provider]\n    B --> F[Gemini Provider]\n    \n    C --> G[OpenAI API]\n    D --> H[Anthropic API]\n    E --> I[Azure Cognitive Services]\n    F --> J[Google AI API]\n```\n\n## Supported Providers\n\n| Provider | Model Class | API Type | Status |\n|----------|-------------|----------|--------|\n| OpenAI | `OpenAILargeLanguageModel` | REST | Production |\n| Anthropic | `AnthropicLargeLanguageModel` | REST | Production |\n| Azure OpenAI | `AzureOpenAILargeLanguageModel` | REST | Production |\n| Google Gemini | `GeminiLargeLanguageModel` | REST | Production |\n\n资料来源：[mem0/llms/base.py:1-20]()\n\n## Base Interface\n\nAll LLM providers inherit from `LargeLanguageModel` base class which defines the core contract:\n\n```python\nclass LargeLanguageModel(ABC):\n    @abstractmethod\n    def generate_response(self, messages, **kwargs):\n        pass\n    \n    @abstractmethod\n    def get_model_name(self):\n        pass\n```\n\n资料来源：[mem0/llms/base.py:15-30]()\n\n### Core Methods\n\n| Method | Purpose | Parameters 
|\n|--------|---------|------------|\n| `generate_response` | Generate text completion | `messages`, `**kwargs` |\n| `get_model_name` | Return model identifier | None |\n\n## Provider Implementations\n\n### OpenAI Integration\n\nThe OpenAI provider supports GPT-4, GPT-4 Turbo, and GPT-3.5 Turbo models through the OpenAI API.\n\n```python\nclass OpenAILargeLanguageModel(LargeLanguageModel):\n    def __init__(\n        self,\n        model: str = \"gpt-4\",\n        api_key: str = None,\n        temperature: float = 0.7,\n        max_tokens: int = 2000,\n        **kwargs\n    ):\n```\n\n资料来源：[mem0/llms/openai.py:10-25]()\n\n**Configuration Parameters:**\n\n| Parameter | Type | Default | Description |\n|-----------|------|---------|-------------|\n| `model` | `str` | `\"gpt-4\"` | Model identifier |\n| `api_key` | `str` | `None` | OpenAI API key |\n| `temperature` | `float` | `0.7` | Response randomness |\n| `max_tokens` | `int` | `2000` | Maximum response length |\n\n**Environment Variable:** `OPENAI_API_KEY`\n\n资料来源：[docs/components/llms/models/openai.mdx:1-15]()\n\n### Anthropic Integration\n\nThe Anthropic provider enables access to Claude models through the Anthropic API.\n\n```python\nclass AnthropicLargeLanguageModel(LargeLanguageModel):\n    def __init__(\n        self,\n        model: str = \"claude-3-5-sonnet-20241022\",\n        api_key: str = None,\n        temperature: float = 0.7,\n        max_tokens: int = 2000,\n        **kwargs\n    ):\n```\n\n资料来源：[mem0/llms/anthropic.py:10-25]()\n\n**Supported Models:**\n\n| Model | Context Window | Best For |\n|-------|----------------|----------|\n| `claude-3-5-sonnet-20241022` | 200K tokens | Balanced performance |\n| `claude-3-opus-20240229` | 200K tokens | Complex reasoning |\n| `claude-3-haiku-20240307` | 200K tokens | Fast, cost-effective |\n\n**Environment Variable:** `ANTHROPIC_API_KEY`\n\n资料来源：[docs/components/llms/models/anthropic.mdx:1-20]()\n\n### Azure OpenAI Integration\n\nAzure OpenAI provides 
enterprise-grade access with compliance features and regional deployment options.\n\n```python\nclass AzureOpenAILargeLanguageModel(LargeLanguageModel):\n    def __init__(\n        self,\n        model: str = \"gpt-4\",\n        api_key: str = None,\n        azure_endpoint: str = None,\n        api_version: str = \"2024-02-01\",\n        temperature: float = 0.7,\n        max_tokens: int = 2000,\n        **kwargs\n    ):\n```\n\n资料来源：[mem0/llms/azure_openai.py:10-30]()\n\n**Azure-Specific Parameters:**\n\n| Parameter | Type | Required | Description |\n|-----------|------|----------|-------------|\n| `azure_endpoint` | `str` | Yes | Azure endpoint URL |\n| `api_version` | `str` | No | API version string (default: 2024-02-01) |\n| `azure_deployment` | `str` | No | Deployment name |\n\n**Environment Variables:**\n- `AZURE_OPENAI_API_KEY`\n- `AZURE_OPENAI_ENDPOINT`\n\n### Google Gemini Integration\n\nThe Gemini provider integrates with Google AI's Gemini models for multimodal capabilities.\n\n```python\nclass GeminiLargeLanguageModel(LargeLanguageModel):\n    def __init__(\n        self,\n        model: str = \"gemini-2.0-flash-exp\",\n        api_key: str = None,\n        temperature: float = 0.7,\n        max_tokens: int = 2000,\n        **kwargs\n    ):\n```\n\n资料来源：[mem0/llms/gemini.py:10-25]()\n\n**Supported Models:**\n\n| Model | Context Window | Features |\n|-------|----------------|----------|\n| `gemini-2.0-flash-exp` | 1M tokens | Latest, fastest |\n| `gemini-1.5-pro` | 1M tokens | Long context |\n| `gemini-1.5-flash` | 1M tokens | Balanced |\n\n**Environment Variable:** `GEMINI_API_KEY`\n\n## Configuration System\n\n### Base Configuration\n\nAll LLM configurations inherit from `LLMConfig` using Pydantic for validation:\n\n```python\nclass LLMConfig(BaseModel):\n    provider: str\n    model: str\n    temperature: float = 0.7\n    max_tokens: int = 2000\n    extra_params: dict = {}\n```\n\n资料来源：[mem0/configs/llms/base.py:1-20]()\n\n### Configuration Factory\n\nThe `LLMConfigs` 
class provides a centralized configuration registry:\n\n```python\nclass LLMConfigs:\n    @staticmethod\n    def get_config(provider: str) -> LLMConfig:\n        # Returns provider-specific configuration\n        pass\n```\n\n资料来源：[mem0/configs/llms/__init__.py:1-30]()\n\n## Usage Patterns\n\n### Direct Instantiation\n\n```python\nfrom mem0.llms.openai import OpenAILargeLanguageModel\n\nllm = OpenAILargeLanguageModel(\n    model=\"gpt-4\",\n    temperature=0.3,\n    max_tokens=1000\n)\n\nresponse = llm.generate_response(messages=[\n    {\"role\": \"user\", \"content\": \"Summarize my notes\"}\n])\n```\n\n### Configuration-Based\n\n```python\nfrom mem0.configs.llms import LLMConfigs\n\nconfig = LLMConfigs.get_config(\"openai\")\nllm = config.initialize()\n```\n\n## Message Format\n\nAll providers accept a standardized message format:\n\n```python\nmessages = [\n    {\"role\": \"system\", \"content\": \"You are a helpful assistant\"},\n    {\"role\": \"user\", \"content\": \"What is mem0?\"},\n    {\"role\": \"assistant\", \"content\": \"Mem0 is a memory system...\"},\n    {\"role\": \"user\", \"content\": \"Tell me more\"}\n]\n```\n\n| Role | Description |\n|------|-------------|\n| `system` | System-level instructions |\n| `user` | User input messages |\n| `assistant` | Model responses |\n\n## Error Handling\n\nAll LLM providers implement consistent error handling:\n\n```python\ntry:\n    response = llm.generate_response(messages)\nexcept AuthenticationError:\n    # Handle invalid API key\n    pass\nexcept RateLimitError:\n    # Handle rate limiting\n    pass\nexcept APIConnectionError:\n    # Handle connection issues\n    pass\n```\n\n## Extending the Framework\n\nTo add a new LLM provider:\n\n1. Create a new class inheriting from `LargeLanguageModel`\n2. Implement `generate_response()` and `get_model_name()` methods\n3. Add provider-specific configuration in `mem0/configs/llms/`\n4. 
Register the provider in the configuration factory\n\n```python\nclass CustomLLM(LargeLanguageModel):\n    def __init__(self, model: str = \"custom-model\", **kwargs):\n        self.model = model\n    \n    def generate_response(self, messages, **kwargs):\n        # Implementation\n        pass\n    \n    def get_model_name(self):\n        return self.model\n```\n\n## Security Considerations\n\n- API keys should be provided via environment variables, not hardcoded\n- Rate limiting is handled by the underlying provider APIs\n- Azure OpenAI supports managed identity for enterprise deployments\n- Gemini supports API key restrictions in Google Cloud Console\n\n---\n\n<a id='page-vector-stores'></a>\n\n## Vector Stores and Storage\n\n### 相关页面\n\n相关主题：[System Architecture](#page-architecture), [Embedding Models](#page-embeddings), [Memory Operations](#page-memory-operations)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [mem0/vector_stores/base.py](https://github.com/mem0ai/mem0/blob/main/mem0/vector_stores/base.py)\n- [mem0/vector_stores/pinecone.py](https://github.com/mem0ai/mem0/blob/main/mem0/vector_stores/pinecone.py)\n- [mem0/vector_stores/qdrant.py](https://github.com/mem0ai/mem0/blob/main/mem0/vector_stores/qdrant.py)\n- [mem0/vector_stores/chroma.py](https://github.com/mem0ai/mem0/blob/main/mem0/vector_stores/chroma.py)\n- [mem0/vector_stores/pgvector.py](https://github.com/mem0ai/mem0/blob/main/mem0/vector_stores/pgvector.py)\n- [mem0/vector_stores/weaviate.py](https://github.com/mem0ai/mem0/blob/main/mem0/vector_stores/weaviate.py)\n- [mem0/vector_stores/redis.py](https://github.com/mem0ai/mem0/blob/main/mem0/vector_stores/redis.py)\n- [mem0/vector_stores/configs.py](https://github.com/mem0ai/mem0/blob/main/mem0/vector_stores/configs.py)\n- [docs/components/vectordbs/overview.mdx](https://github.com/mem0ai/mem0/blob/main/docs/components/vectordbs/overview.mdx)\n- 
[docs/components/vectordbs/config.mdx](https://github.com/mem0ai/mem0/blob/main/docs/components/vectordbs/config.mdx)\n</details>\n\n# Vector Stores and Storage\n\n## Overview\n\nVector stores in Mem0 provide the foundational persistence layer for semantic memory storage and retrieval. Mem0 supports multiple vector database backends, allowing users to choose the storage solution that best fits their infrastructure requirements, scale needs, and operational constraints.\n\nThe vector store system enables:\n\n- **Semantic Search**: Store memory embeddings and retrieve relevant memories based on cosine similarity\n- **Multi-Provider Support**: Integrate with popular vector databases including Pinecone, Qdrant, Chroma, PGVector, Weaviate, and Redis\n- **Unified Interface**: Consistent API across all providers through an abstract base class\n- **Metadata Filtering**: Filter memories by user_id, agent_id, run_id, and custom metadata\n- **Scalability**: Support for both local development (Chroma) and production-scale deployments (Pinecone, Qdrant)\n\n资料来源：[docs/components/vectordbs/overview.mdx]()\n\n## Architecture\n\nMem0 implements a provider-based architecture for vector stores. The system consists of:\n\n1. **Base Vector Store Interface**: Abstract class defining the contract all providers must implement\n2. **Provider Implementations**: Concrete implementations for each supported vector database\n3. **Configuration System**: Provider-specific configuration management\n4. 
**Factory Pattern**: Dynamic instantiation based on provider selection\n\n```mermaid\ngraph TD\n    A[Mem0 Memory Core] --> B[VectorStoreFactory]\n    B --> C[BaseVectorStore]\n    C --> D[Pinecone]\n    C --> E[Qdrant]\n    C --> F[Chroma]\n    C --> G[PGVector]\n    C --> H[Weaviate]\n    C --> I[Redis]\n    \n    J[Embedding Service] --> K[Vector Store]\n    K --> L[Semantic Search Results]\n```\n\n资料来源：[mem0/vector_stores/base.py]()\n\n## Base Vector Store Interface\n\nAll vector store providers inherit from `BaseVectorStore`, which defines the core operations required for memory storage and retrieval.\n\n### Core Methods\n\n| Method | Description |\n|--------|-------------|\n| `add` | Insert vectors with associated metadata into the store |\n| `search` | Query vectors by semantic similarity with optional filters |\n| `get` | Retrieve specific vector entries by ID |\n| `delete` | Remove vectors from the store |\n| `update` | Modify existing vector entries |\n| `list` | List all vectors with optional pagination and filters |\n\n资料来源：[mem0/vector_stores/base.py]()\n\n### Data Model\n\nEach vector entry in the store contains:\n\n```python\n{\n    \"id\": str,           # Unique identifier (UUID)\n    \"vector\": List[float],  # Embedding vector\n    \"data\": str,         # Original text content\n    \"metadata\": {\n        \"user_id\": str,\n        \"agent_id\": Optional[str],\n        \"run_id\": Optional[str],\n        \"event\": Optional[str],\n        \"created_at\": str,\n        \"memory_type\": Optional[str]\n    }\n}\n```\n\n资料来源：[mem0/vector_stores/base.py]()\n\n## Supported Providers\n\n### Provider Comparison\n\n| Provider | Type | Deployment | Scalability | Use Case |\n|----------|------|------------|-------------|----------|\n| **Chroma** | Local/Embedded | In-process | Low | Development, prototyping |\n| **Pinecone** | Cloud/Managed | Hosted | Very High | Production at scale |\n| **Qdrant** | Self-hosted/Cloud | Docker/K8s | High | Self-hosted 
production |\n| **PGVector** | Self-hosted | PostgreSQL extension | High | Existing Postgres infra |\n| **Weaviate** | Self-hosted/Cloud | Docker/K8s | High | Knowledge graphs |\n| **Redis** | Self-hosted/Cloud | Redis Stack | Medium | Cache + vector hybrid |\n\n资料来源：[docs/components/vectordbs/overview.mdx]()\n\n### Chroma (Development)\n\nChroma is the default vector store for local development and testing. It runs as an embedded database within the application process.\n\n**Characteristics:**\n- Zero-configuration setup\n- In-process operation\n- File-based persistence\n- Best for development and evaluation\n\n资料来源：[mem0/vector_stores/chroma.py]()\n\n### Pinecone (Cloud)\n\nPinecone is a managed vector database service offering serverless and pod-based deployments.\n\n**Configuration:**\n```python\n{\n    \"vector_store\": {\n        \"provider\": \"pinecone\",\n        \"config\": {\n            \"api_key\": \"your-api-key\",\n            \"index_name\": \"mem0-memory\",\n            \"environment\": \"gcp-starter\"\n        }\n    }\n}\n```\n\n资料来源：[mem0/vector_stores/pinecone.py](), [docs/components/vectordbs/config.mdx]()\n\n### Qdrant (Self-hosted/Cloud)\n\nQdrant is an open-source vector search engine with both self-hosted and cloud options.\n\n**Configuration:**\n```python\n{\n    \"vector_store\": {\n        \"provider\": \"qdrant\",\n        \"config\": {\n            \"host\": \"localhost\",\n            \"port\": 6333,\n            \"collection_name\": \"mem0\"\n        }\n    }\n}\n```\n\n资料来源：[mem0/vector_stores/qdrant.py]()\n\n### PGVector (PostgreSQL)\n\nPGVector extends PostgreSQL with vector similarity search capabilities, ideal for applications already using PostgreSQL.\n\n**Configuration:**\n```python\n{\n    \"vector_store\": {\n        \"provider\": \"pgvector\",\n        \"config\": {\n            \"host\": \"localhost\",\n            \"port\": 5432,\n            \"dbname\": \"mem0\",\n            \"user\": \"postgres\",\n            
\"password\": \"password\"\n        }\n    }\n}\n```\n\n资料来源：[mem0/vector_stores/pgvector.py]()\n\n### Weaviate\n\nWeaviate is an open-source vector database with built-in support for hybrid search and knowledge graphs.\n\n**Configuration:**\n```python\n{\n    \"vector_store\": {\n        \"provider\": \"weaviate\",\n        \"config\": {\n            \"url\": \"http://localhost:8080\",\n            \"api_key\": \"your-api-key\",  # Optional, for cloud\n            \"index_name\": \"Mem0\"\n        }\n    }\n}\n```\n\n资料来源：[mem0/vector_stores/weaviate.py]()\n\n### Redis\n\nRedis Stack provides vector search capabilities built on the popular in-memory data store.\n\n**Configuration:**\n```python\n{\n    \"vector_store\": {\n        \"provider\": \"redis\",\n        \"config\": {\n            \"host\": \"localhost\",\n            \"port\": 6379,\n            \"index_name\": \"mem0\",\n            \"password\": \"password\"  # Optional\n        }\n    }\n}\n```\n\n资料来源：[mem0/vector_stores/redis.py]()\n\n## Configuration System\n\n### Configuration Schema\n\nThe vector store configuration is defined in `configs.py` and follows a structured schema:\n\n```python\n@dataclass\nclass VectorStoreConfig:\n    provider: str                    # Provider name\n    collection_name: str             # Collection/index name\n    embedding_model_dims: int        # Embedding dimension size\n    api_key: Optional[str] = None   # Provider API key\n    # ... 
additional provider-specific fields\n```\n\n资料来源：[mem0/vector_stores/configs.py]()\n\n### Configuration File\n\nVector store settings are typically defined in `config.yaml`:\n\n```yaml\nvector_store:\n  provider: \"chroma\"  # or pinecone, qdrant, pgvector, weaviate, redis\n  collection_name: \"mem0\"\n  embedding_model_dims: 1536\n```\n\n资料来源：[docs/components/vectordbs/config.mdx]()\n\n### Environment Variables\n\nMany providers support configuration via environment variables:\n\n| Variable | Provider | Description |\n|----------|----------|-------------|\n| `PINECONE_API_KEY` | Pinecone | Pinecone API key |\n| `QDRANT_HOST` | Qdrant | Qdrant server host |\n| `REDIS_PASSWORD` | Redis | Redis authentication |\n| `WEAVIATE_API_KEY` | Weaviate | Weaviate cloud API key |\n\n## Search Operations\n\n### Semantic Search\n\nThe primary operation for memory retrieval is semantic search, which finds vectors most similar to a query embedding.\n\n```python\nresults = vector_store.search(\n    query=\"user's preference for morning coffee\",\n    limit=5,\n    filters={\n        \"user_id\": \"user-123\"\n    }\n)\n```\n\n### Search Parameters\n\n| Parameter | Type | Default | Description |\n|-----------|------|---------|-------------|\n| `query` | str | Required | Search query text |\n| `limit` | int | 10 | Maximum results to return |\n| `filters` | dict | None | Metadata filters |\n| `min_score` | float | None | Minimum similarity threshold |\n\n资料来源：[mem0/vector_stores/base.py]()\n\n### Metadata Filtering\n\nMem0 supports filtering search results by various metadata fields:\n\n```python\nfilters = {\n    \"user_id\": \"user-123\",           # Required: filter by user\n    \"agent_id\": \"agent-456\",         # Optional: filter by agent\n    \"run_id\": \"run-789\",             # Optional: filter by session\n    \"memory_type\": \"preference\",     # Optional: filter by type\n    \"created_at\": {\"$gte\": \"2024-01-01\"}  # Optional: time-based\n}\n```\n\n## Memory 
Management\n\n### Adding Memories\n\n```python\nvector_store.add(\n    vectors=embeddings,\n    documents=memory_texts,\n    metadatas=metadata_list\n)\n```\n\n### Updating Memories\n\n```python\nvector_store.update(\n    id=\"memory-uuid\",\n    vector=new_embedding,\n    data=new_text,\n    metadata=updated_metadata\n)\n```\n\n### Deleting Memories\n\n```python\n# Delete single memory\nvector_store.delete(id=\"memory-uuid\")\n\n# Delete all memories for a user\nvector_store.delete(filters={\"user_id\": \"user-123\"})\n\n# Delete all memories\nvector_store.delete(delete_all=True)\n```\n\n## Embedding Integration\n\nVector stores work in conjunction with Mem0's embedding service to convert text into vector representations.\n\n```mermaid\ngraph LR\n    A[User Message] --> B[Embedding Service]\n    B --> C[Embedding Vector]\n    C --> D[Vector Store]\n    D --> E[Storage / Retrieval]\n    \n    F[Search Query] --> G[Embedding Service]\n    G --> H[Query Vector]\n    H --> D\n    D --> I[Similarity Search]\n    I --> J[Top-K Results]\n```\n\nThe embedding dimension must match the vector store configuration. Mem0 uses 1536 dimensions by default (OpenAI text-embedding-3-small).\n\n资料来源：[docs/components/vectordbs/overview.mdx]()\n\n## Best Practices\n\n### Development vs Production\n\n| Aspect | Development | Production |\n|--------|-------------|------------|\n| **Provider** | Chroma | Pinecone/Qdrant/PGVector |\n| **Deployment** | Local embedded | Managed/self-hosted |\n| **Persistence** | File-based | Cloud/server |\n| **Scaling** | Limited | Horizontal |\n\n### Performance Considerations\n\n1. **Index Management**: Ensure proper indexing is configured for your provider\n2. **Batch Operations**: Use batch inserts when adding multiple memories\n3. **Connection Pooling**: Configure connection pools for high-throughput scenarios\n4. 
**Embedding Cache**: Cache embeddings to avoid redundant computations\n\n### Security\n\n- Store API keys in environment variables, not in configuration files\n- Use TLS/SSL connections for production deployments\n- Implement proper access controls based on user_id filtering\n\n## Troubleshooting\n\n### Common Issues\n\n| Issue | Cause | Solution |\n|-------|-------|----------|\n| `Dimension mismatch` | Embedding model dims != index config | Update `embedding_model_dims` in config |\n| `Connection refused` | Wrong host/port | Verify provider configuration |\n| `Authentication failed` | Invalid API key | Check API key in environment |\n| `Index not found` | Collection doesn't exist | Create index or use auto-creation |\n\n### Debug Mode\n\nEnable verbose logging for vector store operations:\n\n```python\nimport logging\nlogging.getLogger(\"mem0.vector_stores\").setLevel(logging.DEBUG)\n```\n\n## See Also\n\n- [Memory Core](memory-core) - The main memory orchestration layer\n- [Embedding Services](embeddings) - Text vectorization\n- [Configuration Guide](../setup/configuration) - Full configuration reference\n- [Deployment Guide](../deployment) - Production deployment patterns\n\n---\n\n<a id='page-embeddings'></a>\n\n## Embedding Models\n\n### 相关页面\n\n相关主题：[AI Model Integration](#page-ai-integration), [Vector Stores and Storage](#page-vector-stores)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [mem0/embeddings/base.py](https://github.com/mem0ai/mem0/blob/main/mem0/embeddings/base.py)\n- [mem0/embeddings/openai.py](https://github.com/mem0ai/mem0/blob/main/mem0/embeddings/openai.py)\n- [mem0/embeddings/azure_openai.py](https://github.com/mem0ai/mem0/blob/main/mem0/embeddings/azure_openai.py)\n- [mem0/embeddings/huggingface.py](https://github.com/mem0ai/mem0/blob/main/mem0/embeddings/huggingface.py)\n- [mem0/embeddings/ollama.py](https://github.com/mem0ai/mem0/blob/main/mem0/embeddings/ollama.py)\n- 
[mem0/embeddings/configs.py](https://github.com/mem0ai/mem0/blob/main/mem0/embeddings/configs.py)\n- [docs/components/embedders/overview.mdx](https://github.com/mem0ai/mem0/blob/main/docs/components/embedders/overview.mdx)\n- [docs/components/embedders/models/openai.mdx](https://github.com/mem0ai/mem0/blob/main/docs/components/embedders/models/openai.mdx)\n- [docs/components/embedders/models/huggingface.mdx](https://github.com/mem0ai/mem0/blob/main/docs/components/embedders/models/huggingface.mdx)\n- [docs/components/embedders/models/ollama.mdx](https://github.com/mem0ai/mem0/blob/main/docs/components/embedders/models/ollama.mdx)\n</details>\n\n# Embedding Models\n\nEmbedding models are a fundamental component of the mem0 memory system. They transform textual information into dense vector representations (embeddings) that enable semantic search, similarity matching, and efficient memory retrieval. The embedding layer sits at the core of mem0's architecture, bridging raw user interactions with the vector-based storage layer.\n\n## Overview\n\nMem0 provides a flexible, provider-agnostic embedding abstraction that supports multiple embedding backends while maintaining a consistent interface. This design allows users to choose embedding providers based on their requirements for cost, latency, privacy, or quality.\n\nThe embedding system in mem0 is built around an abstract base class that defines the contract for all concrete implementations. 
Each provider implementation handles the specifics of API communication, response parsing, and error handling while conforming to the unified interface.\n\n**Key characteristics of mem0's embedding layer:**\n\n- Provider-agnostic abstraction with consistent API across implementations\n- Support for both cloud-based and local embedding models\n- Configuration-driven provider selection\n- Seamless integration with the vector storage layer\n- Extensible architecture for adding custom embedding providers\n\n## Architecture\n\n```mermaid\ngraph TD\n    A[User Input] --> B[Memory Layer]\n    B --> C[Embedding Module]\n    C --> D[Vector Store]\n    \n    C --> E[OpenAI Embedder]\n    C --> F[Azure OpenAI Embedder]\n    C --> G[HuggingFace Embedder]\n    C --> H[Ollama Embedder]\n    \n    E --> I[text-embedding-3-small]\n    F --> J[Azure OpenAI Models]\n    G --> K[HF Sentence Transformers]\n    H --> L[Local Ollama Models]\n    \n    D --> M[Semantic Search]\n    D --> N[Memory Retrieval]\n    D --> O[Similarity Matching]\n```\n\n## Supported Providers\n\nMem0 supports multiple embedding providers to accommodate various deployment scenarios. Each provider implements the same abstract interface, allowing transparent switching between backends.\n\n### Provider Comparison\n\n| Provider | Type | Default Model | API Key Required | Local Model Support |\n|----------|------|---------------|------------------|---------------------|\n| OpenAI | Cloud | `text-embedding-3-small` | Yes | No |\n| Azure OpenAI | Cloud | Configurable | Yes | No |\n| HuggingFace | Cloud/Self-hosted | Various sentence-transformers | Optional | Yes |\n| Ollama | Local | `nomic-embed-text` | No | Yes |\n\n## Configuration\n\nEmbedding models are configured through the mem0 configuration system. 
Each provider has its own configuration parameters, but all share a common structure.\n\n### Basic Configuration\n\n```python\nfrom mem0 import Memory\n\nconfig = {\n    \"embedder\": {\n        \"provider\": \"openai\",\n        \"config\": {\n            \"model\": \"text-embedding-3-small\",\n            \"api_key\": \"sk-...\"\n        }\n    }\n}\n\nmemory = Memory.from_config(config)\n```\n\n### Environment Variable Configuration\n\nMany configuration parameters can be set via environment variables, simplifying deployment and reducing boilerplate code:\n\n| Environment Variable | Description | Provider |\n|---------------------|-------------|----------|\n| `OPENAI_API_KEY` | OpenAI API key for embeddings | OpenAI |\n| `AZURE_OPENAI_API_KEY` | Azure OpenAI API key | Azure OpenAI |\n| `HF_TOKEN` | HuggingFace API token | HuggingFace |\n| `OLLAMA_BASE_URL` | Ollama server URL | Ollama |\n\n## OpenAI Embeddings\n\nThe OpenAI embedder provides access to OpenAI's embedding models through the official API. 
It is the default provider in mem0 and offers a balance of quality, cost, and ease of use.\n\n### Supported Models\n\n| Model | Dimensions | Output Format | Use Case |\n|-------|------------|---------------|----------|\n| `text-embedding-3-small` | 1536 | Float32 | General purpose, recommended |\n| `text-embedding-3-large` | 3072 | Float32 | Higher quality, larger vectors |\n| `text-embedding-ada-002` | 1536 | Float32 | Legacy model, compatible |\n\n### Configuration Options\n\n```python\n{\n    \"provider\": \"openai\",\n    \"config\": {\n        \"model\": \"text-embedding-3-small\",  # Optional, defaults to text-embedding-3-small\n        \"api_key\": \"sk-...\",                 # Optional if OPENAI_API_KEY is set\n        \"base_url\": \"https://api.openai.com/v1\",  # Optional, for proxies\n        \"timeout\": 60,                       # Optional, request timeout in seconds\n        \"max_retries\": 3                     # Optional, number of retries on failure\n    }\n}\n```\n\n## Azure OpenAI Embeddings\n\nAzure OpenAI embeddings provide the same model quality as OpenAI with enterprise-grade security, compliance, and regional availability. This is the preferred option for organizations requiring Azure infrastructure.\n\n### Configuration Options\n\n```python\n{\n    \"provider\": \"azure_openai\",\n    \"config\": {\n        \"model\": \"text-embedding-3-small\",    # Model deployment name\n        \"api_key\": \"your-azure-api-key\",\n        \"azure_endpoint\": \"https://your-resource.openai.azure.com\",\n        \"azure_deployment\": \"your-deployment-name\",\n        \"api_version\": \"2024-02-01\"           # Optional, Azure API version\n    }\n}\n```\n\n## HuggingFace Embeddings\n\nThe HuggingFace embedder supports both cloud-based inference and local models from the HuggingFace ecosystem. 
This provides flexibility for privacy-sensitive applications or cost optimization.\n\n### Supported Model Families\n\n| Model Type | Examples | Description |\n|------------|----------|-------------|\n| Sentence Transformers | `all-MiniLM-L6-v2`, `BAAI/bge-large` | Optimized for sentence-level embeddings |\n| Generic Transformers | `bert-base-uncased` | General-purpose transformer models |\n\n### Configuration Options\n\n```python\n{\n    \"provider\": \"huggingface\",\n    \"config\": {\n        \"model\": \"sentence-transformers/all-MiniLM-L6-v2\",  # Model identifier\n        \"token\": \"hf_...\",           # Optional, for gated models\n        \"device\": \"cpu\",             # Optional, cpu/cuda/mps\n        \"encode_kwargs\": {          # Optional, encoding parameters\n            \"normalize_embeddings\": True\n        }\n    }\n}\n```\n\n## Ollama Embeddings\n\nOllama enables running embedding models entirely locally, providing complete data privacy and no API costs. This is ideal for development, testing, or production environments with strict data residency requirements.\n\n### Supported Models\n\n| Model | Dimensions | Description |\n|-------|------------|-------------|\n| `nomic-embed-text` | 768 | High-quality, efficient embeddings |\n| `mxbai-embed-large` | 1024 | Larger model for higher quality |\n| Custom Ollama models | Variable | Any embedding model available in Ollama |\n\n### Configuration Options\n\n```python\n{\n    \"provider\": \"ollama\",\n    \"config\": {\n        \"model\": \"nomic-embed-text\",        # Model name\n        \"base_url\": \"http://localhost:11434\" # Ollama server URL\n    }\n}\n```\n\n## Base Interface\n\nAll embedding providers inherit from the abstract base class that defines the standard interface:\n\n```python\nclass EmbedderBase(ABC):\n    @abstractmethod\n    def embed(self, text: str) -> List[float]:\n        \"\"\"Generate embedding vector for a single text.\"\"\"\n        pass\n    \n    @abstractmethod\n    def 
embed_batch(self, texts: List[str]) -> List[List[float]]:\n        \"\"\"Generate embedding vectors for multiple texts.\"\"\"\n        pass\n    \n    @abstractmethod\n    def get_vector_size(self) -> int:\n        \"\"\"Return the dimensionality of embedding vectors.\"\"\"\n        pass\n```\n\n## Usage Patterns\n\n### Single Text Embedding\n\n```python\nfrom mem0 import Memory\n\nmemory = Memory()\nresult = memory.add(\"User prefers dark mode theme\", user_id=\"user123\")\n```\n\n### Batch Embedding\n\n```python\nfrom mem0 import Memory\n\nmemory = Memory()\nmessages = [\n    \"User lives in San Francisco\",\n    \"Prefers coffee over tea\",\n    \"Works as a software engineer\"\n]\nresult = memory.add_batch(messages, user_id=\"user123\")\n```\n\n### Semantic Search with Custom Embedder\n\n```python\nfrom mem0 import Memory\n\nconfig = {\n    \"embedder\": {\n        \"provider\": \"ollama\",\n        \"config\": {\n            \"model\": \"nomic-embed-text\",\n            \"base_url\": \"http://localhost:11434\"\n        }\n    }\n}\n\nmemory = Memory.from_config(config)\nresults = memory.search(\"What are the user's preferences?\", user_id=\"user123\")\n```\n\n## Extending with Custom Providers\n\nTo add a new embedding provider, implement the `EmbedderBase` abstract class:\n\n```python\nfrom mem0.embeddings.base import EmbedderBase\n\nclass CustomEmbedder(EmbedderBase):\n    def __init__(self, config: dict):\n        self.config = config\n        # Initialize your embedding client\n    \n    def embed(self, text: str) -> List[float]:\n        # Implement single text embedding\n        pass\n    \n    def embed_batch(self, texts: List[str]) -> List[List[float]]:\n        # Implement batch embedding\n        pass\n    \n    def get_vector_size(self) -> int:\n        # Return embedding dimensions\n        pass\n```\n\n## Best Practices\n\n1. 
**Model Selection**: Choose `text-embedding-3-small` for general use cases as it offers the best balance of quality and cost. Use `text-embedding-3-large` when higher accuracy is required.\n\n2. **Local Deployment**: For privacy-sensitive applications, use Ollama with `nomic-embed-text` to keep all data local.\n\n3. **Consistent Embedding Dimensions**: Ensure all memories use the same embedding model and configuration for proper similarity calculations.\n\n4. **API Key Management**: Use environment variables for API keys in production environments rather than hardcoding credentials.\n\n5. **Error Handling**: Implement appropriate retry logic and timeout settings, especially when using cloud-based embedding providers.\n\n## Related Components\n\n- **Vector Store**: The embedding layer feeds into the vector storage system for efficient similarity search\n- **Memory Layer**: High-level memory operations use embeddings for storage and retrieval\n- **Configuration System**: Centralized configuration management for all embedding providers\n\n---\n\n<a id='page-python-sdk'></a>\n\n## Python SDK\n\n### 相关页面\n\n相关主题：[Memory Operations](#page-memory-operations), [TypeScript/Node.js SDK](#page-typescript-sdk)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [mem0/__init__.py](https://github.com/mem0ai/mem0/blob/main/mem0/__init__.py)\n- [mem0/memory/main.py](https://github.com/mem0ai/mem0/blob/main/mem0/memory/main.py)\n- [mem0/client/main.py](https://github.com/mem0ai/mem0/blob/main/mem0/client/main.py)\n- [mem0/configs/base.py](https://github.com/mem0ai/mem0/blob/main/mem0/configs/base.py)\n- [mem0/exceptions.py](https://github.com/mem0ai/mem0/blob/main/mem0/exceptions.py)\n- [docs/open-source/python-quickstart.mdx](https://github.com/mem0ai/mem0/blob/main/docs/open-source/python-quickstart.mdx)\n- [docs/api-reference.mdx](https://github.com/mem0ai/mem0/blob/main/docs/api-reference.mdx)\n</details>\n\n# Python SDK\n\n## Overview\n\nThe mem0 Python SDK 
provides a programmatic interface for integrating memory management capabilities into AI applications. It enables developers to store, retrieve, search, and manage persistent memory across AI agent interactions, supporting both self-hosted deployments and managed cloud services.\n\n资料来源：[mem0/__init__.py:1-50]()\n\n## Architecture\n\nThe SDK is structured around three core components that handle different aspects of memory operations:\n\n```mermaid\ngraph TD\n    A[Client Layer] --> B[Memory Layer]\n    A --> C[Configuration]\n    B --> D[Vector Store]\n    B --> E[LLM Integration]\n    C --> F[BaseConfig]\n    C --> G[LLMConfig]\n    C --> H[VectorStoreConfig]\n```\n\n### Core Components\n\n| Component | File | Purpose |\n|-----------|------|---------|\n| Client | `mem0/client/main.py` | High-level API for cloud and self-hosted deployments |\n| Memory | `mem0/memory/main.py` | Core memory operations engine |\n| Configs | `mem0/configs/base.py` | Configuration management for providers |\n\n资料来源：[mem0/client/main.py:1-30]()\n\n## Installation\n\nInstall the mem0 package along with required dependencies:\n\n```bash\npip install mem0ai\n```\n\nFor specific LLM and vector store backends, install additional packages:\n\n```bash\n# OpenAI + Qdrant\npip install mem0ai[openai,qdrant]\n\n# Azure OpenAI + Chroma\npip install mem0ai[azure-openai,chromadb]\n```\n\n资料来源：[docs/open-source/python-quickstart.mdx:1-50]()\n\n## Quick Start\n\n### Basic Memory Operations\n\n```python\nfrom mem0 import Memory\n\n# Initialize memory instance\nmemory = Memory()\n\n# Add memories\nresult = memory.add(\n    messages=[\n        {\"role\": \"user\", \"content\": \"I'm planning to visit Tokyo next month.\"},\n        {\"role\": \"assistant\", \"content\": \"That's exciting! 
Tokyo has great places to visit.\"}\n    ],\n    user_id=\"user_123\"\n)\n\n# Search memories\nresults = memory.search(\n    query=\"What are my travel plans?\",\n    user_id=\"user_123\"\n)\n\n# Get all memories for a user\nall_memories = memory.get_all(user_id=\"user_123\")\n\n# Update a memory\nmemory.update(memory_id=\"mem_xxx\", data=\"Updated content here\")\n\n# Delete a memory\nmemory.delete(memory_id=\"mem_xxx\")\n```\n\n资料来源：[docs/open-source/python-quickstart.mdx:50-100]()\n\n## Configuration\n\n### Configuration Parameters\n\n| Parameter | Type | Description | Default |\n|-----------|------|-------------|---------|\n| `llm` | dict | LLM provider configuration | Required |\n| `vector_store` | dict | Vector store provider configuration | Required |\n| `embedder` | dict | Embedding model configuration | Optional |\n| `memory_history_limit` | int | Number of conversation turns to retain | 20 |\n\n资料来源：[mem0/configs/base.py:1-80]()\n\n### LLM Configuration\n\n```python\nfrom mem0 import Memory\nfrom mem0.configs.base import LLMConfig\n\nconfig = LLMConfig(\n    provider=\"openai\",\n    model=\"gpt-4o\",\n    api_key=\"your-api-key\"\n)\n\nmemory = Memory.from_config(llm_config=config)\n```\n\n### Vector Store Configuration\n\n```python\nfrom mem0.configs.base import VectorStoreConfig\n\nvector_config = VectorStoreConfig(\n    provider=\"qdrant\",\n    host=\"localhost\",\n    port=6333,\n    collection_name=\"memories\"\n)\n```\n\n资料来源：[mem0/configs/base.py:80-150]()\n\n## Memory Operations API\n\n### Adding Memories\n\nThe `add()` method stores new memories from conversation messages:\n\n```python\nmemory.add(\n    messages=[{\"role\": \"user\", \"content\": \"User message\"}],\n    user_id=\"user_123\",\n    session_id=\"session_456\",\n    metadata={\"source\": \"chat\"}\n)\n```\n\n| Parameter | Type | Required | Description |\n|-----------|------|----------|-------------|\n| `messages` | list[dict] | Yes | List of message objects with role and content 
|\n| `user_id` | str | Yes | Unique identifier for the user |\n| `session_id` | str | No | Session or conversation identifier |\n| `metadata` | dict | No | Additional metadata to attach |\n\n资料来源：[mem0/memory/main.py:100-180]()\n\n### Searching Memories\n\n```python\nresults = memory.search(\n    query=\"Find information about...\",\n    user_id=\"user_123\",\n    limit=5,\n    rerank=True\n)\n```\n\n| Parameter | Type | Required | Description |\n|-----------|------|----------|-------------|\n| `query` | str | Yes | Search query text |\n| `user_id` | str | No | Filter by user |\n| `limit` | int | No | Maximum results (default: 10) |\n| `rerank` | bool | No | Apply reranking to results |\n\n资料来源：[mem0/memory/main.py:180-250]()\n\n### Retrieving Memories\n\n```python\n# Get all memories for a user\nall_memories = memory.get_all(user_id=\"user_123\")\n\n# Get specific memory by ID\nmemory_item = memory.get(memory_id=\"mem_xxx\")\n```\n\n### Updating Memories\n\n```python\nmemory.update(\n    memory_id=\"mem_xxx\",\n    data=\"Updated memory content\",\n    metadata={\"key\": \"value\"}\n)\n```\n\n### Deleting Memories\n\n```python\n# Delete specific memory\nmemory.delete(memory_id=\"mem_xxx\")\n\n# Delete all memories for a user\nmemory.delete_all(user_id=\"user_123\")\n```\n\n资料来源：[mem0/memory/main.py:250-350]()\n\n## Client Interface\n\nThe `Mem0` client provides a unified interface for interacting with mem0 services:\n\n```python\nfrom mem0 import Mem0\n\n# Initialize client\nclient = Mem0(api_key=\"your-api-key\", app_id=\"your-app-id\")\n\n# Add memories via client\nresult = client.add(\n    messages=[{\"role\": \"user\", \"content\": \"Hello\"}],\n    user_id=\"user_123\"\n)\n```\n\n资料来源：[mem0/client/main.py:1-100]()\n\n## Exception Handling\n\nThe SDK defines custom exceptions for error handling:\n\n| Exception | Description |\n|-----------|-------------|\n| `Mem0Exception` | Base exception class for all mem0 errors |\n| `ValidationError` | Invalid input 
parameters |\n| `AuthenticationError` | Invalid or missing API credentials |\n| `RateLimitError` | API rate limit exceeded |\n| `NotFoundError` | Requested resource not found |\n\n资料来源：[mem0/exceptions.py:1-50]()\n\n### Handling Exceptions\n\n```python\nfrom mem0.exceptions import Mem0Exception, ValidationError\n\ntry:\n    memory.add(messages=[], user_id=\"user_123\")\nexcept ValidationError as e:\n    print(f\"Invalid input: {e}\")\nexcept Mem0Exception as e:\n    print(f\"Memory operation failed: {e}\")\n```\n\n## Data Flow\n\n```mermaid\nsequenceDiagram\n    participant App as Application\n    participant SDK as Python SDK\n    participant Memory as Memory Engine\n    participant Vector as Vector Store\n    participant LLM as LLM Provider\n\n    App->>SDK: memory.add(messages)\n    SDK->>Memory: process_messages()\n    Memory->>LLM: extract_and_summarize()\n    LLM-->>Memory: structured_memories\n    Memory->>Vector: store(memories)\n    Vector-->>Memory: confirm\n    Memory-->>SDK: result\n    SDK-->>App: MemoryResult\n```\n\n## Supported Providers\n\n### LLM Providers\n\n| Provider | Package | Configuration Key |\n|----------|---------|-------------------|\n| OpenAI | `openai` | `openai` |\n| Azure OpenAI | `azure-openai` | `azure_openai` |\n| Anthropic | `anthropic` | `anthropic` |\n| Groq | `groq` | `groq` |\n| Ollama | `ollama` | `ollama` |\n| LM Studio | `lmstudio` | `lmstudio` |\n\n### Vector Store Providers\n\n| Provider | Package | Configuration Key |\n|----------|---------|-------------------|\n| Qdrant | `qdrant-client` | `qdrant` |\n| Chroma | `chromadb` | `chroma` |\n| Weaviate | `weaviate-client` | `weaviate` |\n| Milvus | `pymilvus` | `milvus` |\n| Pinecone | `pinecone-client` | `pinecone` |\n\n资料来源：[mem0/configs/base.py:150-250]()\n\n## Advanced Configuration\n\n### Self-Hosted Deployment\n\n```python\nfrom mem0 import Memory\n\nmemory = Memory()\n\n# Configure with custom providers\nmemory.configure(\n    llm={\n        \"provider\": 
\"ollama\",\n        \"model\": \"llama3.1\",\n        \"api_base\": \"http://localhost:11434\"\n    },\n    vector_store={\n        \"provider\": \"qdrant\",\n        \"host\": \"localhost\",\n        \"port\": 6333\n    }\n)\n```\n\n资料来源：[docs/open-source/python-quickstart.mdx:100-150]()\n\n### Embedder Configuration\n\n```python\nmemory.configure(\n    embedder={\n        \"provider\": \"openai\",\n        \"model\": \"text-embedding-3-small\",\n        \"dimension\": 1536\n    }\n)\n```\n\n## Best Practices\n\n1. **User Identification**: Always provide unique `user_id` for each user to maintain proper memory isolation\n2. **Session Management**: Use `session_id` to organize memories within conversation threads\n3. **Metadata**: Attach relevant metadata for better searchability and filtering\n4. **Error Handling**: Implement proper exception handling for production applications\n5. **Configuration**: Store API keys securely using environment variables\n\n## See Also\n\n- [API Reference Documentation](https://github.com/mem0ai/mem0/blob/main/docs/api-reference.mdx)\n- [Open Source Quickstart](https://github.com/mem0ai/mem0/blob/main/docs/open-source/python-quickstart.mdx)\n- [GitHub Repository](https://github.com/mem0ai/mem0)\n\n---\n\n<a id='page-typescript-sdk'></a>\n\n## TypeScript/Node.js SDK\n\n### 相关页面\n\n相关主题：[Python SDK](#page-python-sdk)\n\n<details>\n<summary>相关源码文件</summary>\n\n以下源码文件用于生成本页说明：\n\n- [mem0-ts/src/client/mem0.ts](https://github.com/mem0ai/mem0/blob/main/mem0-ts/src/client/mem0.ts)\n- [mem0-ts/src/client/index.ts](https://github.com/mem0ai/mem0/blob/main/mem0-ts/src/client/index.ts)\n- [mem0-ts/src/client/mem0.types.ts](https://github.com/mem0ai/mem0/blob/main/mem0-ts/src/client/mem0.types.ts)\n- [mem0-ts/src/client/config.ts](https://github.com/mem0ai/mem0/blob/main/mem0-ts/src/client/config.ts)\n- [mem0-ts/src/oss/src/memory/index.ts](https://github.com/mem0ai/mem0/blob/main/mem0-ts/src/oss/src/memory/index.ts)\n- 
[mem0-ts/src/oss/src/types/index.ts](https://github.com/mem0ai/mem0/blob/main/mem0-ts/src/oss/src/types/index.ts)\n- [mem0-ts/README.md](https://github.com/mem0ai/mem0/blob/main/mem0-ts/README.md)\n- [mem0-ts/package.json](https://github.com/mem0ai/mem0/blob/main/mem0-ts/package.json)\n</details>\n\n# TypeScript/Node.js SDK\n\nThe mem0 TypeScript/Node.js SDK provides a robust client library for integrating memory management capabilities into JavaScript and TypeScript applications. It enables developers to store, retrieve, search, and manage persistent memory across user interactions and AI agent workflows.\n\n## Overview\n\nThe SDK offers two primary deployment modes:\n\n| Mode | Description | Use Case |\n|------|-------------|----------|\n| **Hosted (mem0ai)** | Cloud-hosted memory service with API key authentication | Production applications requiring managed infrastructure |\n| **Open Source (OSS)** | Self-hosted memory implementation running entirely within the application | Privacy-sensitive applications, on-premise deployments, custom infrastructure |\n\n资料来源：[mem0-ts/README.md](https://github.com/mem0ai/mem0/blob/main/mem0-ts/README.md)\n\n## Architecture\n\n```mermaid\ngraph TD\n    A[Application] --> B[Mem0Client]\n    B --> C{Deployment Mode}\n    C -->|Hosted| D[mem0ai Cloud API]\n    C -->|OSS| E[Local Memory Store]\n    D --> F[Vector Database]\n    E --> G[SQLite/Vector Store]\n    \n    H[Mem0Config] --> B\n    I[API Key] --> B\n```\n\nThe SDK architecture separates configuration management, client initialization, and memory operations into distinct modules. 
The `Mem0Client` class serves as the primary interface, accepting a `Mem0Config` object during instantiation to determine deployment mode and connection parameters.\n\n资料来源：[mem0-ts/src/client/mem0.ts](https://github.com/mem0ai/mem0/blob/main/mem0-ts/src/client/mem0.ts)\n\n## Installation\n\nInstall the SDK using npm, yarn, or pnpm:\n\n```bash\nnpm install mem0ai\n# or\nyarn add mem0ai\n# or\npnpm add mem0ai\n```\n\nThe package name is `mem0ai` on npm, supporting both CommonJS and ESM module formats.\n\n资料来源：[mem0-ts/package.json](https://github.com/mem0ai/mem0/blob/main/mem0-ts/package.json)\n\n## Configuration\n\n### Mem0Config Parameters\n\n| Parameter | Type | Required | Default | Description |\n|-----------|------|----------|---------|-------------|\n| `apiKey` | `string` | Conditional | - | API key for hosted mem0ai service. Required when `orgId` or `projectId` is provided |\n| `orgId` | `string` | No | - | Organization ID for hosted deployment |\n| `projectId` | `string` | No | - | Project ID for hosted deployment |\n| `host` | `string` | No | `\"https://api.mem0.ai\"` | Base URL for hosted API endpoint |\n\n### OSS Configuration\n\n| Parameter | Type | Required | Default | Description |\n|-----------|------|----------|---------|-------------|\n| `embedder` | `Embedder` | Yes | - | Embedding model configuration for vectorization |\n| `vectorStore` | `VectorStore` | Yes | - | Vector storage backend (Chroma, Qdrant, or in-memory) |\n| `db` | `Database` | Yes | - | SQLite database for structured data |\n| `version` | `string` | No | `\"v1.0\"` | Memory schema version |\n\n资料来源：[mem0-ts/src/client/config.ts](https://github.com/mem0ai/mem0/blob/main/mem0-ts/src/client/config.ts)\n\n## Client Initialization\n\n### Hosted Mode\n\n```typescript\nimport { Mem0Client } from \"mem0ai\";\n\nconst client = new Mem0Client({\n  apiKey: \"your-api-key\",\n  orgId: \"your-org-id\",    // optional\n  projectId: \"your-project-id\"  // optional\n});\n```\n\n### Open Source 
Mode\n\n```typescript\nimport { Mem0Client } from \"mem0ai\";\n\nconst client = new Mem0Client({\n  embedder: {\n    provider: \"openai\",\n    config: {\n      api_key: \"your-openai-key\",\n      model: \"text-embedding-3-small\"\n    }\n  },\n  vectorStore: {\n    provider: \"chroma\",\n    config: {\n      collection_name: \"memory\"\n    }\n  },\n  db: {\n    provider: \"sqlite\"\n  }\n});\n```\n\n资料来源：[mem0-ts/src/client/index.ts](https://github.com/mem0ai/mem0/blob/main/mem0-ts/src/client/index.ts)\n\n## Core API Methods\n\n### Memory Operations\n\n| Method | Parameters | Return Type | Description |\n|--------|------------|-------------|-------------|\n| `add` | `messages`, `userId`, `metadata`, `filters` | `Promise<MemoryResult[]>` | Store new memories |\n| `search` | `query`, `userId`, `filters`, `limit` | `Promise<MemoryResult[]>` | Semantic search across memories |\n| `getAll` | `userId`, `filters` | `Promise<MemoryResult[]>` | Retrieve all memories for a user |\n| `get` | `memoryId` | `Promise<MemoryResult>` | Fetch a specific memory by ID |\n| `update` | `memoryId`, `data`, `metadata` | `Promise<MemoryResult>` | Modify existing memory content |\n| `delete` | `memoryId` | `Promise<void>` | Remove a memory entry |\n| `reset` | `userId` | `Promise<void>` | Delete all memories for a user |\n\n资料来源：[mem0-ts/src/client/mem0.ts](https://github.com/mem0ai/mem0/blob/main/mem0-ts/src/client/mem0.ts)\n\n### MemoryResult Data Model\n\n```typescript\ninterface MemoryResult {\n  id: string;           // Unique memory identifier\n  memory: string;       // Memory content text\n  event: string;        // Event type (e.g., \"memory\", \"preference\", \"fact\")\n  created_at: string;   // ISO timestamp\n  updated_at: string;   // ISO timestamp\n  metadata?: {          // Optional metadata object\n    category?: string;\n    source?: string;\n    [key: string]: any;\n  
};\n}\n```\n\n资料来源：[mem0-ts/src/client/mem0.types.ts](https://github.com/mem0ai/mem0/blob/main/mem0-ts/src/client/mem0.types.ts)\n\n### Add Memories\n\n```typescript\n// Add a single memory\nconst memories = await client.add({\n  messages: [\n    { role: \"user\", content: \"I prefer dark mode in my IDE\" },\n    { role: \"assistant\", content: \"I'll remember that you prefer dark mode\" }\n  ],\n  userId: \"user-123\"\n});\n\n// With metadata\nconst memories = await client.add({\n  messages: [\n    { role: \"user\", content: \"Book a flight to Tokyo next month\" }\n  ],\n  userId: \"user-123\",\n  metadata: {\n    category: \"travel\",\n    priority: \"high\"\n  }\n});\n```\n\n### Search Memories\n\n```typescript\nconst results = await client.search({\n  query: \"What are my IDE preferences?\",\n  userId: \"user-123\",\n  limit: 5\n});\n\nresults.forEach(memory => {\n  console.log(`${memory.id}: ${memory.memory}`);\n  console.log(`Category: ${memory.metadata?.category}`);\n});\n```\n\n### Get All Memories\n\n```typescript\nconst allMemories = await client.getAll({\n  userId: \"user-123\"\n});\n```\n\n### Update Memory\n\n```typescript\nawait client.update({\n  memoryId: \"memory-uuid-here\",\n  data: \"Updated memory content here\",\n  metadata: {\n    category: \"updated-category\"\n  }\n});\n```\n\n### Delete Memory\n\n```typescript\nawait client.delete({\n  memoryId: \"memory-uuid-here\"\n});\n```\n\n### Reset User Memories\n\n```typescript\nawait client.reset({\n  userId: \"user-123\"\n});\n```\n\n资料来源：[mem0-ts/README.md](https://github.com/mem0ai/mem0/blob/main/mem0-ts/README.md)\n\n## Open Source Module Structure\n\nThe OSS implementation follows a modular architecture with separate concerns for memory management, embedding, and storage.\n\n```mermaid\ngraph LR\n    A[Mem0Client] --> B[Memory Class]\n    B --> C[Embedding]\n    B --> D[Vector Store]\n    B --> E[SQLite DB]\n    C --> F[OpenAI Embeddings]\n    D --> G[Chroma/Qdrant/In-Memory]\n```\n\n### 
Memory Class\n\nThe `Memory` class orchestrates the OSS memory operations, coordinating between the embedding service, vector store, and SQLite database.\n\n| Method | Description |\n|--------|-------------|\n| `add` | Process and store new memories with embeddings |\n| `search` | Perform vector similarity search |\n| `get` | Retrieve memories by ID |\n| `delete` | Remove memory from all stores |\n| `reset` | Clear all user memories |\n\n资料来源：[mem0-ts/src/oss/src/memory/index.ts](https://github.com/mem0ai/mem0/blob/main/mem0-ts/src/oss/src/memory/index.ts)\n\n## Message Format\n\nThe SDK uses a standardized message format for conversation history:\n\n```typescript\ninterface Message {\n  role: \"system\" | \"user\" | \"assistant\";\n  content: string;\n}\n```\n\nMessages are processed to extract semantic meaning and stored as discrete memory entries with associated event types.\n\n资料来源：[mem0-ts/src/oss/src/types/index.ts](https://github.com/mem0ai/mem0/blob/main/mem0-ts/src/oss/src/types/index.ts)\n\n## Supported Embedders\n\n| Provider | Model Options | Configuration |\n|----------|---------------|---------------|\n| OpenAI | `text-embedding-3-small`, `text-embedding-3-large`, `text-embedding-ada-002` | `api_key` |\n| Local | Custom embedding models | `model_path` |\n\n资料来源：[mem0-ts/src/client/config.ts](https://github.com/mem0ai/mem0/blob/main/mem0-ts/src/client/config.ts)\n\n## Supported Vector Stores\n\n| Provider | Description | Persistence |\n|----------|-------------|-------------|\n| Chroma | Open source vector database | Durable |\n| Qdrant | High-performance vector search | Durable |\n| In-memory | Temporary storage for testing | Volatile |\n\n## Event Types\n\nMemories are categorized by event types for organizational purposes:\n\n| Event Type | Usage |\n|------------|-------|\n| `memory` | General conversation memories |\n| `preference` | User preferences and settings |\n| `fact` | Factual information about users |\n| `knowledge` | Learned domain 
knowledge |\n\n资料来源：[mem0-ts/src/client/mem0.types.ts](https://github.com/mem0ai/mem0/blob/main/mem0-ts/src/client/mem0.types.ts)\n\n## Workflow Diagram\n\n```mermaid\nsequenceDiagram\n    participant App as Application\n    participant Client as Mem0Client\n    participant API as mem0ai API\n    \n    App->>Client: new Mem0Client(config)\n    Note over Client: Initialize with config\n    \n    App->>Client: add({messages, userId})\n    Client->>API: POST /memories\n    API-->>Client: MemoryResult[]\n    Client-->>App: Promise<MemoryResult[]>\n    \n    App->>Client: search({query, userId})\n    Client->>API: POST /memories/search\n    API-->>Client: MemoryResult[]\n    Client-->>App: Promise<MemoryResult[]>\n    \n    App->>Client: getAll({userId})\n    Client->>API: GET /memories\n    API-->>Client: MemoryResult[]\n    Client-->>App: Promise<MemoryResult[]>\n```\n\n## Error Handling\n\nThe SDK uses standard JavaScript error handling patterns:\n\n```typescript\ntry {\n  const memories = await client.search({\n    query: \"test query\",\n    userId: \"user-123\"\n  });\n} catch (error) {\n  if (error.status === 401) {\n    console.error(\"Invalid API key\");\n  } else if (error.status === 404) {\n    console.error(\"Resource not found\");\n  } else {\n    console.error(\"Request failed:\", error.message);\n  }\n}\n```\n\n## Environment Variables\n\nWhile not required, the SDK supports environment-based configuration:\n\n```bash\nexport MEM0_API_KEY=\"your-api-key\"\nexport OPENAI_API_KEY=\"your-openai-key\"\n```\n\n## TypeScript Support\n\nThe SDK is written in TypeScript and provides full type definitions out of the box. 
No additional `@types` packages are required.\n\n```typescript\nimport { Mem0Client, Mem0Config, MemoryResult, Message } from \"mem0ai\";\n```\n\nAll exported types are available from the main package entry point.\n\n资料来源：[mem0-ts/src/client/index.ts](https://github.com/mem0ai/mem0/blob/main/mem0-ts/src/client/index.ts)\n\n## Quick Reference\n\n### Minimal Hosted Example\n\n```typescript\nimport { Mem0Client } from \"mem0ai\";\n\nconst client = new Mem0Client({ apiKey: \"your-key\" });\nconst memories = await client.add({\n  messages: [{ role: \"user\", content: \"Hello\" }],\n  userId: \"user-1\"\n});\n```\n\n### Minimal OSS Example\n\n```typescript\nimport { Mem0Client } from \"mem0ai\";\n\nconst client = new Mem0Client({\n  embedder: { provider: \"openai\", config: { api_key: \"key\", model: \"text-embedding-3-small\" } },\n  vectorStore: { provider: \"chroma\", config: { collection_name: \"memories\" } },\n  db: { provider: \"sqlite\" }\n});\n```\n\n资料来源：[mem0-ts/README.md](https://github.com/mem0ai/mem0/blob/main/mem0-ts/README.md)\n\n---\n\n---\n\n## Doramagic 踩坑日志\n\n项目：mem0ai/mem0\n\n摘要：发现 6 个潜在踩坑项，其中 0 个为 high/blocking；最高优先级：能力坑 - 能力判断依赖假设。\n\n## 1. 能力坑 · 能力判断依赖假设\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：README/documentation is current enough for a first validation pass.\n- 对用户的影响：假设不成立时，用户拿不到承诺的能力。\n- 建议检查：将假设转成下游验证清单。\n- 防护动作：假设必须转成验证项；没有验证结果前不能写成事实。\n- 证据：capability.assumptions | github_repo:656099147 | https://github.com/mem0ai/mem0 | README/documentation is current enough for a first validation pass.\n\n## 2. 维护坑 · 维护活跃度未知\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：未记录 last_activity_observed。\n- 对用户的影响：新项目、停更项目和活跃项目会被混在一起，推荐信任度下降。\n- 建议检查：补 GitHub 最近 commit、release、issue/PR 响应信号。\n- 防护动作：维护活跃度未知时，推荐强度不能标为高信任。\n- 证据：evidence.maintainer_signals | github_repo:656099147 | https://github.com/mem0ai/mem0 | last_activity_observed missing\n\n## 3. 
安全/权限坑 · 下游验证发现风险项\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：no_demo\n- 对用户的影响：下游已经要求复核，不能在页面中弱化。\n- 建议检查：进入安全/权限治理复核队列。\n- 防护动作：下游风险存在时必须保持 review/recommendation 降级。\n- 证据：downstream_validation.risk_items | github_repo:656099147 | https://github.com/mem0ai/mem0 | no_demo; severity=medium\n\n## 4. 安全/权限坑 · 存在评分风险\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：no_demo\n- 对用户的影响：风险会影响是否适合普通用户安装。\n- 建议检查：把风险写入边界卡，并确认是否需要人工复核。\n- 防护动作：评分风险必须进入边界卡，不能只作为内部分数。\n- 证据：risks.scoring_risks | github_repo:656099147 | https://github.com/mem0ai/mem0 | no_demo; severity=medium\n\n## 5. 维护坑 · issue/PR 响应质量未知\n\n- 严重度：low\n- 证据强度：source_linked\n- 发现：issue_or_pr_quality=unknown。\n- 对用户的影响：用户无法判断遇到问题后是否有人维护。\n- 建议检查：抽样最近 issue/PR，判断是否长期无人处理。\n- 防护动作：issue/PR 响应未知时，必须提示维护风险。\n- 证据：evidence.maintainer_signals | github_repo:656099147 | https://github.com/mem0ai/mem0 | issue_or_pr_quality=unknown\n\n## 6. 维护坑 · 发布节奏不明确\n\n- 严重度：low\n- 证据强度：source_linked\n- 发现：release_recency=unknown。\n- 对用户的影响：安装命令和文档可能落后于代码，用户踩坑概率升高。\n- 建议检查：确认最近 release/tag 和 README 安装命令是否一致。\n- 防护动作：发布节奏未知或过期时，安装说明必须标注可能漂移。\n- 证据：evidence.maintainer_signals | github_repo:656099147 | https://github.com/mem0ai/mem0 | release_recency=unknown\n\n<!-- canonical_name: mem0ai/mem0; human_manual_source: deepwiki_human_wiki -->\n",
      "summary": "DeepWiki/Human Wiki 完整输出，末尾追加 Discovery Agent 踩坑日志。",
      "title": "Human Manual / 人类版说明书"
    },
    "pitfall_log": {
      "asset_id": "pitfall_log",
      "filename": "PITFALL_LOG.md",
      "markdown": "# Pitfall Log / 踩坑日志\n\n项目：mem0ai/mem0\n\n摘要：发现 6 个潜在踩坑项，其中 0 个为 high/blocking；最高优先级：能力坑 - 能力判断依赖假设。\n\n## 1. 能力坑 · 能力判断依赖假设\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：README/documentation is current enough for a first validation pass.\n- 对用户的影响：假设不成立时，用户拿不到承诺的能力。\n- 建议检查：将假设转成下游验证清单。\n- 防护动作：假设必须转成验证项；没有验证结果前不能写成事实。\n- 证据：capability.assumptions | github_repo:656099147 | https://github.com/mem0ai/mem0 | README/documentation is current enough for a first validation pass.\n\n## 2. 维护坑 · 维护活跃度未知\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：未记录 last_activity_observed。\n- 对用户的影响：新项目、停更项目和活跃项目会被混在一起，推荐信任度下降。\n- 建议检查：补 GitHub 最近 commit、release、issue/PR 响应信号。\n- 防护动作：维护活跃度未知时，推荐强度不能标为高信任。\n- 证据：evidence.maintainer_signals | github_repo:656099147 | https://github.com/mem0ai/mem0 | last_activity_observed missing\n\n## 3. 安全/权限坑 · 下游验证发现风险项\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：no_demo\n- 对用户的影响：下游已经要求复核，不能在页面中弱化。\n- 建议检查：进入安全/权限治理复核队列。\n- 防护动作：下游风险存在时必须保持 review/recommendation 降级。\n- 证据：downstream_validation.risk_items | github_repo:656099147 | https://github.com/mem0ai/mem0 | no_demo; severity=medium\n\n## 4. 安全/权限坑 · 存在评分风险\n\n- 严重度：medium\n- 证据强度：source_linked\n- 发现：no_demo\n- 对用户的影响：风险会影响是否适合普通用户安装。\n- 建议检查：把风险写入边界卡，并确认是否需要人工复核。\n- 防护动作：评分风险必须进入边界卡，不能只作为内部分数。\n- 证据：risks.scoring_risks | github_repo:656099147 | https://github.com/mem0ai/mem0 | no_demo; severity=medium\n\n## 5. 维护坑 · issue/PR 响应质量未知\n\n- 严重度：low\n- 证据强度：source_linked\n- 发现：issue_or_pr_quality=unknown。\n- 对用户的影响：用户无法判断遇到问题后是否有人维护。\n- 建议检查：抽样最近 issue/PR，判断是否长期无人处理。\n- 防护动作：issue/PR 响应未知时，必须提示维护风险。\n- 证据：evidence.maintainer_signals | github_repo:656099147 | https://github.com/mem0ai/mem0 | issue_or_pr_quality=unknown\n\n## 6. 
维护坑 · 发布节奏不明确\n\n- 严重度：low\n- 证据强度：source_linked\n- 发现：release_recency=unknown。\n- 对用户的影响：安装命令和文档可能落后于代码，用户踩坑概率升高。\n- 建议检查：确认最近 release/tag 和 README 安装命令是否一致。\n- 防护动作：发布节奏未知或过期时，安装说明必须标注可能漂移。\n- 证据：evidence.maintainer_signals | github_repo:656099147 | https://github.com/mem0ai/mem0 | release_recency=unknown\n",
      "summary": "用户实践前最可能遇到的身份、安装、配置、运行和安全坑。",
      "title": "Pitfall Log / 踩坑日志"
    },
    "prompt_preview": {
      "asset_id": "prompt_preview",
      "filename": "PROMPT_PREVIEW.md",
      "markdown": "# mem0 - Prompt Preview\n\n> Copy the prompt below into your AI host before installing anything.\n> Its purpose is to let you safely feel the project's workflow, not to claim the project has already run.\n\n## Copy this prompt\n\n```text\nYou are using an independent Doramagic capability pack for mem0ai/mem0.\n\nProject:\n- Name: mem0\n- Repository: https://github.com/mem0ai/mem0\n- Summary: Universal memory layer for AI Agents\n- Host target: chatgpt\n\nGoal:\nHelp me evaluate this project for the following task without installing it yet: Universal memory layer for AI Agents\n\nBefore taking action:\n1. Restate my task, success standard, and boundary.\n2. Identify whether the next step requires tools, browser access, network access, filesystem access, credentials, package installation, or host configuration.\n3. Use only the Doramagic Project Pack, the upstream repository, and the source-linked evidence listed below.\n4. If a real command, install step, API call, file write, or host integration is required, mark it as \"requires post-install verification\" and ask for approval first.\n5. If evidence is missing, say \"evidence is missing\" instead of filling the gap.\n\nPreviewable capabilities:\n- Capability 1: Use the source-backed project context to guide one small, checkable workflow step.\n\nCapabilities that require post-install verification:\n- Capability 1: Use the source-backed project context to guide one small, checkable workflow step.\n- Capability 2: Use the source-backed project context to guide one small, checkable workflow step.\n\nCore service flow:\n1. page-introduction: Introduction to Mem0. Produce one small intermediate artifact and wait for confirmation.\n2. page-quickstart: Quick Start Guide. Produce one small intermediate artifact and wait for confirmation.\n3. page-architecture: System Architecture. Produce one small intermediate artifact and wait for confirmation.\n4. page-memory-operations: Memory Operations. 
Produce one small intermediate artifact and wait for confirmation.\n5. page-ai-integration: AI Model Integration. Produce one small intermediate artifact and wait for confirmation.\n\nSource-backed evidence to keep in mind:\n- https://github.com/mem0ai/mem0\n- https://github.com/mem0ai/mem0#readme\n- mem0-plugin/skills/mem0/SKILL.md\n- mem0-plugin/skills/mem0-mcp/SKILL.md\n- openclaw/skills/memory-dream/SKILL.md\n- openclaw/skills/memory-triage/SKILL.md\n- skills/mem0/SKILL.md\n- skills/mem0-cli/SKILL.md\n- skills/mem0-integrate/SKILL.md\n- skills/mem0-test-integration/SKILL.md\n\nFirst response rules:\n1. Start Step 1 only.\n2. Explain the one service action you will perform first.\n3. Ask exactly three questions about my target workflow, success standard, and sandbox boundary.\n4. Stop and wait for my answers.\n\nStep 1 follow-up protocol:\n- After I answer the first three questions, stay in Step 1.\n- Produce six parts only: clarified task, success standard, boundary conditions, two or three options, tradeoffs for each option, and one recommendation.\n- End by asking whether I confirm the recommendation.\n- Do not move to Step 2 until I explicitly confirm.\n\nConversation rules:\n- Advance one step at a time and wait for confirmation after each small artifact.\n- Write outputs as recommendations or planned checks, not as completed execution.\n- Do not claim tests passed, files changed, commands ran, APIs were called, or the project was installed.\n- If the user asks for execution, first provide the sandbox setup, expected output, rollback, and approval checkpoint.\n```\n",
      "summary": "不安装项目也能感受能力节奏的安全试用 Prompt。",
      "title": "Prompt Preview / 安装前试用 Prompt"
    },
    "quick_start": {
      "asset_id": "quick_start",
      "filename": "QUICK_START.md",
      "markdown": "# Quick Start / 官方入口\n\n项目：mem0ai/mem0\n\n## 官方安装入口\n\n### Python / pip · 官方安装入口\n\n```bash\npip install mem0ai\n```\n\n来源：https://github.com/mem0ai/mem0#readme\n\n## 来源\n\n- repo: https://github.com/mem0ai/mem0\n- docs: https://github.com/mem0ai/mem0#readme\n",
      "summary": "从项目官方 README 或安装文档提取的开工入口。",
      "title": "Quick Start / 官方入口"
    }
  },
  "validation_id": "dval_6c80ffb7d0c04b96bdb9f585b93e58b2"
}
