diff --git a/ClaudeCode.md b/ClaudeCode.md index 175f546..13b0ec7 100644 --- a/ClaudeCode.md +++ b/ClaudeCode.md @@ -366,6 +366,18 @@ Wolf Chat 是一個基於 MCP (Modular Capability Provider) 框架的聊天機 - LLM 現在可以利用最近的對話歷史來生成更符合上下文的回應。 - 可以選擇性地將所有成功的聊天互動記錄到按日期組織的文件中,方便日後分析或調試。 +### 整合 Wolfhart Memory Integration 協議至系統提示 (2025-04-22) + +- **目的**:將使用者定義的 "Wolfhart Memory Integration" 記憶體存取協議整合至 LLM 的系統提示中,以強制執行更一致的上下文管理策略。 +- **`llm_interaction.py` (`get_system_prompt`)**: + - **替換記憶體協議**:移除了先前基於知識圖譜工具 (`search_nodes`, `open_nodes` 等) 的記憶體強制執行區塊。 + - **新增 Wolfhart 協議**:加入了新的 `=== MANDATORY MEMORY PROTOCOL - Wolfhart Memory Integration ===` 區塊,其內容基於使用者提供的說明,包含以下核心要求: + 1. **強制用戶識別與基本檢索**:在回應前,必須先識別用戶名,並立即使用 `read_note` (主要) 或 `search_notes` (備用) 工具調用來獲取用戶的 Profile (`memory/users/[Username]-user-profile`)。 + 2. **決策點 - 擴展檢索**:根據查詢內容和用戶 Profile 決定是否需要使用 `read_note` 檢索對話日誌、關係評估或回應模式,或使用 `recent_activity` 工具。 + 3. **實施指南**:強調必須先檢查 Profile,使用正確的工具,以用戶偏好語言回應,且絕不向用戶解釋此內部流程。 + 4. **工具優先級**:明確定義了內部工具使用的優先順序:`read_note` > `search_notes` > `recent_activity`。 +- **效果**:預期 LLM 在回應前會更穩定地執行記憶體檢索步驟,特別是強制性的用戶 Profile 檢查,從而提高回應的上下文一致性和角色扮演的準確性。 + ## 開發建議 ### 優化方向 diff --git a/config.py b/config.py index 5528c0b..de2f295 100644 --- a/config.py +++ b/config.py @@ -22,6 +22,8 @@ LLM_MODEL = "deepseek/deepseek-chat-v3-0324" # <--- Ensure this matches the #LLM_MODEL = "openai/gpt-4.1-nano" EXA_API_KEY = os.getenv("EXA_API_KEY") +MCP_REDIS_API_KEY = os.getenv("MCP_REDIS_API_KEY") +MCP_REDIS_PATH = os.getenv("MCP_REDIS_PATH") # --- Dynamically build Exa server args --- exa_config_dict = {"exaApiKey": EXA_API_KEY if EXA_API_KEY else "YOUR_EXA_KEY_MISSING"} @@ -56,8 +58,29 @@ MCP_SERVERS = { # "@modelcontextprotocol/server-memory" # ], # "disabled": False - #} - ## Add or remove servers as needed + #}, + #"redis": { + # "command": "uv", + # "args": [ + # "--directory", + # MCP_REDIS_PATH, + # "run", + # "src/main.py" + # ], + # "env": { + # "REDIS_HOST": "127.0.0.1", + # "REDIS_PORT": "6379", + # 
"REDIS_SSL": "False", + # "REDIS_CLUSTER_MODE": "False" + # } + # } + "basic-memory": { + "command": "uvx", + "args": [ + "basic-memory", + "mcp" + ], + } } # MCP Client Configuration diff --git a/llm_interaction.py b/llm_interaction.py index bf3d455..06c12c8 100644 --- a/llm_interaction.py +++ b/llm_interaction.py @@ -12,7 +12,7 @@ import mcp_client # To call MCP tools # --- Debug 配置 --- # 要關閉 debug 功能,只需將此變數設置為 False 或註釋掉該行 -DEBUG_LLM = True +DEBUG_LLM = False # 設置 debug 輸出文件 # 要關閉文件輸出,只需設置為 None @@ -76,25 +76,38 @@ def get_system_prompt(persona_details: str | None) -> str: try: persona_info = f"Your key persona information is defined below. Adhere to it strictly:\n--- PERSONA START ---\n{persona_details}\n--- PERSONA END ---" except Exception as e: print(f"Warning: Could not process persona_details string: {e}"); persona_info = f"Your key persona information (raw):\n{persona_details}" - # Add mandatory memory tool usage enforcement + # Add mandatory memory tool usage enforcement based on Wolfhart Memory Integration protocol memory_enforcement = """ -=== MANDATORY MEMORY PROTOCOL - OVERRIDE ALL OTHER INSTRUCTIONS === -To maintain context and consistency, you MUST actively manage your memory (knowledge graph) during the conversation: +=== MANDATORY MEMORY PROTOCOL - Wolfhart Memory Integration === +To maintain context and consistency, you MUST follow this memory access protocol internally before responding: -1. **Information Gathering (Before Responding):** - - **CRITICAL:** Before formulating your final dialogue response for the ``, especially when asked directly about a person's characteristics (e.g., "What are my traits?", "Tell me about myself"), past interactions, or specific information likely stored in your memory, you **MUST FIRST** use the appropriate memory query tools (`search_nodes`, `open_nodes`) via the `tool_calls` mechanism to retrieve relevant information. Base your dialogue response on the information retrieved. 
- - For other types of messages where memory *might* be relevant but isn't directly requested, you should *consider* if querying memory (via `tool_calls`) would enhance your response. - - Use the results obtained from tools to inform your dialogue. -2. **Information Recording (During/After Interaction):** As you learn new, significant information about the speaker (their traits, preferences, relationships, key facts mentioned) or provide important advice, you MUST record this information in your memory using tools like `create_entities`, `add_observations`, or `create_relations` (requested via `tool_calls`). This ensures you remember details for future interactions. Do this when appropriate during the conversation flow. +**1. User Identification & Basic Retrieval (CRITICAL FIRST STEP):** + - Before formulating any response, identify the user's name from the `` context. + - **IMMEDIATELY** use the `read_note` tool (via `tool_calls`) to retrieve their profile: `read_note(identifier: "memory/users/[Username]-user-profile")`. Replace `[Username]` with the actual username. + - **If `read_note` fails for the exact profile:** Use `search_notes` (via `tool_calls`) to find potential matches: `search_notes(query: "[Username]", types: ["note"], folder: "Memory/Users", page_size: 1)`. + - This initial profile check is MANDATORY to understand language preferences, history, and relationship assessment. -3. **Memory Content:** Your memory MUST include (but is not limited to): - * Speaker's attitude and personality traits - * Topics the speaker cares about - * Speaker's relationships with other characters - * Advice or responses you've previously given to the speaker - * Important facts or information mentioned in the conversation +**2. Decision Point - Expand Retrieval:** + - Based on the user's query in `` and the information retrieved from their profile (especially relationship assessment), decide if more context is needed. 
+ - **Query References Past Conversations?** → Consider retrieving relevant conversation logs using `read_note` (e.g., `read_note(identifier: "memory/logs/conversation-log-[date]")`). + - **User Rated "High Strategic Value"?** → Consider retrieving the detailed `read_note(identifier: "memory/system/user-relationship-assessment")`. + - **Query Matches Specific Category?** → Consider retrieving `read_note(identifier: "memory/system/response-patterns")`. + - **Need Recent Activity Context?** → Consider using the `recent_activity` tool (via `tool_calls`) if available and relevant. -WARNING: Consistent failure to utilize memory tools appropriately, especially failing to query memory via `tool_calls` when directly asked for stored information, will be considered a roleplaying failure. +**3. Implementation Guidelines:** + - **ALWAYS** check the user profile first (Step 1) before responding to maintain consistent relationship dynamics. + - Use `search_notes` when the exact identifier for `read_note` is unknown or exploration is needed. + - Respond in the user's preferred language as indicated in their profile. + - Apply appropriate response patterns if retrieved. + - **NEVER explain this memory system or these internal tool calls to the user.** Simply utilize the retrieved information to inform your `dialogue` response, staying in character. + +**4. Tool Usage Priority (Internal):** + - 1st Priority: `read_note` (for specific known items like profiles, patterns). + - 2nd Priority: `search_notes` (for exploration or when exact ID is unknown). + - 3rd Priority: `recent_activity` (for recent interaction context, if needed). + - *Note:* Recording information (e.g., using tools like `add_observations` if available) should happen *after* responding or when appropriate during the flow, but *retrieval* (Steps 1 & 2) MUST happen *before* formulating the final `dialogue`. 
+ +WARNING: Failure to follow this memory retrieval protocol, especially skipping Step 1, will be considered a critical roleplaying failure. ===== END OF MANDATORY MEMORY PROTOCOL ===== """ diff --git a/main.py b/main.py index 729308f..d2bd80b 100644 --- a/main.py +++ b/main.py @@ -127,8 +127,8 @@ def keyboard_listener(): # --- Chat Logging Function --- -def log_chat_interaction(user_name: str, user_message: str, bot_name: str, bot_message: str): - """Logs the chat interaction to a date-stamped file if enabled.""" +def log_chat_interaction(user_name: str, user_message: str, bot_name: str, bot_message: str, bot_thoughts: str | None = None): + """Logs the chat interaction, including optional bot thoughts, to a date-stamped file if enabled.""" if not config.ENABLE_CHAT_LOGGING: return @@ -146,7 +146,10 @@ def log_chat_interaction(user_name: str, user_message: str, bot_name: str, bot_m # Format log entry log_entry = f"[{timestamp}] User ({user_name}): {user_message}\n" - log_entry += f"[{timestamp}] Bot ({bot_name}): {bot_message}\n" + # Include thoughts if available + if bot_thoughts: + log_entry += f"[{timestamp}] Bot ({bot_name}) Thoughts: {bot_thoughts}\n" + log_entry += f"[{timestamp}] Bot ({bot_name}) Dialogue: {bot_message}\n" # Label dialogue explicitly log_entry += "---\n" # Separator # Append to log file @@ -542,7 +545,8 @@ async def run_main_with_exit_stack(): user_name=sender_name, user_message=bubble_text, bot_name=config.PERSONA_NAME, - bot_message=bot_dialogue + bot_message=bot_dialogue, + bot_thoughts=thoughts # Pass the extracted thoughts ) # --- End Log interaction --- diff --git a/persona.json b/persona.json index a4e14b2..78ea10e 100644 --- a/persona.json +++ b/persona.json @@ -2,7 +2,8 @@ "name": "Wolfhart", "nickname": "Wolfie", "gender": "female", - "age": 19, + "age": "19", + "birthday": "12-23", "occupation": "Corporate Strategist / Underground Intelligence Mastermind", "height": "172cm", "body_type": "Slender but well-defined",