# chasm-cli 1.5.4
#
# Universal chat session manager - harvest, merge, and analyze AI chat history from VS Code, Cursor, and other editors
# Documentation
"""Inspect the full .json.bak session backup to understand the complete session data."""
import json, os

path = r"C:\Users\adamm\AppData\Roaming\Code\User\workspaceStorage\5ec71800c69c79b96b06a37e38537907\chatSessions\6be29cba-331e-4aa4-bc58-659cc20f4800.json.bak"
size = os.path.getsize(path)
print(f"File size: {size:,} bytes ({size/1024/1024:.1f} MB)")

with open(path, "r", encoding="utf-8") as f:
    data = json.load(f)

print(f"Type: {type(data).__name__}")
print(f"Top-level keys: {sorted(data.keys())}")
print(f"Version: {data.get('version')}")
print(f"Session ID: {data.get('sessionId')}")
print(f"Custom title: {data.get('customTitle')}")
print(f"Creation date: {data.get('creationDate')}")
print(f"hasPendingEdits: {data.get('hasPendingEdits')}")
print(f"pendingRequests: {len(data.get('pendingRequests', []))}")

reqs = data.get("requests", [])
print(f"\nNum requests: {len(reqs)}")

if reqs:
    print(f"First request timestamp: {reqs[0].get('timestamp')}")
    print(f"Last request timestamp: {reqs[-1].get('timestamp')}")
    
    # Count requests by modelState
    states = {}
    for r in reqs:
        ms = r.get("modelState", {})
        if isinstance(ms, dict):
            v = ms.get("value", "missing")
        else:
            v = "non-dict"
        states[v] = states.get(v, 0) + 1
    print(f"ModelState distribution: {states}")
    
    # Check for missing ranges
    missing_ranges = 0
    total_parts = 0
    for r in reqs:
        msg = r.get("message", {})
        if isinstance(msg, dict):
            for part in msg.get("parts", []):
                total_parts += 1
                if "range" not in part:
                    missing_ranges += 1
    print(f"Message parts: {total_parts} total, {missing_ranges} missing range")
    
    # Check response parts
    total_resp_parts = 0
    for r in reqs:
        resp = r.get("response")
        if isinstance(resp, list):
            total_resp_parts += len(resp)
    print(f"Total response parts across all requests: {total_resp_parts}")
    
    # Sample first 3 requests
    print("\n--- First 3 requests ---")
    for i, r in enumerate(reqs[:3]):
        msg = r.get("message", {})
        msg_text = msg.get("text", msg) if isinstance(msg, dict) else str(msg)
        if isinstance(msg_text, str):
            msg_text = msg_text[:100]
        resp_count = len(r.get("response", []) or [])
        ms = r.get("modelState", {})
        agent = r.get("agent", {})
        agent_id = agent.get("id", "none") if isinstance(agent, dict) else "none"
        print(f"  [{i}] agent={agent_id} state={ms} resp_parts={resp_count}")
        print(f"       msg: {msg_text}")
    
    # Sample last 3 requests
    print("\n--- Last 3 requests ---")
    for i in range(max(0, len(reqs)-3), len(reqs)):
        r = reqs[i]
        msg = r.get("message", {})
        msg_text = msg.get("text", msg) if isinstance(msg, dict) else str(msg)
        if isinstance(msg_text, str):
            msg_text = msg_text[:100]
        resp_count = len(r.get("response", []) or [])
        ms = r.get("modelState", {})
        agent = r.get("agent", {})
        agent_id = agent.get("id", "none") if isinstance(agent, dict) else "none"
        print(f"  [{i}] agent={agent_id} state={ms} resp_parts={resp_count}")
        print(f"       msg: {msg_text}")