# chasm-cli 1.5.4
#
# Universal chat session manager - harvest, merge, and analyze AI chat history
# from VS Code, Cursor, and other editors.
# Documentation
"""Compare similarly-sized working and broken session JSONL files directly."""
import json, os

# Root of VS Code's per-workspace storage; each workspace hash directory holds
# a chatSessions/ folder of *.jsonl session files.
WS_BASE = r"C:\Users\adamm\AppData\Roaming\Code\User\workspaceStorage"


def _session_path(workspace_hash, session_id):
    """Return the absolute path of one chatSessions/<session_id>.jsonl file."""
    return os.path.join(WS_BASE, workspace_hash, "chatSessions", session_id + ".jsonl")


# Pick similarly-sized sessions: working bc6d5655 (4947b) vs broken 6be29cba (4771b)
WORKING_FILE = _session_path("82cdabb21413f2ff42168423e82c8bdf", "bc6d5655-0778-4d67-8d68-660821103ca8")
BROKEN_FILE = _session_path("5ec71800c69c79b96b06a37e38537907", "6be29cba-331e-4aa4-bc58-659cc20f4800")

# Also check another working session of larger size
WORKING_FILE2 = _session_path("82cdabb21413f2ff42168423e82c8bdf", "3f5df584-fcdd-47a0-afbf-0695b881f33c")

# Another broken session (AgenticFortress)
BROKEN_FILE2 = _session_path("724ab159cbc91cdd8242d9b5aa690c3b", "4e5dd6b4-ea53-475b-8f9e-8fad3bf59388")

def _recover_first_object(line):
    """Best-effort recovery when one physical line holds concatenated JSON objects.

    Scans brace depth to find where the first top-level ``{...}`` closes.
    NOTE: the scan does not account for braces inside JSON string values, so it
    can misfire on pathological content — acceptable for a diagnostic tool.

    Returns the parsed first object, or None if nothing could be recovered.
    """
    brace_depth = 0
    first_end = -1
    for ci, ch in enumerate(line):
        if ch == '{':
            brace_depth += 1
        elif ch == '}':
            brace_depth -= 1
            if brace_depth == 0:
                first_end = ci
                break
    # Only a "concatenated" case if the first object closes before line end.
    if not (0 < first_end < len(line) - 1):
        return None
    print(f"  CONCATENATED JSON detected! First object ends at char {first_end}")
    print(f"  Char after first object: '{line[first_end+1]}'")
    try:
        first_obj = json.loads(line[:first_end + 1])
    except json.JSONDecodeError:  # narrowed from the original bare except
        print(f"  Couldn't parse first object either")
        return None
    print(f"  First object parsed OK, keys: {sorted(first_obj.keys())}")
    return first_obj


def _describe_object(obj):
    """Print a structural summary of one parsed session object.

    Shows top-level keys, then the shape of each value: nested dict keys,
    list lengths plus the keys of the first element, and truncated previews
    of long strings.
    """
    print(f"Top-level keys: {sorted(obj.keys())}")

    for key in sorted(obj.keys()):
        val = obj[key]
        if isinstance(val, dict):
            sub_keys = sorted(val.keys())
            if len(sub_keys) > 10:
                print(f"  {key}: dict({len(sub_keys)} keys) = {sub_keys[:5]}...")
            else:
                print(f"  {key}: dict = {sub_keys}")
            # Show sub-dict values for small dicts
            if len(sub_keys) <= 15:
                for sk in sub_keys:
                    sv = val[sk]
                    if isinstance(sv, dict):
                        print(f"    {sk}: dict({len(sv)} keys) = {sorted(sv.keys())}")
                    elif isinstance(sv, list):
                        print(f"    {sk}: list[{len(sv)}]")
                        if sv and isinstance(sv[0], dict):
                            print(f"      [0] keys: {sorted(sv[0].keys())}")
                    elif isinstance(sv, str) and len(sv) > 100:
                        print(f"    {sk}: str({len(sv)}) = {sv[:80]}...")
                    else:
                        print(f"    {sk}: {json.dumps(sv)}")
        elif isinstance(val, list):
            print(f"  {key}: list[{len(val)}]")
            if val:
                if isinstance(val[0], dict):
                    print(f"    [0] keys: {sorted(val[0].keys())}")
                    for k0, v0 in sorted(val[0].items()):
                        if isinstance(v0, dict):
                            print(f"      {k0}: dict = {sorted(v0.keys())}")
                        elif isinstance(v0, list):
                            print(f"      {k0}: list[{len(v0)}]")
                            if v0 and isinstance(v0[0], dict):
                                print(f"        [0] keys: {sorted(v0[0].keys())}")
                        elif isinstance(v0, str) and len(v0) > 100:
                            print(f"      {k0}: str({len(v0)})")
                        else:
                            print(f"      {k0}: {json.dumps(v0)}")
                elif isinstance(val[0], str):
                    print(f"    [0]: {json.dumps(val[0][:100])}")
        elif isinstance(val, str) and len(val) > 200:
            print(f"  {key}: str({len(val)}) = {val[:100]}...")
        else:
            print(f"  {key}: {json.dumps(val)}")


def parse_and_show(filepath, label):
    """Parse a JSONL session file, print a diagnostic report, and return its objects.

    The report covers byte-level stats (size, trailing newline, LF/CRLF counts),
    line counts, any JSON parse errors (with concatenated-object recovery), and
    a structural summary of every parsed object.

    Args:
        filepath: Path to the .jsonl session file (must exist).
        label: Human-readable label printed in the report header.

    Returns:
        list: Every JSON object successfully parsed (including recovered ones).
    """
    print(f"\n{'='*70}")
    print(f"{label}")
    print(f"File: {os.path.basename(filepath)}")
    print(f"Size: {os.path.getsize(filepath)} bytes")
    print(f"{'='*70}")

    # Read raw bytes first so newline-style stats are exact.
    with open(filepath, 'rb') as f:
        raw = f.read()

    # BUG FIX: the original wrote b'\\n' / b'\\r\\n' inside f-string expressions
    # (backslashes are forbidden there before Python 3.12), which are the
    # two-character sequences backslash+'n' — NOT newline bytes — so every
    # newline stat was wrong. Bind the real byte sequences outside the f-strings.
    lf = b'\n'
    crlf = b'\r\n'
    print(f"Total bytes: {len(raw)}")
    print(f"Ends with newline: {raw.endswith(lf)}")
    print(f"Newline count: {raw.count(lf)}")
    print(f"CRLF count: {raw.count(crlf)}")

    # Split into lines
    lines = raw.decode('utf-8').split('\n')
    non_empty = [l for l in lines if l.strip()]
    print(f"Total lines: {len(lines)}, non-empty: {len(non_empty)}")

    # Parse each non-empty line; note i indexes non-empty lines, not file lines.
    objects = []
    for i, line in enumerate(non_empty):
        try:
            objects.append(json.loads(line.strip()))
        except json.JSONDecodeError as e:
            print(f"\n  PARSE ERROR on line {i+1}: {e}")
            print(f"  First 200 chars: {line[:200]}")
            recovered = _recover_first_object(line)
            if recovered is not None:
                objects.append(recovered)

    print(f"\nParsed objects: {len(objects)}")

    for i, obj in enumerate(objects):
        print(f"\n--- Object {i} ---")
        _describe_object(obj)

    return objects

# Also check: what does a skeleton/empty session look like?
def check_skeleton(filepath, label):
    """Dump the raw text of a (small) skeleton session file for eyeballing."""
    bar = '=' * 70
    print(f"\n{bar}")
    print(f"SKELETON CHECK: {label}")
    print(f"{bar}")
    with open(filepath, 'r', encoding='utf-8') as handle:
        text = handle.read()
    # NOTE(review): len(text) counts decoded characters, not on-disk bytes;
    # the label says "bytes" — preserved for output compatibility.
    print(f"Size: {len(text)} bytes")
    print(f"Content (first 2000 chars):\n{text[:2000]}")

def main():
    """Run every comparison: working vs broken sessions, then skeleton files.

    Each file is existence-checked first (the original only guarded the
    skeleton files), so one missing session no longer aborts the whole run.
    """
    print("=" * 70)
    print("DETAILED SESSION FILE COMPARISON")
    print("=" * 70)

    if os.path.exists(WORKING_FILE):
        parse_and_show(WORKING_FILE, "WORKING session bc6d5655 (4947b)")
    if os.path.exists(BROKEN_FILE):
        parse_and_show(BROKEN_FILE, "BROKEN session 6be29cba (4771b)")

    # Check if the skeleton sessions (1319b) parse correctly
    skeleton_working = os.path.join(WS_BASE, "82cdabb21413f2ff42168423e82c8bdf", "chatSessions", "0658dacb-1b0e-45d4-b54e-d6e3611c643d.jsonl")
    skeleton_broken = os.path.join(WS_BASE, "5ec71800c69c79b96b06a37e38537907", "chatSessions", "1aab540b-8230-44c5-b6c1-3248fa997cac.jsonl")

    if os.path.exists(skeleton_working):
        check_skeleton(skeleton_working, "Working skeleton 0658dacb (418b)")
    if os.path.exists(skeleton_broken):
        check_skeleton(skeleton_broken, "Broken skeleton 1aab540b (1319b)")

    # Also check a medium working session and a second broken one
    if os.path.exists(WORKING_FILE2):
        parse_and_show(WORKING_FILE2, "WORKING session 3f5df584 (125508b)")
    if os.path.exists(BROKEN_FILE2):
        parse_and_show(BROKEN_FILE2, "BROKEN session 4e5dd6b4-AgenticFortress (11775b)")


# Guard so importing this module (e.g. for its helpers) runs nothing.
if __name__ == "__main__":
    main()