{html_mod.escape(older)} and "
+ f"{html_mod.escape(newer)} both appear in this "
+ f"project. Re-run session-metrics --compare last-"
+ f"{html_mod.escape(older)} last-{html_mod.escape(newer)} "
+ f"to refresh attribution numbers with your latest sessions."
+ )
+ else:
+ headline = f"{n_families} model families detected"
+ body = (
+ f" in this project's sessions — "
+ f"{html_mod.escape(older)} and "
+ f"{html_mod.escape(newer)}. "
+ f"Run session-metrics --compare-prep to set up a "
+ f"controlled comparison that isolates tokenizer / output-length "
+ f"effects from workload shift."
+ )
+ return {
+ "id": "model_compare",
+ "headline": headline,
+ "body": body,
+ "value": float(n_families),
+ "threshold": 2.0,
+ "shown": True,
+ "always_on": True,
+ }
+
+
def _compute_usage_insights(report: dict) -> list[dict]:
    """Compute the Usage Insights candidate list. See module-level
    `_INSIGHT_*` constants for thresholds. Each entry:
    {id, headline, body, value, threshold, shown, always_on}
    Returns `[]` if total cost is zero (avoids percentage division by zero).

    Candidates are appended in fixed display order (insights 1-11 below);
    renderers rely on that order, so do not re-sort the returned list.
    """
    totals = report.get("totals", {}) or {}
    total_cost = float(totals.get("cost", 0.0) or 0.0)
    if total_cost <= 0:
        return []

    sessions = report.get("sessions", []) or []
    blocks = report.get("session_blocks", []) or []
    tz_off = float(report.get("tz_offset_hours", 0.0) or 0.0)
    # Flatten once; every per-turn scan below iterates this shared list.
    all_turns = [t for s in sessions for t in s.get("turns", [])]
    total_turns = len(all_turns)
    candidates: list[dict] = []

    # 1. Parallel sessions — cost from 5h blocks where multiple sessions touched the window.
    parallel_cost = sum(b.get("cost_usd", 0.0) for b in blocks
                        if len(b.get("sessions_touched") or []) > 1)
    parallel_pct = 100.0 * parallel_cost / total_cost
    candidates.append({
        "id": "parallel_sessions",
        "headline": f"{parallel_pct:.0f}%",
        "body": f" of cost came from 5-hour windows where you ran more than one session in parallel — concurrent sessions share the same rate-limit window.",
        "value": parallel_pct,
        "threshold": _INSIGHT_PARALLEL_PCT_THRESHOLD,
        "shown": parallel_pct >= _INSIGHT_PARALLEL_PCT_THRESHOLD,
        "always_on": False,
    })

    # 2. Long sessions — cost share from sessions ≥ 8h wall-clock.
    long_cutoff = _INSIGHT_LONG_SESSION_HOURS * 3600
    long_cost = sum(s.get("subtotal", {}).get("cost", 0.0)
                    for s in sessions
                    if s.get("duration_seconds", 0) >= long_cutoff)
    long_pct = 100.0 * long_cost / total_cost
    candidates.append({
        "id": "long_sessions",
        "headline": f"{long_pct:.0f}%",
        "body": f" of cost came from sessions active for {_INSIGHT_LONG_SESSION_HOURS}+ hours — long-lived sessions accumulate context cost over time.",
        "value": long_pct,
        "threshold": _INSIGHT_LONG_SESSION_PCT_THRESHOLD,
        "shown": long_pct >= _INSIGHT_LONG_SESSION_PCT_THRESHOLD,
        "always_on": False,
    })

    # 3. Big-context turns — cost share of turns where total input ≥ 150k.
    big_ctx_cost = sum(t.get("cost_usd", 0.0) for t in all_turns
                       if _turn_total_input(t) >= _INSIGHT_BIG_CONTEXT_TOKENS)
    big_ctx_pct = 100.0 * big_ctx_cost / total_cost
    candidates.append({
        "id": "big_context_turns",
        "headline": f"{big_ctx_pct:.0f}%",
        "body": f" of cost was spent on turns with ≥{_INSIGHT_BIG_CONTEXT_TOKENS // 1000}k context filled — `/compact` mid-task or `/clear` between tasks keeps the running input down.",
        "value": big_ctx_pct,
        "threshold": _INSIGHT_BIG_CONTEXT_PCT_THRESHOLD,
        "shown": big_ctx_pct >= _INSIGHT_BIG_CONTEXT_PCT_THRESHOLD,
        "always_on": False,
    })

    # 4. Big cache misses — cost share of turns sending ≥ 100k uncached input.
    # Uncached = fresh input + cache writes (both billed at non-cache-read rates).
    miss_cost = sum(t.get("cost_usd", 0.0) for t in all_turns
                    if (t.get("input_tokens", 0) + t.get("cache_write_tokens", 0))
                    >= _INSIGHT_BIG_CACHE_MISS_TOKENS)
    miss_pct = 100.0 * miss_cost / total_cost
    candidates.append({
        "id": "big_cache_misses",
        "headline": f"{miss_pct:.0f}%",
        "body": f" of cost came from turns with ≥{_INSIGHT_BIG_CACHE_MISS_TOKENS // 1000}k tokens of uncached input — typically a cold-start after a session went idle, or a large new prompt that wasn't cached.",
        "value": miss_pct,
        "threshold": _INSIGHT_BIG_CACHE_MISS_PCT_THRESHOLD,
        "shown": miss_pct >= _INSIGHT_BIG_CACHE_MISS_PCT_THRESHOLD,
        "always_on": False,
    })

    # 5. Subagent-heavy sessions — cost share from sessions with ≥ 3 Task calls.
    subagent_cost = sum(s.get("subtotal", {}).get("cost", 0.0)
                        for s in sessions
                        if _session_task_count(s) >= _INSIGHT_SUBAGENT_TASK_COUNT)
    subagent_pct = 100.0 * subagent_cost / total_cost
    candidates.append({
        "id": "subagent_heavy",
        "headline": f"{subagent_pct:.0f}%",
        "body": f" of cost came from sessions that ran {_INSIGHT_SUBAGENT_TASK_COUNT}+ subagent dispatches (Task tool) — each subagent runs its own request loop.",
        "value": subagent_pct,
        "threshold": _INSIGHT_SUBAGENT_PCT_THRESHOLD,
        "shown": subagent_pct >= _INSIGHT_SUBAGENT_PCT_THRESHOLD,
        "always_on": False,
    })

    # 6. Tool dominance — top-3 tool names' share of all tool calls.
    name_counts: dict[str, int] = {}
    for t in all_turns:
        for name in (t.get("tool_use_names") or []):
            name_counts[name] = name_counts.get(name, 0) + 1
    total_tool_calls = sum(name_counts.values())
    if total_tool_calls >= _INSIGHT_TOOL_DOMINANCE_MIN_CALLS:
        # Sort by count desc, then name asc, for a deterministic top-3.
        ranked = sorted(name_counts.items(), key=lambda x: (-x[1], x[0]))
        top3 = ranked[:3]
        top3_share = 100.0 * sum(c for _, c in top3) / total_tool_calls
        names_str = ", ".join(html_mod.escape(n) for n, _ in top3)
        candidates.append({
            "id": "top3_tools",
            "headline": f"{top3_share:.0f}%",
            "body": f" of all tool calls were {names_str} — your top-3 tools dominate this {total_tool_calls:,}-call workload.",
            "value": top3_share,
            "threshold": 0.0,
            "shown": True,
            "always_on": False,
        })
    else:
        candidates.append({
            "id": "top3_tools",
            "headline": "0%",
            "body": " (insufficient tool-call volume).",
            "value": 0.0,
            "threshold": 0.0,
            "shown": False,
            "always_on": False,
        })

    # 7. Off-peak share — cost share with timestamps outside 09:00–18:00 local weekday.
    off_peak_cost = sum(t.get("cost_usd", 0.0) for t in all_turns
                        if _is_off_peak_local(_parse_iso_epoch(t.get("timestamp", "")), tz_off))
    off_peak_pct = 100.0 * off_peak_cost / total_cost
    candidates.append({
        "id": "off_peak_share",
        "headline": f"{off_peak_pct:.0f}%",
        "body": f" of cost happened outside business hours (before 09:00, after 18:00, or on weekends in your local timezone) — heads-up that long-running subagents while you're AFK still bill.",
        "value": off_peak_pct,
        "threshold": _INSIGHT_OFF_PEAK_PCT_THRESHOLD,
        "shown": off_peak_pct >= _INSIGHT_OFF_PEAK_PCT_THRESHOLD,
        "always_on": False,
    })

    # 8. Cost concentration — top-N turns' cost share (gated on total turns ≥ 10).
    if total_turns >= _INSIGHT_COST_CONCENTRATION_MIN_TURNS:
        sorted_costs = sorted((t.get("cost_usd", 0.0) for t in all_turns), reverse=True)
        topn_share = 100.0 * sum(sorted_costs[:_INSIGHT_COST_CONCENTRATION_TOP_N]) / total_cost
        candidates.append({
            "id": "cost_concentration",
            "headline": f"{topn_share:.0f}%",
            "body": f" of cost was driven by just the top {_INSIGHT_COST_CONCENTRATION_TOP_N} most-expensive turns out of {total_turns:,} total — a few large turns dominate the bill.",
            "value": topn_share,
            "threshold": _INSIGHT_COST_CONCENTRATION_PCT,
            "shown": topn_share >= _INSIGHT_COST_CONCENTRATION_PCT,
            "always_on": False,
        })
    else:
        candidates.append({
            "id": "cost_concentration",
            "headline": "0%",
            "body": " (too few turns to call concentration meaningful).",
            "value": 0.0,
            "threshold": _INSIGHT_COST_CONCENTRATION_PCT,
            "shown": False,
            "always_on": False,
        })

    # 9. Model mix — cost share by family, shown iff ≥ 2 families seen.
    family_cost: dict[str, float] = {}
    for t in all_turns:
        fam = _model_family(t.get("model", ""))
        family_cost[fam] = family_cost.get(fam, 0) + t.get("cost_usd", 0.0)
    families_used = [f for f, c in family_cost.items() if c > 0]
    if len(families_used) >= 2:
        ranked_fams = sorted(family_cost.items(), key=lambda x: -x[1])
        parts = [f"{html_mod.escape(f)} {100.0 * c / total_cost:.0f}%"
                 for f, c in ranked_fams if c > 0]
        candidates.append({
            "id": "model_mix",
            "headline": f"{len(families_used)} families",
            "body": f" — cost split: {' · '.join(parts)}.",
            "value": float(len(families_used)),
            "threshold": 2.0,
            "shown": True,
            "always_on": True,
        })
    else:
        candidates.append({
            "id": "model_mix",
            "headline": "1 family",
            "body": " (single-model project).",
            "value": 1.0,
            "threshold": 2.0,
            "shown": False,
            "always_on": True,
        })

    # 10. Session pacing — turn-count distribution + duration extremes (≥ 2 sessions).
    if len(sessions) >= 2:
        durations = [s.get("duration_seconds", 0) for s in sessions if s.get("duration_seconds", 0) > 0]
        turn_counts = [len(s.get("turns", [])) for s in sessions]
        median_dur = _percentile(durations, 50) if durations else 0
        longest_dur = max(durations) if durations else 0
        tc_min = min(turn_counts) if turn_counts else 0
        tc_max = max(turn_counts) if turn_counts else 0
        tc_avg = (sum(turn_counts) / len(turn_counts)) if turn_counts else 0
        tc_p95 = _percentile([float(x) for x in turn_counts], 95) if turn_counts else 0
        candidates.append({
            "id": "session_pacing",
            "headline": f"{len(sessions)} sessions",
            "body": (f" — median duration {_fmt_long_duration(median_dur)}, longest {_fmt_long_duration(longest_dur)};"
                     f" turns/session min {tc_min:,} · avg {tc_avg:.0f} · p95 {int(tc_p95):,} · max {tc_max:,}."),
            "value": float(len(sessions)),
            "threshold": 2.0,
            "shown": True,
            "always_on": True,
        })
    else:
        candidates.append({
            "id": "session_pacing",
            "headline": "1 session",
            "body": " (no distribution to summarise).",
            "value": 1.0,
            "threshold": 2.0,
            "shown": False,
            "always_on": True,
        })

    # 11. Model compare hint — fires when the project has ≥2 distinct
    # model families. Gated behind a state marker so the card escalates
    # from "hint you can run a benchmark" to "re-run for fresh numbers"
    # once the user actually tries --compare. Suppressed CLI-side via
    # --no-model-compare-insight.
    if not report.get("_suppress_model_compare_insight"):
        mc = _compute_model_compare_insight(report)
        if mc is not None:
            candidates.append(mc)

    return candidates
+
+
def _build_report(
    mode: str,
    slug: str,
    sessions_raw: list[tuple[str, list[dict], list[int]]],
    tz_offset_hours: float = 0.0,
    tz_label: str = "UTC",
    peak: dict | None = None,
    suppress_model_compare_insight: bool = False,
    cache_break_threshold: int = _CACHE_BREAK_DEFAULT_THRESHOLD,
    subagent_attribution: bool = True,
    sort_prompts_by: str | None = None,
    include_subagents: bool = False,
) -> dict:
    """Build a structured report dict from raw session data.

    Args:
        mode: ``"session"`` for single-session or ``"project"`` for all sessions.
        slug: Project slug derived from the working directory path.
        sessions_raw: List of ``(session_id, assistant_turns, user_epoch_secs)``
            triples in chronological order (oldest first). ``assistant_turns``
            are raw JSONL entries for assistant messages; ``user_epoch_secs``
            are sorted UTC epoch-seconds for non-meta user entries.
        tz_offset_hours: Display-timezone offset in hours east of UTC.
        tz_label: Human-readable label for that offset (e.g. ``"UTC+2"``).
        peak: Optional precomputed peak-usage section, passed through as-is.
        suppress_model_compare_insight: CLI opt-out for the model-compare card.
        cache_break_threshold: Uncached-token cutoff for cache-break detection.
        subagent_attribution: When False, skip subagent→prompt accumulation
            (anchors are still computed for sort tie-breaks).
        sort_prompts_by: Renderer prompt-sort override (None = per-format default).
        include_subagents: Whether the caller loaded subagent JSONLs.

    Returns:
        Report dict containing ``sessions`` (list), ``totals``, ``models``,
        and ``time_of_day`` (project-wide). Each session entry also has its
        own ``time_of_day`` for per-session breakdowns.
    """
    sessions_out = []
    global_idx = 1
    # Accumulated across sessions; "nested_levels_seen" is a max, not a sum.
    attribution_summary = {
        "attributed_turns": 0,
        "orphan_subagent_turns": 0,
        "nested_levels_seen": 0,
        "cycles_detected": 0,
    }

    for session_id, raw_turns, user_ts in sessions_raw:
        turn_records = [_build_turn_record(global_idx + i, t, tz_offset_hours)
                        for i, t in enumerate(raw_turns)]
        global_idx += len(turn_records)
        # Phase-B (v1.7.0): subagent → parent-prompt attribution. Anchor
        # computation must precede attribution; both modify turn records
        # in place. Always-on by default; ``--no-subagent-attribution``
        # suppresses Pass 3's accumulation while still computing anchors
        # so other features (sort tie-breaks) keep working.
        _compute_prompt_anchor_indices(turn_records)
        if subagent_attribution:
            session_summary = _attribute_subagent_tokens(turn_records)
            for k, v in session_summary.items():
                if k == "nested_levels_seen":
                    attribution_summary[k] = max(attribution_summary[k], v)
                else:
                    attribution_summary[k] += v
        resumes = _build_resumes(turn_records)
        # Stamp `is_terminal_exit_marker` onto the last-turn marker (if any) so
        # the timeline divider can distinguish "user came back" from "user's
        # most recent /exit with no subsequent work in this JSONL". The
        # dashboard card already splits these in its sublabel; the timeline
        # needs the same distinction to stay internally consistent.
        for r in resumes:
            if r["terminal"]:
                idx = r["turn_index"]
                for t in turn_records:
                    if t["index"] == idx:
                        t["is_terminal_exit_marker"] = True
                        break
        # Raw epoch span — used by usage-insights (long_sessions, session_pacing).
        # Computed here while raw_turns is still in scope; the formatted
        # display strings would be brittle to re-parse for arithmetic.
        first_epoch = _parse_iso_epoch(raw_turns[0].get("timestamp", "")) if raw_turns else 0
        last_epoch = _parse_iso_epoch(raw_turns[-1].get("timestamp", "")) if raw_turns else 0
        duration_seconds = (last_epoch - first_epoch) if (first_epoch and last_epoch and last_epoch > first_epoch) else 0
        # Wall-clock seconds (first user prompt → last assistant turn). Picks
        # up the initial pre-first-response wait that ``duration_seconds``
        # excludes — relevant for benchmark / headless ``claude -p`` runs
        # where prompt #1 lands at session start. Falls back to
        # ``duration_seconds`` when ``user_ts`` is empty (e.g. resumed
        # session whose first user entry was filtered out).
        first_user_epoch = user_ts[0] if user_ts else 0
        wall_clock_seconds = (
            (last_epoch - first_user_epoch)
            if (first_user_epoch and last_epoch and last_epoch > first_user_epoch)
            else duration_seconds
        )
        # advisorModel is stamped on every assistant JSONL entry when advisor
        # is configured for the session — read it once from the first match.
        advisor_configured_model: str | None = next(
            (t.get("advisorModel") for t in raw_turns if t.get("advisorModel")),
            None,
        )
        session_dict = {
            "session_id": session_id,
            "first_ts": _fmt_ts(raw_turns[0].get("timestamp", ""), tz_offset_hours) if raw_turns else "",
            "last_ts": _fmt_ts(raw_turns[-1].get("timestamp", ""), tz_offset_hours) if raw_turns else "",
            "duration_seconds": duration_seconds,
            "wall_clock_seconds": wall_clock_seconds,
            "turns": turn_records,
            "subtotal": _totals_from_turns(turn_records),
            "models": _model_counts(turn_records),
            "time_of_day": _build_time_of_day(user_ts, offset_hours=tz_offset_hours),
            "resumes": resumes,
            "advisor_configured_model": advisor_configured_model,
        }
        # Per-session phase-A aggregators: cache-breaks are intrinsically
        # session-scoped (a turn either breaks the cache in this session's
        # context or it doesn't). by_skill / by_subagent_type are computed
        # at both per-session and report scopes so either drilldown has a
        # self-consistent table when displayed in isolation.
        session_dict["cache_breaks"] = _detect_cache_breaks(
            session_dict, threshold=cache_break_threshold,
        )
        session_dict["by_skill"] = _build_by_skill(
            [session_dict], session_dict["subtotal"]["cost"],
        )
        session_dict["by_subagent_type"] = _build_by_subagent_type(
            [session_dict], session_dict["subtotal"]["cost"],
        )
        sessions_out.append(session_dict)

    all_turns = [t for s in sessions_out for t in s["turns"]]
    all_user_ts = sorted(ts for _, _, uts in sessions_raw for ts in uts)
    blocks = _build_session_blocks(sessions_raw)
    totals = _totals_from_turns(all_turns)
    report = {
        "generated_at": datetime.now(timezone.utc).isoformat(),
        "mode": mode,
        "slug": slug,
        "tz_offset_hours": tz_offset_hours,
        "tz_label": tz_label,
        "sessions": sessions_out,
        "totals": totals,
        "models": _model_counts(all_turns),
        "time_of_day": _build_time_of_day(all_user_ts, offset_hours=tz_offset_hours),
        "session_blocks": blocks,
        "block_summary": _weekly_block_counts(blocks),
        "weekly_rollup": _build_weekly_rollup(sessions_out, sessions_raw, blocks),
        "peak": peak,
        "resumes": [r for s in sessions_out for r in s["resumes"]],
        # Phase-A cross-cutting tables (v1.6.0). All three are always
        # attached; renderers auto-hide when the list/dict is empty.
        "cache_breaks": [cb for s in sessions_out for cb in s.get("cache_breaks", [])],
        "by_skill": _build_by_skill(sessions_out, totals.get("cost", 0.0)),
        "by_subagent_type": _build_by_subagent_type(sessions_out, totals.get("cost", 0.0)),
        "cache_break_threshold": cache_break_threshold,
        # Phase-B (v1.7.0): subagent → parent-prompt attribution summary.
        # Renderers read ``attributed_subagent_*`` directly off turn
        # records; this top-level dict surfaces orphan/cycle counts +
        # nested-depth observed for footer + JSON consumers.
        "subagent_attribution_summary": attribution_summary,
        # User-requested prompt sort mode (or None = renderer default).
        # HTML/MD default to ``"total"`` (parent + attributed subagent
        # cost — bubbles up cheap-prompt-spawning-expensive-subagent
        # turns); CSV/JSON default to ``"self"`` (parent only) so
        # script consumers parsing the prior output ordering remain
        # stable. Value is preserved on the report dict so renderers
        # can do their own per-format defaulting.
        "sort_prompts_by": sort_prompts_by,
        # Whether the loader was invoked with --include-subagents.
        # Renderers read this to decide whether the Subagent-types table's
        # zero token columns mean "no spawns happened" vs "spawn-count
        # only · token data not loaded".
        "include_subagents": include_subagents,
        # CLI opt-out for the Phase 7 model-compare insight card. Keyed
        # with an underscore so downstream JSON exports don't leak the
        # flag into user-facing schema; `_compute_usage_insights` reads
        # it before returning the list.
        "_suppress_model_compare_insight": suppress_model_compare_insight,
    }
    # Sort global cache_breaks by uncached desc to keep "worst-first" order.
    report["cache_breaks"].sort(key=lambda b: -int(b.get("uncached", 0)))
    # v1.26.0: precompute the headline subagent share + within-session
    # split. Stashing here means all renderers (HTML / MD / JSON / CSV)
    # read consistent values, and the JSON export carries them out of
    # the box without per-renderer wiring.
    report["subagent_share_stats"] = _compute_subagent_share(report)
    report["subagent_within_session_split"] = _compute_within_session_split(sessions_out)
    report["usage_insights"] = _compute_usage_insights(report)
    # v1.8.0: token-waste classification — runs after attribution + cache-break
    # detection (both mutate turn dicts in place); annotates turns with
    # turn_character / turn_character_label / turn_risk and attaches
    # the top-level waste_analysis summary dict.
    report["waste_analysis"] = _build_waste_analysis(sessions_out)
    # Drop the internal flag after use so the report dict stays clean
    # for downstream renderers / JSON export.
    report.pop("_suppress_model_compare_insight", None)
    return report
+
+
+def _build_resumes(turn_records: list[dict]) -> list[dict]:
+ """Extract resume markers from per-session turn records.
+
+ A resume marker is a turn flagged ``is_resume_marker=True`` by
+ `_extract_turns` (synthetic no-op preceded by a `/exit` local-command
+ replay in the last ~10 user entries). For each marker we compute the
+ wall-clock gap to the previous assistant turn in the same session —
+ the practical "away" time between the user's prior work and the
+ resumed work. When the marker is the first turn in the session
+ (prior-session context not observable from this file), gap is null.
+ When the marker is the last turn in the session (user exited and did
+ not return), ``terminal`` is True — render as an exit marker rather
+ than a resume divider.
+
+ Returns a list ordered by ``turn_index``; each entry is a dict with
+ ``timestamp``, ``timestamp_fmt``, ``turn_index``, ``gap_seconds``,
+ ``terminal``.
+ """
+ markers: list[dict] = []
+ for i, t in enumerate(turn_records):
+ if not t.get("is_resume_marker"):
+ continue
+ gap: float | None = None
+ if i > 0:
+ prev_dt = _parse_iso_dt(turn_records[i-1].get("timestamp", ""))
+ cur_dt = _parse_iso_dt(t.get("timestamp", ""))
+ if prev_dt and cur_dt:
+ try:
+ gap = (cur_dt - prev_dt).total_seconds()
+ except (ValueError, AttributeError, TypeError, OSError):
+ gap = None
+ terminal = (i == len(turn_records) - 1)
+ markers.append({
+ "timestamp": t.get("timestamp", ""),
+ "timestamp_fmt": t.get("timestamp_fmt", ""),
+ "turn_index": t.get("index"),
+ "gap_seconds": gap,
+ "terminal": terminal,
+ })
+ return markers
+
+
+# ---------------------------------------------------------------------------
+# Formatting helpers (shared)
+# ---------------------------------------------------------------------------
+
# Fixed-width row template for the text timeline table: index, time,
# input (new), output, cache-read, cache-write, total, cost. Widths must
# stay in sync with the header labels in `_text_table_headers`.
COL = "{:<4} {:<19} {:>11} {:>7} {:>9} {:>9} {:>10} {:>9}"
# Optional suffix columns: Mode (fast mode), Content (per-turn block distribution)
_COL_MODE_SUFFIX = " {:<4}"
_COL_CONTENT_SUFFIX = " {:<15}"
COL_M = COL + _COL_MODE_SUFFIX  # retained for back-compat
+
+
def _text_format(show_mode: bool, show_content: bool) -> str:
    """Return the text-row format string, appending the optional Mode
    and/or Content column slots when requested (in that order)."""
    optional = (
        (show_mode, _COL_MODE_SUFFIX),
        (show_content, _COL_CONTENT_SUFFIX),
    )
    return COL + "".join(suffix for enabled, suffix in optional if enabled)
+
+
def _text_table_headers(tz_offset_hours: float = 0.0,
                        show_mode: bool = False,
                        show_content: bool = False) -> tuple[str, str, str]:
    """Return (hdr, sep, wide) for the text timeline table in the given tz.

    ``sep`` and ``wide`` are dashed/double rules sized to the header width.
    """
    labels = ["#", f"Time ({_short_tz_label(tz_offset_hours)})",
              "Input (new)", "Output", "CacheRd", "CacheWr", "Total", "Cost $"]
    if show_mode:
        labels.append("Mode")
    if show_content:
        labels.append("Content")
    hdr = _text_format(show_mode, show_content).format(*labels)
    return hdr, "-" * len(hdr), "=" * len(hdr)
+
+
+def _report_has_any(report: dict, predicate) -> bool:
+ """Return True if any turn across any session matches ``predicate``."""
+ return any(predicate(t) for s in report["sessions"] for t in s["turns"])
+
+
def _has_fast(report: dict) -> bool:
    """Return True if any turn in the report used fast mode."""
    def _is_fast(turn: dict) -> bool:
        return turn.get("speed") == "fast"
    return _report_has_any(report, _is_fast)
+
+
def _has_1h_cache(report: dict) -> bool:
    """Return True if any turn used the 1-hour cache TTL tier."""
    def _wrote_1h(turn: dict) -> bool:
        return turn.get("cache_write_1h_tokens", 0) > 0
    return _report_has_any(report, _wrote_1h)
+
+
def _has_thinking(report: dict) -> bool:
    """Return True if any turn carried at least one thinking block."""
    def _thought(turn: dict) -> bool:
        return (turn.get("content_blocks") or {}).get("thinking", 0) > 0
    return _report_has_any(report, _thought)
+
+
def _has_tool_use(report: dict) -> bool:
    """Return True if any turn carried at least one tool_use block."""
    def _used_tool(turn: dict) -> bool:
        return (turn.get("content_blocks") or {}).get("tool_use", 0) > 0
    return _report_has_any(report, _used_tool)
+
+
def _has_content_blocks(report: dict) -> bool:
    """Return True if any turn carried any content block of any type.

    Drives conditional rendering of the Content column so legacy reports
    (or empty fixtures) stay visually unchanged.
    """
    return _report_has_any(
        report,
        lambda t: any(n > 0 for n in (t.get("content_blocks") or {}).values()),
    )
+
+
def _fmt_content_cell(cb: dict) -> str:
    """Render the per-turn Content cell; zero counts are omitted.

    Example: ``{thinking: 3, tool_use: 2, text: 1}`` → ``"T3 u2 x1"``.
    An empty or all-zero mapping renders as ``"-"`` so the row stays
    visible in the table.
    """
    if not cb:
        return "-"
    cells = [f"{letter}{cb[key]}"
             for key, letter in _CONTENT_LETTERS
             if cb.get(key, 0)]
    return " ".join(cells) if cells else "-"
+
+
def _fmt_content_title(cb: dict) -> str:
    """Human-readable tooltip for the Content cell, e.g. ``"3 thinking, 2 tool_use"``."""
    if not cb:
        return ""
    return ", ".join(
        f"{cb[key]} {key}"
        for key, _ in _CONTENT_LETTERS
        if cb.get(key, 0) > 0
    )
+
+
def _fmt_ts(ts: str, offset_hours: float = 0.0) -> str:
    """Format an ISO timestamp as ``YYYY-MM-DD HH:MM:SS`` in the display tz.

    Unparseable or unshiftable input falls back to the raw string
    truncated to its first 19 characters.
    """
    fallback = ts[:19] if len(ts) >= 19 else ts
    parsed = _parse_iso_dt(ts)
    if parsed is None:
        return fallback
    try:
        if offset_hours:
            parsed = parsed.astimezone(timezone(timedelta(hours=offset_hours)))
        return parsed.strftime("%Y-%m-%d %H:%M:%S")
    except (ValueError, OverflowError, OSError):
        return fallback
+
+
def _fmt_generated_at(report: dict) -> str:
    """Format ``report["generated_at"]`` in the report's display tz.

    Falls back to a UTC-suffixed string when the timestamp can't be
    parsed or shifted (preserves the prior bare-except behavior of the
    two markdown/HTML render sites this consolidates).
    """
    raw = report.get("generated_at", "")
    tz_offset = report.get("tz_offset_hours", 0.0)
    fallback = raw[:19].replace("T", " ") + " UTC"
    parsed = _parse_iso_dt(raw)
    if parsed is None:
        return fallback
    try:
        shifted = parsed.astimezone(timezone(timedelta(hours=tz_offset)))
        stamp = shifted.strftime("%Y-%m-%d %H:%M:%S")
    except (ValueError, OverflowError, OSError):
        return fallback
    return stamp + f" {_short_tz_label(tz_offset)}"
+
+
+def _short_tz_label(offset_hours: float) -> str:
+ if offset_hours == 0:
+ return "UTC"
+ sign = "+" if offset_hours > 0 else "-"
+ return f"UTC{sign}{abs(offset_hours):g}"
+
+
+def _fmt_epoch_local(epoch: int, offset_hours: float = 0.0,
+ fmt: str = "%Y-%m-%d %H:%M:%S") -> str:
+ """Format an integer epoch in the given UTC offset."""
+ offset_sec = int(offset_hours * 3600)
+ return datetime.fromtimestamp(
+ epoch + offset_sec, tz=timezone.utc,
+ ).strftime(fmt)
+
+
+def _fmt_cwr_row(t: dict) -> str:
+ """Per-turn CacheWr cell. Appends `*` when the turn used 1h-tier cache."""
+ n = t["cache_write_tokens"]
+ if t.get("cache_write_ttl") in ("1h", "mix"):
+ return f"{n:>8,}*"
+ return f"{n:>9,}"
+
+
+def _fmt_cwr_subtotal(s: dict) -> str:
+ """Subtotal/total CacheWr cell. `*` when any 1h tokens are in the sum."""
+ n = s.get("cache_write", 0)
+ if s.get("cache_write_1h", 0) > 0:
+ return f"{n:>8,}*"
+ return f"{n:>9,}"
+
+
def _row_text(t: dict, show_mode: bool = False,
              show_content: bool = False) -> str:
    """Render one timeline-table row for turn dict ``t``."""
    cells = [
        t["index"],
        t["timestamp_fmt"],
        f"{t['input_tokens']:>7,}",
        f"{t['output_tokens']:>7,}",
        f"{t['cache_read_tokens']:>9,}",
        _fmt_cwr_row(t),
        f"{t['total_tokens']:>10,}",
        f"${t['cost_usd']:>8.4f}",
    ]
    if show_mode:
        cells.append("fast" if t.get("speed", "") == "fast" else "std")
    if show_content:
        cells.append(_fmt_content_cell(t.get("content_blocks") or {}))
    return _text_format(show_mode, show_content).format(*cells)
+
+
def _subtotal_text(label: str, s: dict, show_mode: bool = False,
                   show_content: bool = False) -> str:
    """Render a labelled subtotal/total row under the timeline table."""
    cells = [
        label,
        "",  # no timestamp on subtotal rows
        f"{s['input']:>7,}",
        f"{s['output']:>7,}",
        f"{s['cache_read']:>9,}",
        _fmt_cwr_subtotal(s),
        f"{s['total']:>10,}",
        f"${s['cost']:>8.4f}",
    ]
    # Blank fillers for whichever optional columns are active.
    cells.extend([""] * (int(show_mode) + int(show_content)))
    return _text_format(show_mode, show_content).format(*cells)
+
+
+def _text_legend(tz_label: str, show_mode: bool, show_ttl: bool,
+ show_content: bool = False) -> str:
+ """Build the column legend emitted above the timeline table."""
+ rows = [
+ ("#", "deduplicated turn index"),
+ ("Time", f"turn start, local tz ({tz_label})"),
+ ]
+ if show_mode:
+ rows.append(("Mode", "fast / standard (only shown when fast mode was used)"))
+ rows.extend([
+ ("Input", "net new input tokens (uncached)"),
+ ("Output", "generated tokens (includes thinking + tool_use block tokens)"),
+ ("CacheRd", "tokens read from cache (cheap)"),
+ ])
+ if show_ttl:
+ rows.append(("CacheWr", "tokens written to cache; `*` = includes 1h-tier (see footer)"))
+ else:
+ rows.append(("CacheWr", "tokens written to cache (one-time)"))
+ rows.extend([
+ ("Total", "sum of the four billable token buckets"),
+ ("Cost $", "estimated USD for this turn"),
+ ])
+ if show_content:
+ rows.append((
+ "Content",
+ "content blocks per turn: T thinking, u tool_use, x text, "
+ "r tool_result, i image, v server_tool_use, R advisor_tool_result (zeros omitted)",
+ ))
+ w = max(len(k) for k, _ in rows)
+ lines = ["Columns:"] + [f" {k:<{w}} {v}" for k, v in rows]
+ return "\n".join(lines)
+
+
def _footer_text(totals: dict, models: dict[str, int],
                 time_of_day: dict | None = None,
                 tz_label: str = "UTC",
                 session_blocks: list[dict] | None = None,
                 block_summary: dict | None = None) -> str:
    """Build the text footer with cache stats, model breakdown, and time-of-day.

    Args:
        totals: Aggregated token/cost totals dict.
        models: ``{model_id: turn_count}`` mapping.
        time_of_day: Optional ``time_of_day`` report section. When provided,
            a UTC-bucketed user activity summary is appended.
        tz_label: Display-timezone label used in the time-of-day headings.
        session_blocks: Optional 5-hour session-block list; when non-empty
            the most recent 8 blocks are tabulated.
        block_summary: Optional rollup with ``trailing_7`` / ``trailing_14``
            / ``total`` counts for the block header line.
    """
    lines = [
        "",
        f"Cache savings vs no-cache baseline : ${totals['cache_savings']:.4f}",
        f"Cache hit ratio (read / total input): {totals['cache_hit_pct']:.1f}%",
    ]
    # 1h-tier cache lines only appear when the tier was actually used.
    if totals.get("cache_write_1h", 0) > 0:
        lines.append(
            f"Extra cost paid for 1h cache tier   : ${totals.get('extra_1h_cost', 0.0):.4f}"
        )
        pct_1h = 100 * totals["cache_write_1h"] / max(1, totals["cache_write"])
        lines.append(
            f"Cache TTL mix (1h share of writes)  : {pct_1h:.1f}% "
            f"[* in CacheWr column = includes 1h-tier cache write]"
        )
    if totals.get("thinking_turn_count", 0) > 0:
        lines.append(
            f"Extended thinking turns             : "
            f"{totals['thinking_turn_count']} of {totals.get('turns', 0)} "
            f"({totals.get('thinking_turn_pct', 0.0):.1f}%, "
            f"{(totals.get('content_blocks') or {}).get('thinking', 0)} blocks)"
        )
    if totals.get("tool_call_total", 0) > 0:
        top3 = totals.get("tool_names_top3") or []
        top3_str = ", ".join(top3) if top3 else "none"
        lines.append(
            f"Tool calls                          : "
            f"{totals['tool_call_total']} total, "
            f"{totals.get('tool_call_avg_per_turn', 0.0):.1f}/turn "
            f"(top: {top3_str})"
        )
    if totals.get("advisor_call_count", 0) > 0:
        _adv_n = totals["advisor_call_count"]
        _adv_c = totals.get("advisor_cost_usd", 0.0)
        lines.append(
            f"Advisor calls                       : "
            f"{_adv_n} call{'s' if _adv_n != 1 else ''} +${_adv_c:.4f}"
        )
    if models:
        lines.append("")
        lines.append("Models used:")
        # Most-used model first; per-1M rates shown for each bucket.
        for m, cnt in sorted(models.items(), key=lambda x: -x[1]):
            r = _pricing_for(m)
            lines.append(
                f"  {m:<40} {cnt:>3} turns "
                f"(${r['input']:.2f}/${r['output']:.2f}/${r['cache_read']:.2f}/${r['cache_write']:.2f} per 1M in/out/rd/wr)"
            )
    if time_of_day and time_of_day.get("message_count", 0) > 0:
        b = time_of_day["buckets"]
        lines.append("")
        lines.append(f"User prompts by time of day ({tz_label}):")
        lines.append(f"  Night     (0\u20136):  {b.get('night', 0):>5,}")
        lines.append(f"  Morning   (6\u201312): {b.get('morning', 0):>5,}")
        lines.append(f"  Afternoon (12\u201318):{b.get('afternoon', 0):>5,}")
        lines.append(f"  Evening   (18\u201324):{b.get('evening', 0):>5,}")

        hod = time_of_day.get("hour_of_day")
        if hod and hod.get("total", 0) > 0:
            hours = hod["hours"]
            # Guard against an all-zero histogram (avoid division by zero).
            mx = max(hours) or 1
            lines.append("")
            lines.append(f"Hour-of-day ({tz_label}) — each \u2588 \u2248 {mx/20:.1f} prompts:")
            for h in range(24):
                bar = "\u2588" * int(hours[h] / mx * 20)
                lines.append(f"  {h:02d}:00  {hours[h]:>4,}  {bar}")

        wh = time_of_day.get("weekday_hour")
        if wh and wh.get("total", 0) > 0:
            row_totals = wh["row_totals"]
            days = ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"]
            lines.append("")
            lines.append(f"Weekday totals ({tz_label}):")
            for i, d in enumerate(days):
                lines.append(f"  {d}:  {row_totals[i]:>5,}")

    if session_blocks:
        lines.append("")
        s7 = block_summary.get("trailing_7", 0) if block_summary else 0
        s14 = block_summary.get("trailing_14", 0) if block_summary else 0
        tot = block_summary.get("total", len(session_blocks)) if block_summary else len(session_blocks)
        lines.append(f"5-hour session blocks ({tot} total; "
                     f"{s7} in last 7d, {s14} in last 14d):")
        # Only the 8 most recent blocks are listed; the rest are summarised.
        recent = session_blocks[-8:]
        for b in recent:
            anchor = b["anchor_iso"][:16].replace("T", " ")
            dur = b["elapsed_min"]
            lines.append(
                f"  {anchor}Z  "
                f"dur={dur:>5.0f}m  "
                f"turns={b['turn_count']:>3}  "
                f"prompts={b['user_msg_count']:>3}  "
                f"${b['cost_usd']:>7.3f}"
            )
        if len(session_blocks) > len(recent):
            lines.append(f"  ... ({len(session_blocks) - len(recent)} earlier blocks omitted)")
    return "\n".join(lines)
+
+
+# ---------------------------------------------------------------------------
+# Renderers
+# ---------------------------------------------------------------------------
+
+def render_text(report: dict) -> str:
+    """Render the report as a fixed-width plain-text table.
+
+    ``compare`` and ``instance`` modes delegate to their dedicated
+    renderers. Otherwise: a column legend, then per-session turn tables
+    (project mode adds a PROJECT TOTAL block), then the shared footer
+    (models, time-of-day, 5-hour blocks).
+    """
+    if report.get("mode") == "compare":
+        return sys.modules["session_metrics_compare"].render_compare_text(report)
+    if report.get("mode") == "instance":
+        return _render_instance_text(report)
+    out = io.StringIO()
+
+    # Tiny print-to-buffer helper so the emission code reads linearly.
+    def p(*args, **kw):
+        print(*args, **kw, file=out)
+
+    sessions = report["sessions"]
+
+    # Optional columns adapt to the data: fast-mode marker, 1h-cache TTL
+    # flag, and the per-turn content-block column.
+    m = _has_fast(report)
+    has_1h = _has_1h_cache(report)
+    has_content = _has_content_blocks(report)
+    tz_offset = report.get("tz_offset_hours", 0.0)
+    tz_label = report.get("tz_label", "UTC")
+    hdr, sep, wide = _text_table_headers(tz_offset, show_mode=m,
+                                         show_content=has_content)
+
+    p(_text_legend(tz_label, show_mode=m, show_ttl=has_1h,
+                   show_content=has_content))
+    p()
+
+    if report["mode"] == "project":
+        p(f"Project: {report['slug']}")
+        p(f"Sessions with data: {len(sessions)}")
+        p()
+        for i, s in enumerate(sessions, 1):
+            p(wide)
+            # Advisor tag only appears when the session made advisor calls.
+            _adv_n = s["subtotal"].get("advisor_call_count", 0)
+            _adv_tag = ""
+            if _adv_n > 0:
+                _adv_c = s["subtotal"].get("advisor_cost_usd", 0.0)
+                _adv_m = s.get("advisor_configured_model") or ""
+                _adv_label = f" · {_adv_m}" if _adv_m else ""
+                _adv_tag = f" [advisor: {_adv_n} call{'s' if _adv_n != 1 else ''}{_adv_label} · +${_adv_c:.4f}]"
+            p(f" Session {s['session_id'][:8]}… {s['first_ts']} → {s['last_ts']} ({len(s['turns'])} turns){_adv_tag}")
+            p(wide)
+            p(hdr)
+            for t in s["turns"]:
+                p(_row_text(t, m, has_content))
+            p(sep)
+            p(_subtotal_text(f"S{i:02}", s["subtotal"], m, has_content))
+            p()
+        p(wide)
+        p(f" PROJECT TOTAL — {len(sessions)} session{'s' if len(sessions) != 1 else ''}, {report['totals']['turns']} turns")
+        p(wide)
+        p(hdr)
+        p(sep)
+        p(_subtotal_text("TOT", report["totals"], m, has_content))
+    else:
+        # Single-session mode: one table, no per-session headers.
+        s = sessions[0]
+        p(hdr)
+        for t in s["turns"]:
+            p(_row_text(t, m, has_content))
+        p(sep)
+        p(_subtotal_text("TOT", s["subtotal"], m, has_content))
+
+    p(_footer_text(report["totals"], report["models"], report.get("time_of_day"),
+                   tz_label=report.get("tz_label", "UTC"),
+                   session_blocks=report.get("session_blocks"),
+                   block_summary=report.get("block_summary")))
+    return out.getvalue()
+
+
+def _tod_for_json(tod: dict) -> dict:
+ """Convert a ``time_of_day`` section for JSON export.
+
+ Replaces internal ``epoch_secs`` (integer list) with human-readable
+ ``utc_timestamps`` (ISO-8601 strings). The conversion is O(n) but only
+ runs once per export — no deep-copy of the full report is needed.
+ """
+ return {
+ "utc_timestamps": [
+ datetime.fromtimestamp(e, tz=timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
+ for e in tod.get("epoch_secs", [])
+ ],
+ "message_count": tod.get("message_count", 0),
+ "buckets": tod.get("buckets", {}),
+ "hour_of_day": tod.get("hour_of_day", {}),
+ "weekday_hour": tod.get("weekday_hour", {}),
+ "offset_hours": tod.get("offset_hours", 0.0),
+ }
+
+
+def render_json(report: dict) -> str:
+ """Render the full report as indented JSON.
+
+ Internal ``epoch_secs`` lists in ``time_of_day`` sections are converted to
+ ISO-8601 ``utc_timestamps`` for human readability. The transform uses a
+ shallow copy of the report — session turns, subtotals, and model dicts are
+ shared by reference to avoid copying ~thousands of turn record dicts.
+ """
+ if report.get("mode") == "compare":
+ return sys.modules["session_metrics_compare"].render_compare_json(report)
+ if report.get("mode") == "instance":
+ return _render_instance_json(report)
+ # Shallow-transform: only replace time_of_day sections
+ export = {**report}
+ if "time_of_day" in export:
+ export["time_of_day"] = _tod_for_json(export["time_of_day"])
+ if "sessions" in export:
+ export["sessions"] = [
+ {**s, "time_of_day": _tod_for_json(s["time_of_day"])}
+ if "time_of_day" in s else s
+ for s in export["sessions"]
+ ]
+ return json.dumps(export, indent=2)
+
+
+def render_csv(report: dict) -> str:
+    """Render turn-level CSV with an appended time-of-day summary section.
+
+    The first section contains one row per assistant turn (unchanged).
+    A blank separator row is followed by a ``USER ACTIVITY BY TIME OF DAY``
+    summary with per-session and project-wide counts bucketed at UTC.
+    Further optional sections (hour-of-day, weekday x hour matrix, 5-hour
+    blocks, skills, subagent types, cache breaks, waste analysis) are
+    emitted only when the report carries the corresponding data, each
+    introduced by a ``#``-prefixed title row.
+    """
+    if report.get("mode") == "compare":
+        return sys.modules["session_metrics_compare"].render_compare_csv(report)
+    if report.get("mode") == "instance":
+        return _render_instance_csv(report)
+    out = io.StringIO()
+    w = csv_mod.writer(out)
+    w.writerow(["session_id", "turn", "timestamp", "model", "speed",
+                "input_tokens", "output_tokens", "cache_read_tokens", "cache_write_tokens",
+                "cache_write_5m_tokens", "cache_write_1h_tokens", "cache_write_ttl",
+                "total_tokens", "cost_usd", "no_cache_cost_usd",
+                "thinking_blocks", "tool_use_blocks", "text_blocks",
+                "tool_result_blocks", "image_blocks",
+                # Phase-B (v1.7.0) attribution columns. Always emitted so
+                # column count is stable across reports; values are 0 on
+                # turns that didn't spawn a subagent (the common case).
+                "attributed_subagent_tokens", "attributed_subagent_cost",
+                "attributed_subagent_count",
+                "stop_reason", "is_cache_break",
+                "turn_character", "turn_character_label", "turn_risk"])
+    # One row per assistant turn; .get() defaults keep reports produced by
+    # older versions (missing newer per-turn keys) exporting cleanly.
+    for s in report["sessions"]:
+        for t in s["turns"]:
+            cb = t.get("content_blocks") or {}
+            w.writerow([
+                s["session_id"], t["index"], t["timestamp"], t["model"],
+                t.get("speed", ""),
+                t["input_tokens"], t["output_tokens"],
+                t["cache_read_tokens"], t["cache_write_tokens"],
+                t.get("cache_write_5m_tokens", 0),
+                t.get("cache_write_1h_tokens", 0),
+                t.get("cache_write_ttl", ""),
+                t["total_tokens"],
+                f"{t['cost_usd']:.6f}", f"{t['no_cache_cost_usd']:.6f}",
+                cb.get("thinking", 0), cb.get("tool_use", 0),
+                cb.get("text", 0), cb.get("tool_result", 0),
+                cb.get("image", 0),
+                t.get("attributed_subagent_tokens", 0),
+                f"{float(t.get('attributed_subagent_cost', 0.0)):.6f}",
+                t.get("attributed_subagent_count", 0),
+                t.get("stop_reason", ""),
+                t.get("is_cache_break", False),
+                t.get("turn_character", ""),
+                t.get("turn_character_label", ""),
+                t.get("turn_risk", False),
+            ])
+
+    # Time-of-day summary section
+    tz_label = report.get("tz_label", "UTC")
+    w.writerow([])
+    w.writerow([f"# USER ACTIVITY BY TIME OF DAY ({tz_label})"])
+    w.writerow(["scope", "id", "night_0_6", "morning_6_12",
+                "afternoon_12_18", "evening_18_24", "total"])
+    for s in report["sessions"]:
+        tod = s.get("time_of_day", {})
+        b = tod.get("buckets", {})
+        w.writerow(["session", s["session_id"],
+                    b.get("night", 0), b.get("morning", 0),
+                    b.get("afternoon", 0), b.get("evening", 0),
+                    tod.get("message_count", 0)])
+    tod = report.get("time_of_day", {})
+    b = tod.get("buckets", {})
+    w.writerow(["project", report["slug"],
+                b.get("night", 0), b.get("morning", 0),
+                b.get("afternoon", 0), b.get("evening", 0),
+                tod.get("message_count", 0)])
+
+    # Hour-of-day section (project-wide)
+    hod = tod.get("hour_of_day")
+    if hod and hod.get("total", 0) > 0:
+        w.writerow([])
+        w.writerow([f"# HOUR OF DAY ({tz_label})"])
+        w.writerow(["hour"] + [f"{h:02d}" for h in range(24)] + ["total"])
+        w.writerow(["prompts"] + list(hod["hours"]) + [hod["total"]])
+
+    # Weekday x hour matrix (project-wide)
+    wh = tod.get("weekday_hour")
+    if wh and wh.get("total", 0) > 0:
+        days = ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"]
+        w.writerow([])
+        w.writerow([f"# WEEKDAY x HOUR ({tz_label})"])
+        w.writerow(["weekday"] + [f"{h:02d}" for h in range(24)] + ["row_total"])
+        for i, d in enumerate(days):
+            w.writerow([d] + list(wh["matrix"][i]) + [wh["row_totals"][i]])
+        w.writerow(["col_total"] + list(wh["col_totals"]) + [wh["total"]])
+
+    # 5-hour session blocks
+    blocks = report.get("session_blocks") or []
+    summary = report.get("block_summary") or {}
+    if blocks:
+        w.writerow([])
+        w.writerow(["# 5-HOUR SESSION BLOCKS"])
+        w.writerow(["trailing_7", "trailing_14", "trailing_30", "total"])
+        w.writerow([summary.get("trailing_7", 0), summary.get("trailing_14", 0),
+                    summary.get("trailing_30", 0), summary.get("total", len(blocks))])
+        w.writerow([])
+        w.writerow(["anchor_utc", "last_utc", "elapsed_min", "turns",
+                    "user_prompts", "input", "output", "cache_read",
+                    "cache_write", "cost_usd", "sessions_touched"])
+        for b in blocks:
+            w.writerow([
+                b["anchor_iso"], b["last_iso"], f"{b['elapsed_min']:.1f}",
+                b["turn_count"], b["user_msg_count"],
+                b["input"], b["output"], b["cache_read"], b["cache_write"],
+                f"{b['cost_usd']:.6f}", len(b["sessions_touched"]),
+            ])
+
+    # Phase-A (v1.6.0): skill/subagent/cache-break sections.
+    by_skill = report.get("by_skill") or []
+    if by_skill:
+        w.writerow([])
+        w.writerow(["# SKILLS / SLASH COMMANDS"])
+        w.writerow(["name", "invocations", "turns", "input", "output",
+                    "cache_read", "cache_write", "total_tokens",
+                    "cost_usd", "cache_hit_pct", "pct_total_cost"])
+        for r in by_skill:
+            w.writerow([
+                r.get("name", ""), r.get("invocations", 0),
+                r.get("turns_attributed", 0), r.get("input", 0),
+                r.get("output", 0), r.get("cache_read", 0),
+                r.get("cache_write", 0), r.get("total_tokens", 0),
+                f"{float(r.get('cost_usd', 0.0)):.6f}",
+                f"{float(r.get('cache_hit_pct', 0.0)):.1f}",
+                f"{float(r.get('pct_total_cost', 0.0)):.2f}",
+            ])
+
+    by_subagent = report.get("by_subagent_type") or []
+    if by_subagent:
+        w.writerow([])
+        w.writerow(["# SUBAGENT TYPES"])
+        w.writerow(["name", "spawn_count", "turns", "input", "output",
+                    "cache_read", "cache_write", "total_tokens",
+                    "avg_tokens_per_call", "cost_usd",
+                    "cache_hit_pct", "pct_total_cost",
+                    # v1.26.0: per-invocation warm-up signals.
+                    "invocation_count", "first_turn_share_pct",
+                    "sp_amortisation_pct"])
+        for r in by_subagent:
+            w.writerow([
+                r.get("name", ""), r.get("spawn_count", 0),
+                r.get("turns_attributed", 0), r.get("input", 0),
+                r.get("output", 0), r.get("cache_read", 0),
+                r.get("cache_write", 0), r.get("total_tokens", 0),
+                f"{float(r.get('avg_tokens_per_call', 0.0)):.1f}",
+                f"{float(r.get('cost_usd', 0.0)):.6f}",
+                f"{float(r.get('cache_hit_pct', 0.0)):.1f}",
+                f"{float(r.get('pct_total_cost', 0.0)):.2f}",
+                int(r.get("invocation_count", 0)),
+                f"{float(r.get('first_turn_share_pct', 0.0)):.1f}",
+                f"{float(r.get('sp_amortisation_pct', 0.0)):.1f}",
+            ])
+
+    cache_breaks = report.get("cache_breaks") or []
+    if cache_breaks:
+        w.writerow([])
+        threshold = int(report.get("cache_break_threshold",
+                                   _CACHE_BREAK_DEFAULT_THRESHOLD))
+        w.writerow([f"# CACHE BREAKS (> {threshold:,} uncached)"])
+        w.writerow(["session_id", "turn_index", "timestamp", "uncached",
+                    "total_tokens", "cache_break_pct", "slash_command",
+                    "project", "prompt_snippet"])
+        for cb in cache_breaks:
+            w.writerow([
+                cb.get("session_id", ""), cb.get("turn_index", ""),
+                cb.get("timestamp", ""), cb.get("uncached", 0),
+                cb.get("total_tokens", 0),
+                f"{float(cb.get('cache_break_pct', 0.0)):.1f}",
+                cb.get("slash_command", ""),
+                cb.get("project", ""),
+                # Snippet is capped so a pasted wall of text can't bloat
+                # the CSV row.
+                (cb.get("prompt_snippet") or "")[:240],
+            ])
+
+    wa = report.get("waste_analysis")
+    if wa:
+        dist = wa.get("distribution") or {}
+        if dist:
+            w.writerow([])
+            w.writerow(["# TURN CHARACTER ANALYSIS"])
+            w.writerow(["turn_character", "turn_character_label", "count"])
+            for char, count in sorted(dist.items(), key=lambda x: -x[1]):
+                w.writerow([char, _TURN_CHARACTER_LABELS.get(char, char), count])
+        retry = wa.get("retry_chains") or {}
+        if retry.get("chain_count", 0) > 0:
+            w.writerow([])
+            w.writerow([f"# RETRY CHAINS ({retry['chain_count']} chains, "
+                        f"{retry.get('retry_cost_pct', 0):.1f}% of session cost)"])
+            w.writerow(["chain_length", "turn_indices", "cost_usd"])
+            for c in retry.get("chains") or []:
+                w.writerow([c["length"],
+                            ";".join(str(i) for i in c["turn_indices"]),
+                            f"{c['cost_usd']:.6f}"])
+        reaccess = wa.get("file_reaccesses") or {}
+        if reaccess.get("reaccessed_count", 0) > 0:
+            w.writerow([])
+            w.writerow([f"# FILE RE-ACCESSES ({reaccess['reaccessed_count']} files)"])
+            w.writerow(["path", "access_count", "first_turn", "cost_usd"])
+            for d in reaccess.get("details") or []:
+                w.writerow([d["path"], d["count"], d["first_turn"],
+                            f"{d['cost_usd']:.6f}"])
+
+    return out.getvalue()
+
+
+def render_md(report: dict) -> str:
+    """Render the full report as GitHub-flavored Markdown.
+
+    Includes summary cards, user activity by time of day (UTC), model pricing
+    table, and per-session turn-level tables with subtotals. Optional
+    sections (usage insights, waste analysis, 5-hour blocks, skills,
+    subagent types, cache breaks) render only when the report carries the
+    corresponding data, so the layout degrades gracefully.
+    """
+    if report.get("mode") == "compare":
+        return sys.modules["session_metrics_compare"].render_compare_md(report)
+    if report.get("mode") == "instance":
+        return _render_instance_md(report)
+    out = io.StringIO()
+
+    # Tiny print-to-buffer helper so section emission reads linearly.
+    def p(*args, **kw):
+        print(*args, **kw, file=out)
+
+    slug = report["slug"]
+    totals = report["totals"]
+    mode = report["mode"]
+    tz_offset = report.get("tz_offset_hours", 0.0)
+    generated = _fmt_generated_at(report)
+
+    p(f"# Session Metrics — {slug}")
+    p()
+    p(f"Generated: {generated} | Mode: {mode}")
+    p()
+
+    # Summary cards
+    p("## Summary")
+    p()
+    p(f"| Metric | Value |")
+    p(f"|--------|-------|")
+    p(f"| Sessions | {len(report['sessions'])} |")
+    p(f"| Total turns | {totals['turns']:,} |")
+    # Wall clock + mean turn latency. ``Wall clock`` is the sum of per-session
+    # first→last assistant-turn intervals; for benchmark / headless ``claude
+    # -p`` runs this approximates the orchestrator's perceived wall-clock.
+    # ``Mean turn latency`` is the average ``latency_seconds`` across every
+    # assistant turn that had a parseable predecessor — drops resume markers
+    # and any turn whose predecessor timestamp couldn't be parsed.
+    _wall_total = sum(int(s.get("wall_clock_seconds", 0) or s.get("duration_seconds", 0)) for s in report["sessions"])
+    _turn_lats = [t["latency_seconds"] for s in report["sessions"]
+                  for t in s["turns"] if t.get("latency_seconds") is not None]
+    if _wall_total > 0:
+        p(f"| Wall clock | {_fmt_duration(_wall_total)} |")
+    if _turn_lats:
+        _mean_lat = sum(_turn_lats) / len(_turn_lats)
+        p(f"| Mean turn latency | {_mean_lat:.2f}s ({len(_turn_lats)} turns) |")
+    p(f"| Total cost | ${totals['cost']:.4f} |")
+    _share_line = _build_subagent_share_md(_compute_subagent_share(report))
+    if _share_line:
+        p(_share_line)
+    p(f"| Cache savings | ${totals['cache_savings']:.4f} |")
+    p(f"| Cache hit ratio | {totals['cache_hit_pct']:.1f}% |")
+    p(f"| Total input tokens | {totals['total_input']:,} |")
+    p(f"| Input tokens (new) | {totals['input']:,} |")
+    p(f"| Output tokens | {totals['output']:,} |")
+    p(f"| Cache read tokens | {totals['cache_read']:,} |")
+    p(f"| Cache write tokens | {totals['cache_write']:,} |")
+    if totals.get("cache_write_1h", 0) > 0:
+        # max(1, ...) guards the division when all writes are 1h-tier only.
+        pct_1h = 100 * totals["cache_write_1h"] / max(1, totals["cache_write"])
+        p(f"| Cache TTL mix (1h share of writes) | {pct_1h:.1f}% |")
+        p(f"| Extra cost paid for 1h cache tier | ${totals.get('extra_1h_cost', 0.0):.4f} |")
+    if totals.get("thinking_turn_count", 0) > 0:
+        cb = totals.get("content_blocks") or {}
+        p(
+            f"| Extended thinking turns | "
+            f"{totals['thinking_turn_count']} of {totals['turns']} "
+            f"({totals.get('thinking_turn_pct', 0.0):.1f}%, "
+            f"{cb.get('thinking', 0)} blocks) |"
+        )
+    if totals.get("tool_call_total", 0) > 0:
+        top3 = totals.get("tool_names_top3") or []
+        top3_str = ", ".join(top3) if top3 else "none"
+        p(
+            f"| Tool calls | {totals['tool_call_total']} total, "
+            f"{totals.get('tool_call_avg_per_turn', 0.0):.1f}/turn "
+            f"(top: {top3_str}) |"
+        )
+    if totals.get("advisor_call_count", 0) > 0:
+        _adv_n = totals["advisor_call_count"]
+        _adv_c = totals.get("advisor_cost_usd", 0.0)
+        p(f"| Advisor calls | {_adv_n} call{'s' if _adv_n != 1 else ''} · +${_adv_c:.4f} |")
+    p()
+
+    # Usage Insights — derived from `_compute_usage_insights`. Renders only
+    # when at least one insight crossed its threshold; otherwise the
+    # section is omitted entirely so the existing layout flow is preserved.
+    md_insights = _build_usage_insights_md(report.get("usage_insights", []) or [])
+    if md_insights:
+        p(md_insights)
+
+    md_waste = _build_waste_analysis_md(report.get("waste_analysis") or {})
+    if md_waste:
+        p(md_waste)
+
+    # Time-of-day section
+    tod = report.get("time_of_day", {})
+    tz_label = report.get("tz_label", "UTC")
+    if tod.get("message_count", 0) > 0:
+        b = tod["buckets"]
+        p(f"## User Activity by Time of Day ({tz_label})")
+        p()
+        p("| Period | Hours | Messages |")
+        p("|--------|------:|---------:|")
+        p(f"| Night | 0\u20136 | {b.get('night', 0):,} |")
+        p(f"| Morning | 6\u201312 | {b.get('morning', 0):,} |")
+        p(f"| Afternoon | 12\u201318 | {b.get('afternoon', 0):,} |")
+        p(f"| Evening | 18\u201324 | {b.get('evening', 0):,} |")
+        p(f"| **Total** | | **{tod['message_count']:,}** |")
+        p()
+
+        hod = tod.get("hour_of_day")
+        if hod and hod.get("total", 0) > 0:
+            hours = hod["hours"]
+            p(f"### Hour of day ({tz_label})")
+            p()
+            p("| Hour | Prompts |")
+            p("|-----:|--------:|")
+            for h in range(24):
+                p(f"| {h:02d}:00 | {hours[h]:,} |")
+            p()
+
+        wh = tod.get("weekday_hour")
+        if wh and wh.get("total", 0) > 0:
+            days = ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"]
+            p(f"### Weekday x hour ({tz_label})")
+            p()
+            header = "| Day | " + " | ".join(f"{h:02d}" for h in range(24)) + " | Total |"
+            sep = "|-----|" + "|".join(["---:"] * 24) + "|------:|"
+            p(header)
+            p(sep)
+            for i, d in enumerate(days):
+                row = wh["matrix"][i]
+                # Zero cells render blank to keep the wide matrix scannable.
+                cells = " | ".join(str(c) if c else "" for c in row)
+                p(f"| {d} | {cells} | **{wh['row_totals'][i]:,}** |")
+            p()
+
+    blocks = report.get("session_blocks", [])
+    summary = report.get("block_summary", {})
+    if blocks:
+        p(f"## 5-hour session blocks ({tz_label})")
+        p()
+        p(f"- Trailing 7 days: **{summary.get('trailing_7', 0)}** blocks")
+        p(f"- Trailing 14 days: **{summary.get('trailing_14', 0)}** blocks")
+        p(f"- Trailing 30 days: **{summary.get('trailing_30', 0)}** blocks")
+        p(f"- All time: **{summary.get('total', len(blocks))}** blocks")
+        p()
+        p(f"| Anchor ({tz_label}) | Duration | Turns | Prompts | Cost | Sessions |")
+        p("|-------------|---------:|------:|--------:|-----:|---------:|")
+        # Newest first, capped at the 12 most recent blocks.
+        for b in reversed(blocks[-12:]):
+            anchor_local = _fmt_epoch_local(b["anchor_epoch"], tz_offset, "%Y-%m-%d %H:%M")
+            p(f"| {anchor_local} | {b['elapsed_min']:.0f}m "
+              f"| {b['turn_count']:,} | {b['user_msg_count']:,} "
+              f"| ${b['cost_usd']:.3f} | {len(b['sessions_touched'])} |")
+        p()
+
+    if report["models"]:
+        p("## Models")
+        p()
+        p("| Model | Turns | $/M in | $/M out | $/M rd | $/M wr |")
+        p("|-------|------:|------:|------:|------:|------:|")
+        for m, cnt in sorted(report["models"].items(), key=lambda x: -x[1]):
+            r = _pricing_for(m)
+            p(f"| `{m}` | {cnt:,} | ${r['input']:.2f} | ${r['output']:.2f} | ${r['cache_read']:.2f} | ${r['cache_write']:.2f} |")
+        p()
+
+    # Phase-A (v1.6.0) sections: skill / subagent / cache-break tables.
+    by_skill_rows = report.get("by_skill") or []
+    if by_skill_rows:
+        p("## Skills & slash commands")
+        p()
+        p("| Name | Invocations | Turns | Input | Output | % cached | Cost $ | % of total |")
+        p("|------|------------:|------:|------:|------:|--------:|------:|-----------:|")
+        for r in by_skill_rows:
+            p(f"| `{r.get('name', '')}` | {int(r.get('invocations', 0)):,} "
+              f"| {int(r.get('turns_attributed', 0)):,} "
+              f"| {int(r.get('input', 0)):,} "
+              f"| {int(r.get('output', 0)):,} "
+              f"| {float(r.get('cache_hit_pct', 0.0)):.1f}% "
+              f"| ${float(r.get('cost_usd', 0.0)):.4f} "
+              f"| {float(r.get('pct_total_cost', 0.0)):.2f}% |")
+        p()
+
+    by_subagent_rows = report.get("by_subagent_type") or []
+    if by_subagent_rows:
+        p("## Subagent types")
+        p()
+        # v1.26.0: extra warm-up columns visible only when per-invocation
+        # data was actually observed (i.e. ``--include-subagents`` was on
+        # AND the loader saw subagent JSONL turns).
+        _show_warm = bool(report.get("include_subagents")) and any(
+            int(r.get("invocation_count", 0)) > 0 for r in by_subagent_rows
+        )
+        if _show_warm:
+            p("| Subagent | Spawns | Turns | Input | Output | % cached "
+              "| Avg/call | Cost $ | % of total | First-turn % | SP amortised % |")
+            p("|----------|-------:|------:|------:|------:|--------:|"
+              "--------:|------:|-----------:|-------------:|---------------:|")
+        else:
+            p("| Subagent | Spawns | Turns | Input | Output | % cached | Avg/call | Cost $ | % of total |")
+            p("|----------|-------:|------:|------:|------:|--------:|--------:|------:|-----------:|")
+        for r in by_subagent_rows:
+            base = (
+                f"| `{r.get('name', '')}` | {int(r.get('spawn_count', 0)):,} "
+                f"| {int(r.get('turns_attributed', 0)):,} "
+                f"| {int(r.get('input', 0)):,} "
+                f"| {int(r.get('output', 0)):,} "
+                f"| {float(r.get('cache_hit_pct', 0.0)):.1f}% "
+                f"| {float(r.get('avg_tokens_per_call', 0.0)):,.0f} "
+                f"| ${float(r.get('cost_usd', 0.0)):.4f} "
+                f"| {float(r.get('pct_total_cost', 0.0)):.2f}% "
+            )
+            if _show_warm:
+                inv_n = int(r.get("invocation_count", 0))
+                if inv_n > 0:
+                    base += (
+                        f"| {float(r.get('first_turn_share_pct', 0.0)):.1f}% "
+                        f"| {float(r.get('sp_amortisation_pct', 0.0)):.1f}% |"
+                    )
+                else:
+                    # Row has no invocation data — dash out the warm-up cells.
+                    base += "| — | — |"
+            else:
+                base += "|"
+            p(base)
+        p()
+
+    # Within-session spawning split — descriptive contrast that holds
+    # task / model / context constant. Only renders for sessions with
+    # ≥3 spawning AND ≥3 non-spawning turns (median needs a floor).
+    _ws_split = _compute_within_session_split(report.get("sessions") or [])
+    _ws_split_md = _build_within_session_split_md(_ws_split)
+    if _ws_split_md:
+        p(_ws_split_md)
+
+    cache_breaks_rows = report.get("cache_breaks") or []
+    if cache_breaks_rows:
+        threshold = int(report.get("cache_break_threshold",
+                                   _CACHE_BREAK_DEFAULT_THRESHOLD))
+        p(f"## Cache breaks (> {threshold:,} uncached)")
+        p()
+        p(f"{len(cache_breaks_rows)} event{'s' if len(cache_breaks_rows) != 1 else ''} "
+          f"— single turns where `input + cache_creation` exceeded the threshold. "
+          f"Each row names *which* turn lost the cache.")
+        p()
+        p("| Uncached | % | When | Session | Prompt |")
+        p("|---------:|--:|------|---------|--------|")
+        for cb in cache_breaks_rows[:25]:
+            sid8 = (cb.get("session_id") or "")[:8]
+            # Escape pipes so the snippet can't break the Markdown table.
+            snippet = (cb.get("prompt_snippet") or "").replace("|", "\\|")[:120]
+            p(f"| {int(cb.get('uncached', 0)):,} "
+              f"| {float(cb.get('cache_break_pct', 0.0)):.0f}% "
+              f"| {cb.get('timestamp_fmt') or cb.get('timestamp', '')} "
+              f"| `{sid8}` "
+              f"| {snippet} |")
+        if len(cache_breaks_rows) > 25:
+            p()
+            p(f"_Showing top 25 of {len(cache_breaks_rows)} — raw list available in JSON export._")
+        p()
+
+    has_1h_cache = _has_1h_cache(report)
+    has_content = _has_content_blocks(report)
+    p("## Column legend")
+    p()
+    p("- **#** — deduplicated turn index")
+    p(f"- **Time** — turn start, local tz ({tz_label})")
+    p("- **Input (new)** — net new input tokens (uncached)")
+    p("- **Output** — generated tokens (includes thinking + tool_use block tokens)")
+    p("- **CacheRd** — tokens read from cache (cheap)")
+    if has_1h_cache:
+        p("- **CacheWr** — tokens written to cache; `*` suffix marks turns that used the 1-hour TTL tier")
+    else:
+        p("- **CacheWr** — tokens written to cache (one-time)")
+    p("- **Total** — sum of the four billable token buckets")
+    p("- **Cost $** — estimated USD for this turn")
+    if has_content:
+        p("- **Content** — per-turn content blocks: `T` thinking, `u` tool_use, "
+          "`x` text, `r` tool_result, `i` image, `v` server_tool_use, "
+          "`R` advisor_tool_result (zero counts omitted)")
+    p()
+
+    for i, s in enumerate(report["sessions"], 1):
+        if mode == "project":
+            st = s["subtotal"]
+            p(f"## Session {i}: `{s['session_id'][:8]}…`")
+            p()
+            p(f"{s['first_ts']} → {s['last_ts']} · {len(s['turns'])} turns · **${st['cost']:.4f}**")
+            p()
+
+        if has_content:
+            p(f"| # | Time ({tz_label}) | Input (new) | Output | CacheRd | CacheWr | Total | Cost $ | Content |")
+            p("|--:|-----------|------------:|------:|--------:|--------:|------:|-------:|:--------|")
+        else:
+            p(f"| # | Time ({tz_label}) | Input (new) | Output | CacheRd | CacheWr | Total | Cost $ |")
+            p("|--:|-----------|------------:|------:|--------:|--------:|------:|-------:|")
+        for t in s["turns"]:
+            ttl = t.get("cache_write_ttl", "")
+            cwr_cell = f"{t['cache_write_tokens']:,}" + ("*" if ttl in ("1h", "mix") else "")
+            row = (f"| {t['index']} | {t['timestamp_fmt']} "
+                   f"| {t['input_tokens']:,} | {t['output_tokens']:,} "
+                   f"| {t['cache_read_tokens']:,} | {cwr_cell} "
+                   f"| {t['total_tokens']:,} | ${t['cost_usd']:.4f} |")
+            if has_content:
+                row += f" {_fmt_content_cell(t.get('content_blocks') or {})} |"
+            p(row)
+        st = s["subtotal"]
+        st_cwr_cell = f"{st['cache_write']:,}" + ("*" if st.get("cache_write_1h", 0) > 0 else "")
+        trow = (f"| **TOT** | | **{st['input']:,}** | **{st['output']:,}** "
+                f"| **{st['cache_read']:,}** | **{st_cwr_cell}** "
+                f"| **{st['total']:,}** | **${st['cost']:.4f}** |")
+        if has_content:
+            trow += " |"
+        p(trow)
+        if st.get("cache_write_1h", 0) > 0:
+            p()
+            p(f"_`*` = cache write includes the 1-hour TTL tier "
+              f"(5m: {st.get('cache_write_5m', 0):,}, 1h: {st['cache_write_1h']:,} tokens)._")
+        p()
+
+    return out.getvalue()
+
+
+def _session_duration_stats(session: dict) -> dict | None:
+ """Per-session wall-clock + burn rate derived from turn timestamps.
+
+ Returns None when fewer than 2 turns have usable timestamps. Burn rate
+ metrics are clamped so a single-turn session doesn't divide by zero.
+ """
+ turns = session.get("turns", [])
+ epochs = [_parse_iso_epoch(t.get("timestamp", "")) for t in turns]
+ epochs = [e for e in epochs if e]
+ if len(epochs) < 2:
+ return None
+ first, last = min(epochs), max(epochs)
+ wall_sec = last - first
+ wall_min = wall_sec / 60.0
+ st = session["subtotal"]
+ minutes = max(1e-6, wall_min)
+ return {
+ "first_epoch": first,
+ "last_epoch": last,
+ "wall_sec": wall_sec,
+ "wall_min": wall_min,
+ "tokens_per_min": st["total"] / minutes,
+ "cost_per_min": st["cost"] / minutes,
+ "turns": st["turns"],
+ }
+
+
+def _fmt_duration(sec: int) -> str:
+ """Format ``sec`` as a compact duration (``1h23m``, ``45m12s``, ``7s``)."""
+ if sec < 60:
+ return f"{sec}s"
+ if sec < 3600:
+ return f"{sec // 60}m{sec % 60:02d}s"
+ hours, rem = divmod(sec, 3600)
+ return f"{hours}h{rem // 60:02d}m"
+
+
+def _build_session_duration_html(sessions: list[dict], tz_label: str,
+ tz_offset_hours: float) -> str:
+ """Build a per-session duration + burn-rate card.
+
+ Shows the most-recent 10 sessions (newest first) with wall-clock time,
+ turn count, total cost, tokens/min, and cost/min. Answers "how much
+ am I spending per active minute" for a given session.
+ """
+ rows_data = []
+ for s in sessions:
+ stats = _session_duration_stats(s)
+ if not stats:
+ continue
+ rows_data.append((s, stats))
+ if not rows_data:
+ return ""
+ offset_sec = int(tz_offset_hours * 3600)
+
+ def fmt_local(epoch: int) -> str:
+ return datetime.fromtimestamp(
+ epoch + offset_sec, tz=timezone.utc,
+ ).strftime("%Y-%m-%d %H:%M")
+
+ rows_data.sort(key=lambda x: x[1]["last_epoch"], reverse=True)
+ rows_data = rows_data[:10]
+ rows_html = []
+ for s, st in rows_data:
+ sid = s["session_id"][:8]
+ rows_html.append(
+ f'| Session | First turn ({tz_label}) | ' + f'Wall | Turns | ' + f'Cost | tok/min | $/min | \n' + f'
|---|
| Metric | Last 7d | ' + 'Prior 7d | \u0394 | ' + '
|---|
{name}| Name | ' + f'Invocations | ' + f'Turns | ' + f'Input | ' + f'% cached | ' + f'Output | ' + f'Total | ' + f'Cost $ | ' + f'% of total | ' + f'
|---|
{name}| Subagent type | ' + f'Spawns | ' + f'Turns | ' + f'Input | ' + f'% cached | ' + f'Output | ' + f'Total | ' + f'Avg / call | ' + f'Cost $ | ' + f'% of total | ' + f'{warmup_headers}' + f'
|---|
--include-subagents| Signal | Detail |
|---|
{html_mod.escape(sid)}…' + 'Per session, median combined turn cost (parent direct ' + '+ attributed subagent) on turns that spawned a subagent vs. ' + 'turns that did not. Holds task / model / context constant — ' + 'but users tend to delegate the hardest sub-tasks, so this ' + 'still has within-session selection bias and is not ' + 'a counterfactual estimate of "what the same work would have ' + 'cost in the main context".
\n' + '| Session | ' + 'Spawning turns | ' + 'Non-spawning turns | ' + 'Median (spawn) | ' + 'Median (no spawn) | ' + 'Δ (spawn − no spawn) | ' + 'Spawn-turn cost share | ' + '
|---|
/{html_mod.escape(slash)}' if slash else "")
+ context_rows.append(
+ f'{sid8} · {ts}'
+ f' · {snippet}'
+ f'Showing top {max_rows} of {len(breaks)} — ' + f'raw list available in JSON export.
') + return ( + f'{top_h}{top_b}
\n' + f'{top_h}{top_b}
\n' + f'| Category | Turns | % |
|---|
| Length | Turn indices | ' + 'Cost $ |
|---|
{retry["chain_count"]} chain{"s" if retry["chain_count"] != 1 else ""} ' + f'detected · {cost_pct:.1f}% of session cost
' + f'{chain_table}' + f'| File | Reads | ' + 'Cost $ |
|---|
{reaccess["reaccessed_count"]} file{"s" if reaccess["reaccessed_count"] != 1 else ""} ' + f're-read 2+ times · ${tot_cost:.4f} total
' + f'{ra_table}' + f'{verbose["verbose_count"]} Edit turn{"s" if verbose["verbose_count"] != 1 else ""} ' + f'with output > 800 tokens · ${v_tot:.4f} total
' + f'{" · ".join(sr_parts)}{warning}
' + f'