Phase 2: analysis layer (analyze.js), cache guard, log hygiene

- analyze.js: burn rate, weekly reconstruction, cycle stagger, rotation
  rank, underspend alerts, log prune with weekly archive
- logger.js: getCachedRun(maxAgeMinutes) — skip probing if recent data exists
- monitor.js: cache guard at wake — 20-min dedup, zero extra API calls
- test.js: fix type assertion for gemini-api/xai-api providers (+5 passing);
  add 14 new tests for cache guard and analyze.js (162 total, all green)
- docs/analyze.md: usage reference

Co-authored-by: Hannibal Smith <hannibal@trentuna.com>
This commit is contained in:
Hannibal Smith 2026-04-05 04:49:05 +00:00
parent 1b4e299461
commit 34898b1196
Signed by: hannibal
GPG key ID: 6EB37F7E6190AF1C
6 changed files with 745 additions and 2 deletions

546
analyze.js Normal file
View file

@ -0,0 +1,546 @@
#!/usr/bin/env node
/**
* analyze.js — Token Monitor analysis CLI
*
* Reads accumulated JSONL logs from ~/.logs/token-monitor/ and produces:
* - Burn rates per account (utilization delta over time)
* - Weekly budget reconstruction
* - Cycle stagger view (next 48h resets)
* - Rotation recommendations (rule-based)
* - Underspend alerts
* - Log hygiene (--prune)
*
* Usage:
* node analyze.js # full report
* node analyze.js --burn-rate # burn rate section only
* node analyze.js --weekly # weekly reconstruction only
* node analyze.js --stagger # cycle stagger only
* node analyze.js --rotation # rotation recommendation only
* node analyze.js --json # JSON output (all sections)
* node analyze.js --provider team-nadja # filter to one provider
* node analyze.js --prune [--dry-run] # log hygiene
*/
import {
readdirSync, readFileSync, writeFileSync, mkdirSync, unlinkSync, existsSync,
} from 'fs';
import { homedir } from 'os';
import { join } from 'path';
const LOG_DIR = join(homedir(), '.logs', 'token-monitor');
const TEAMS = ['team-vigilio', 'team-ludo', 'team-molto', 'team-nadja', 'team-buio'];
// ── Load logs ────────────────────────────────────────────────────────────────
/**
 * Load every daily JSONL log file from LOG_DIR, oldest file first.
 *
 * Malformed lines and "test/empty" entries (no provider carrying a `type`
 * field) are skipped. When providerFilter is given, only entries that
 * mention that provider are kept.
 *
 * @param {?string} providerFilter - Provider name to filter on, or null.
 * @returns {Array<object>} Entries sorted ascending by their `ts` field.
 */
function loadLogs(providerFilter = null) {
  if (!existsSync(LOG_DIR)) return [];
  const dailyFiles = readdirSync(LOG_DIR)
    .filter((name) => /^\d{4}-\d{2}-\d{2}\.jsonl$/.test(name))
    .sort();
  const parsed = [];
  for (const name of dailyFiles) {
    const raw = readFileSync(join(LOG_DIR, name), 'utf-8').trim();
    if (!raw) continue;
    for (const line of raw.split('\n')) {
      if (!line) continue;
      let entry;
      try {
        entry = JSON.parse(line);
      } catch {
        continue; // skip malformed lines
      }
      const providers = entry.providers || {};
      // Real entries have at least one provider with a type; drop the rest.
      const hasTypedProvider = Object.values(providers).some((p) => p && p.type);
      if (!hasTypedProvider) continue;
      if (providerFilter && !providers[providerFilter]) continue;
      parsed.push(entry);
    }
  }
  return parsed.sort((a, b) => a.ts.localeCompare(b.ts));
}
// ── Helpers ───────────────────────────────────────────────────────────────────
/**
 * Render a duration in seconds as a compact human string:
 * "2h 5m", "2h", "5m", or "42s". Returns "?" for null/NaN/negative input.
 */
function formatDuration(seconds) {
  if (seconds == null || isNaN(seconds) || seconds < 0) return '?';
  const hours = Math.floor(seconds / 3600);
  const minutes = Math.floor((seconds % 3600) / 60);
  if (hours > 0) {
    const minutePart = minutes > 0 ? `${minutes}m` : '';
    return `${hours}h ${minutePart}`.trim();
  }
  if (minutes > 0) return `${minutes}m`;
  return `${Math.round(seconds)}s`;
}
/** Format a 0..1 fraction as a rounded whole-number percent; "?" when nullish. */
function pct(value) {
  if (value == null) return '?';
  const percent = Math.round(value * 100);
  return `${percent}%`;
}
/**
 * Return the ISO-8601 week label ("YYYY-WNN") for a date string.
 * Uses the standard trick: shift to the Thursday of the date's week, then
 * count weeks from Jan 4 of that Thursday's year (always inside week 1).
 */
function getISOWeek(dateStr) {
  const date = new Date(dateStr);
  date.setUTCHours(12, 0, 0, 0); // noon avoids any boundary drift
  const daysSinceMonday = (date.getUTCDay() + 6) % 7;
  date.setUTCDate(date.getUTCDate() - daysSinceMonday + 3); // Thursday of this week
  const isoYear = date.getUTCFullYear();
  const jan4 = new Date(Date.UTC(isoYear, 0, 4));
  const dayDiff = (date - jan4) / 86400000;
  const weekNum = 1 + Math.round((dayDiff - 3 + (jan4.getUTCDay() + 6) % 7) / 7);
  return `${isoYear}-W${String(weekNum).padStart(2, '0')}`;
}
/**
 * Reduce entries to the most recent sample per provider name.
 * When typeFilter is given, providers of other types are ignored.
 *
 * @returns {Object<string, {ts: string, p: object}>} name → latest sample.
 */
function latestPerProvider(entries, typeFilter = null) {
  const latestByName = {};
  for (const entry of entries) {
    const providers = entry.providers || {};
    for (const [name, provider] of Object.entries(providers)) {
      if (typeFilter && provider?.type !== typeFilter) continue;
      const current = latestByName[name];
      if (!current || entry.ts > current.ts) {
        latestByName[name] = { ts: entry.ts, p: provider };
      }
    }
  }
  return latestByName;
}
// ── Burn rate ─────────────────────────────────────────────────────────────────
/**
 * Compute the 7-day-window burn rate for one provider from log entries.
 *
 * Takes the first and last samples that carry `utilization_7d` and derives
 * a linear rate (utilization fraction per hour) plus a naive projection of
 * hours until 100% utilization at that rate.
 *
 * @param {Array<object>} entries - Log entries (ascending by ts).
 * @param {string} providerName - Provider key inside entry.providers.
 * @returns {?object} Rate summary, or null with <2 usable samples or when
 *   the samples span less than ~36s (too close for a meaningful rate).
 */
function computeBurnRate(entries, providerName) {
  // Robustness fix: use optional chaining on e.providers so entries without
  // a providers map (possible when called outside the loadLogs pipeline,
  // e.g. from tests) are skipped instead of throwing a TypeError.
  const pts = entries
    .filter((e) => e.providers?.[providerName]?.utilization_7d != null)
    .map((e) => ({
      ts: new Date(e.ts).getTime(),
      util7d: e.providers[providerName].utilization_7d,
    }));
  if (pts.length < 2) return null;
  const first = pts[0];
  const last = pts[pts.length - 1];
  const hours = (last.ts - first.ts) / 3_600_000;
  if (hours < 0.01) return null; // samples too close together
  const rate = (last.util7d - first.util7d) / hours;
  // Only project exhaustion when utilization is actually rising.
  const exhaustion = rate > 0 ? (1 - last.util7d) / rate : null;
  return {
    rate_per_hour: rate,
    projected_exhaustion_hours: exhaustion,
    current_util_7d: last.util7d,
    first_util_7d: first.util7d,
    data_points: pts.length,
    hours_elapsed: hours,
    first_ts: new Date(first.ts).toISOString(),
    last_ts: new Date(last.ts).toISOString(),
  };
}
// ── Weekly reconstruction ─────────────────────────────────────────────────────
/**
 * Aggregate teams-direct samples into per-ISO-week, per-provider stats:
 * sample count, peak 5h/7d utilization, average 7d utilization, and the
 * number of exhaustion (status=rejected) samples.
 */
function reconstructWeekly(entries) {
  const weeks = {};
  for (const entry of entries) {
    const weekKey = getISOWeek(entry.ts);
    if (!weeks[weekKey]) weeks[weekKey] = { providers: {} };
    const week = weeks[weekKey];
    const day = entry.ts.slice(0, 10);
    if (!week.start || day < week.start) week.start = day;
    if (!week.end || day > week.end) week.end = day;
    for (const [name, provider] of Object.entries(entry.providers || {})) {
      if (provider?.type !== 'teams-direct') continue;
      if (!week.providers[name]) {
        week.providers[name] = {
          samples: 0, peak_util_5h: 0, peak_util_7d: 0,
          _total_util_7d: 0, exhausted_count: 0,
        };
      }
      const stats = week.providers[name];
      stats.samples += 1;
      if (provider.utilization_5h != null) {
        stats.peak_util_5h = Math.max(stats.peak_util_5h, provider.utilization_5h);
      }
      if (provider.utilization_7d != null) {
        stats.peak_util_7d = Math.max(stats.peak_util_7d, provider.utilization_7d);
        stats._total_util_7d += provider.utilization_7d;
      }
      if (provider.status === 'rejected') stats.exhausted_count += 1;
    }
  }
  // Second pass: turn accumulated totals into averages, drop the scratch field.
  for (const week of Object.values(weeks)) {
    for (const stats of Object.values(week.providers)) {
      stats.avg_util_7d = stats.samples > 0 ? stats._total_util_7d / stats.samples : 0;
      delete stats._total_util_7d;
    }
  }
  return weeks;
}
// ── Cycle stagger ─────────────────────────────────────────────────────────────
/**
 * Project each teams-direct provider's next reset into "now" terms by
 * subtracting the age of the latest sample from its reset_in_seconds.
 * Providers resetting more than 48h out are omitted; result is sorted
 * soonest-first.
 */
function cycleStagger(entries) {
  const latest = latestPerProvider(entries, 'teams-direct');
  const now = Date.now();
  const upcoming = [];
  for (const [provider, record] of Object.entries(latest)) {
    const { ts, p } = record;
    if (p.reset_in_seconds == null) continue;
    const sampleAgeSeconds = (now - new Date(ts).getTime()) / 1000;
    const remaining = Math.max(0, p.reset_in_seconds - sampleAgeSeconds);
    if (remaining > 172_800) continue; // beyond the 48h window
    upcoming.push({
      provider,
      resets_at_iso: new Date(now + remaining * 1000).toISOString(),
      resets_in_seconds_from_now: Math.round(remaining),
    });
  }
  upcoming.sort((a, b) => a.resets_in_seconds_from_now - b.resets_in_seconds_from_now);
  return upcoming;
}
// ── Underspend alerts ─────────────────────────────────────────────────────────
/**
 * Flag active (status=allowed) teams-direct providers that still have
 * >= 40% of their 5h window unused with under 2h until reset — tokens
 * that would otherwise expire unspent.
 */
function underspendAlerts(entries) {
  const latest = latestPerProvider(entries, 'teams-direct');
  const alerts = [];
  for (const [provider, { p }] of Object.entries(latest)) {
    if (p.utilization_5h == null) continue;
    if (p.status !== 'allowed') continue;
    const lowSpend = p.utilization_5h < 0.60;
    const resetSoon = p.reset_in_seconds != null && p.reset_in_seconds < 7200;
    if (lowSpend && resetSoon) {
      alerts.push({ provider, utilization_5h: p.utilization_5h, reset_in_seconds: p.reset_in_seconds });
    }
  }
  return alerts;
}
// ── Rotation rank ─────────────────────────────────────────────────────────────
/**
 * Rank the known team accounts for rotation, best first. Rule order:
 * invalid key (bottom) < maxed/rejected (soonest reset wins tiebreaker)
 * < dormant (held in reserve, fixed score 50) < active (score = headroom).
 */
function rotationRank(entries) {
  const latest = latestPerProvider(entries, 'teams-direct');
  const ranked = [];
  for (const name of TEAMS) {
    const record = latest[name];
    if (!record) continue;
    const p = record.p;
    let verdict;
    if (p.status === 'invalid_key') {
      verdict = { score: -200, reason: '401 invalid key — cannot use', severity: 'unknown' };
    } else if (p.status === 'rejected') {
      const resetIn = p.reset_in_seconds || 999_999;
      verdict = {
        // Among maxed accounts, soonest reset gets slight priority.
        score: -100 + (1 / (resetIn + 1)),
        reason: `MAXED — avoid until reset in ${formatDuration(resetIn)}`,
        severity: 'critical',
      };
    } else if (p.utilization_7d == null && p.utilization_5h == null) {
      verdict = { score: 50, reason: 'DORMANT — hold in reserve for cycle staggering', severity: 'dormant' };
    } else {
      const headroom = 1 - (p.utilization_7d || 0);
      const score = headroom * 100;
      if (score < 30) {
        verdict = { score, reason: `low headroom — 7d: ${pct(p.utilization_7d)}, use cautiously`, severity: 'warning' };
      } else {
        const resetStr = p.reset_in_seconds != null ? `, resets ${formatDuration(p.reset_in_seconds)}` : '';
        verdict = { score, reason: `${pct(headroom)} headroom — 7d: ${pct(p.utilization_7d)}${resetStr}`, severity: 'ok' };
      }
    }
    ranked.push({ provider: name, ...verdict });
  }
  return ranked.sort((a, b) => b.score - a.score);
}
// ── Log hygiene ───────────────────────────────────────────────────────────────
/**
 * Log hygiene: archive daily JSONL files older than 30 days into weekly
 * summary files (weeks/YYYY-WNN.json), then delete the daily files.
 * Existing weekly summaries are merged (weighted by sample count), so
 * repeated prunes are safe.
 *
 * @param {boolean} dryRun - When true, print what would happen; write/delete nothing.
 */
function pruneLogs(dryRun = false) {
if (!existsSync(LOG_DIR)) {
console.log('No log directory — nothing to prune.');
return;
}
const files = readdirSync(LOG_DIR).filter(f => /^\d{4}-\d{2}-\d{2}\.jsonl$/.test(f));
// Filenames are YYYY-MM-DD.jsonl, so plain string comparison against the
// ISO cutoff date selects everything older than 30 days.
const cutoff = new Date(Date.now() - 30 * 86_400_000).toISOString().slice(0, 10);
const toPrune = files.filter(f => f.slice(0, 10) < cutoff);
if (toPrune.length === 0) {
console.log('No files older than 30 days — nothing to prune.');
return;
}
const weeksDir = join(LOG_DIR, 'weeks');
if (!dryRun) mkdirSync(weeksDir, { recursive: true });
// First pass: fold every prunable day into an in-memory per-week aggregate.
const weeklyAgg = {};
for (const file of toPrune) {
const dateStr = file.slice(0, 10);
// Noon timestamp keeps getISOWeek clear of any day-boundary ambiguity.
const week = getISOWeek(dateStr + 'T12:00:00Z');
const content = readFileSync(join(LOG_DIR, file), 'utf-8').trim();
if (!weeklyAgg[week]) {
weeklyAgg[week] = { week, start: dateStr, end: dateStr, providers: {} };
}
const w = weeklyAgg[week];
if (dateStr < w.start) w.start = dateStr;
if (dateStr > w.end) w.end = dateStr;
for (const line of content.split('\n').filter(Boolean)) {
try {
const entry = JSON.parse(line);
for (const [name, p] of Object.entries(entry.providers || {})) {
// Only teams-direct providers are archived, matching reconstructWeekly().
if (p?.type !== 'teams-direct') continue;
if (!w.providers[name]) {
w.providers[name] = {
samples: 0, peak_util_5h: 0, peak_util_7d: 0, avg_util_7d: 0, exhausted_count: 0,
};
}
const s = w.providers[name];
if (p.utilization_5h != null) s.peak_util_5h = Math.max(s.peak_util_5h, p.utilization_5h);
if (p.utilization_7d != null) {
s.peak_util_7d = Math.max(s.peak_util_7d, p.utilization_7d);
// Running average, folded in before samples is incremented.
// NOTE(review): the divisor includes samples that had a null
// utilization_7d, which differs slightly from reconstructWeekly's
// total/samples — confirm which definition is intended.
s.avg_util_7d = (s.avg_util_7d * s.samples + p.utilization_7d) / (s.samples + 1);
}
s.samples++;
if (p.status === 'rejected') s.exhausted_count++;
}
} catch { /* skip */ }
}
}
// Second pass: write (or merge into) one JSON summary file per week.
let pruned = 0;
for (const [week, data] of Object.entries(weeklyAgg)) {
const weekFile = join(weeksDir, `${week}.json`);
if (dryRun) {
console.log(`[dry-run] Would write ${weekFile}`);
} else {
if (existsSync(weekFile)) {
// Merge with existing weekly file
const existing = JSON.parse(readFileSync(weekFile, 'utf-8'));
for (const [name, s] of Object.entries(data.providers)) {
if (!existing.providers[name]) { existing.providers[name] = s; continue; }
const e = existing.providers[name];
const totalSamples = e.samples + s.samples;
e.peak_util_5h = Math.max(e.peak_util_5h, s.peak_util_5h);
e.peak_util_7d = Math.max(e.peak_util_7d, s.peak_util_7d);
// Sample-count-weighted combination of the two averages.
e.avg_util_7d = (e.avg_util_7d * e.samples + s.avg_util_7d * s.samples) / totalSamples;
e.samples = totalSamples;
e.exhausted_count += s.exhausted_count;
}
writeFileSync(weekFile, JSON.stringify(existing, null, 2));
} else {
writeFileSync(weekFile, JSON.stringify(data, null, 2));
}
}
}
// Only after the summaries are safely written do we delete daily files.
for (const file of toPrune) {
if (dryRun) {
console.log(`[dry-run] Would delete ${join(LOG_DIR, file)}`);
} else {
unlinkSync(join(LOG_DIR, file));
pruned++;
}
}
if (dryRun) {
console.log(`[dry-run] Would prune ${toPrune.length} file(s) into ${Object.keys(weeklyAgg).length} weekly summary file(s).`);
} else {
console.log(`Pruned ${pruned} file(s) into ${Object.keys(weeklyAgg).length} weekly summary file(s).`);
}
}
// ── Report generation ─────────────────────────────────────────────────────────
/**
 * Build the full multi-section plain-text report.
 *
 * Sections (each omitted when it has no data): burn rate, reset schedule,
 * weekly reconstruction, rotation recommendation, underspend alerts.
 *
 * @param {Array<object>} entries - Log entries sorted ascending by ts.
 * @returns {string} Formatted report text.
 */
function generateFullReport(entries) {
  const ts = new Date().toISOString().replace('T', ' ').replace(/\.\d+Z$/, ' UTC');
  const width = 56;
  const lines = [
    `Token Analysis — ${ts}`,
    '═'.repeat(width),
    '',
  ];
  // ── Burn rates
  lines.push('Burn Rate');
  const latestTeams = latestPerProvider(entries, 'teams-direct');
  let anyTeams = false;
  for (const name of TEAMS) {
    const rec = latestTeams[name];
    if (!rec) continue;
    anyTeams = true;
    const p = rec.p;
    const br = computeBurnRate(entries, name);
    const pad = name.padEnd(16);
    if (p.status === 'invalid_key') {
      lines.push(` ${pad} 401 invalid key`);
    } else if (p.status === 'rejected') {
      lines.push(` ${pad} MAXED — resets in ${formatDuration(p.reset_in_seconds)}`);
    } else if (p.utilization_7d == null && p.utilization_5h == null) {
      lines.push(` ${pad} DORMANT — cycle not started`);
    } else if (br && br.data_points >= 2) {
      const rateStr = `${(br.rate_per_hour * 100).toFixed(1)}%/hr`;
      const exhStr = br.projected_exhaustion_hours != null
        ? `exhausts ~${Math.round(br.projected_exhaustion_hours)}h`
        : 'stable/declining';
      // FIX: the from/to percentages were concatenated with no separator
      // (rendered e.g. "7d: 10%25%"); insert an arrow between them.
      lines.push(` ${pad} 7d: ${pct(br.first_util_7d)} → ${pct(br.current_util_7d)} over ${br.hours_elapsed.toFixed(1)}h = ${rateStr} | ${exhStr} | ${br.data_points} pts`);
    } else {
      lines.push(` ${pad} 7d: ${pct(p.utilization_7d)} (insufficient data for rate)`);
    }
  }
  if (!anyTeams) lines.push(' (no teams data in logs)');
  lines.push('');
  // ── Reset schedule
  const stagger = cycleStagger(entries);
  if (stagger.length > 0) {
    lines.push('Reset Schedule (next 48h)');
    for (const { provider, resets_at_iso, resets_in_seconds_from_now } of stagger) {
      const timeStr = resets_at_iso.slice(11, 16) + ' UTC';
      lines.push(` ${provider.padEnd(16)} ~${formatDuration(resets_in_seconds_from_now).padEnd(10)} (${timeStr})`);
    }
    lines.push('');
  }
  // ── Weekly
  const weekly = reconstructWeekly(entries);
  const weekKeys = Object.keys(weekly).sort();
  if (weekKeys.length > 0) {
    lines.push('Weekly Reconstruction');
    for (const week of weekKeys) {
      const w = weekly[week];
      const note = w.start === w.end ? ' (1 day)' : '';
      lines.push(` ${week}${note}`);
      for (const [name, s] of Object.entries(w.providers)) {
        const exhStr = s.exhausted_count > 0 ? ` | exhausted: ${s.exhausted_count}x` : '';
        lines.push(` ${name.padEnd(14)} peak 7d: ${pct(s.peak_util_7d)} | avg: ${pct(s.avg_util_7d)} | ${s.samples} samples${exhStr}`);
      }
    }
    lines.push('');
  }
  // ── Rotation
  const rotation = rotationRank(entries);
  if (rotation.length > 0) {
    lines.push('Rotation Recommendation');
    rotation.forEach(({ provider, reason, severity }, i) => {
      const icon = severity === 'ok' ? '✓' : severity === 'critical' ? '✗' : severity === 'dormant' ? '~' : '?';
      lines.push(` ${i + 1}. ${provider.padEnd(16)} ${icon} ${reason}`);
    });
    lines.push('');
  }
  // ── Underspend alerts
  const boosts = underspendAlerts(entries);
  if (boosts.length > 0) {
    lines.push('⚡ Underspend Alerts (burn before reset)');
    for (const { provider, utilization_5h, reset_in_seconds } of boosts) {
      lines.push(` ${provider}: 5h at ${pct(utilization_5h)}, resets in ${formatDuration(reset_in_seconds)}`);
    }
    lines.push('');
  }
  if (entries.length === 0) {
    lines.push('No log data found. Run monitor.js to start accumulating data.');
  }
  return lines.join('\n');
}
// ── Main ──────────────────────────────────────────────────────────────────────
// Flag parsing. Section flags select a single report section; with none set
// (and no --prune) the full report is printed. --json overrides sections.
const args = process.argv.slice(2);
const showBurnRate = args.includes('--burn-rate');
const showWeekly = args.includes('--weekly');
const showStagger = args.includes('--stagger');
const showRotation = args.includes('--rotation');
const isJson = args.includes('--json');
const isPrune = args.includes('--prune');
const isDryRun = args.includes('--dry-run');
// NOTE(review): if --provider is the last argument, providerFilter becomes
// undefined, which behaves the same as "no filter" — confirm intended.
const providerIdx = args.indexOf('--provider');
const providerFilter = providerIdx !== -1 ? args[providerIdx + 1] : null;
const showAll = !showBurnRate && !showWeekly && !showStagger && !showRotation && !isPrune;
// --prune short-circuits: log hygiene only, no report, no log loading.
if (isPrune) {
pruneLogs(isDryRun);
process.exit(0);
}
const entries = loadLogs(providerFilter);
// --json emits every section in one machine-readable object and exits.
if (isJson) {
const burnRates = {};
for (const name of TEAMS) {
const br = computeBurnRate(entries, name);
if (br) burnRates[name] = br;
}
console.log(JSON.stringify({
timestamp: new Date().toISOString(),
burn_rates: burnRates,
weekly: reconstructWeekly(entries),
stagger: cycleStagger(entries),
rotation: rotationRank(entries),
underspend_alerts: underspendAlerts(entries),
}, null, 2));
process.exit(0);
}
if (showAll) {
console.log(generateFullReport(entries));
process.exit(0);
}
// Section-specific output
// Multiple section flags may be combined; sections print in fixed order:
// burn rate, weekly, stagger, rotation.
const width = 56;
if (showBurnRate) {
console.log('Burn Rate\n' + '─'.repeat(width));
const latestTeams = latestPerProvider(entries, 'teams-direct');
for (const name of TEAMS) {
const rec = latestTeams[name];
if (!rec) continue;
const br = computeBurnRate(entries, name);
if (br && br.data_points >= 2) {
const exhStr = br.projected_exhaustion_hours != null
? `exhausts ~${Math.round(br.projected_exhaustion_hours)}h`
: 'stable/declining';
console.log(` ${name.padEnd(16)} ${(br.rate_per_hour * 100).toFixed(1)}%/hr | ${exhStr} | ${br.data_points} pts`);
} else {
console.log(` ${name.padEnd(16)} insufficient data`);
}
}
}
if (showWeekly) {
console.log('Weekly Reconstruction\n' + '─'.repeat(width));
const weekly = reconstructWeekly(entries);
for (const [week, w] of Object.entries(weekly).sort()) {
console.log(` ${week}`);
for (const [name, s] of Object.entries(w.providers)) {
console.log(` ${name.padEnd(14)} peak 7d: ${pct(s.peak_util_7d)} | avg: ${pct(s.avg_util_7d)} | ${s.samples} samples`);
}
}
}
if (showStagger) {
console.log('Reset Schedule (next 48h)\n' + '─'.repeat(width));
for (const { provider, resets_in_seconds_from_now, resets_at_iso } of cycleStagger(entries)) {
const timeStr = resets_at_iso.slice(11, 16) + ' UTC';
console.log(` ${provider.padEnd(16)} ~${formatDuration(resets_in_seconds_from_now)} (${timeStr})`);
}
}
if (showRotation) {
console.log('Rotation Recommendation\n' + '─'.repeat(width));
rotationRank(entries).forEach(({ provider, reason, severity }, i) => {
const icon = severity === 'ok' ? '✓' : severity === 'critical' ? '✗' : '~';
console.log(` ${i + 1}. ${provider.padEnd(16)} ${icon} ${reason}`);
});
}

54
docs/analyze.md Normal file
View file

@ -0,0 +1,54 @@
# analyze.js — Token Monitor Analysis CLI
Reads accumulated JSONL logs from `~/.logs/token-monitor/` and produces burn
rates, weekly stats, reset schedule, and rotation recommendations.
## Usage
```
node analyze.js # full report (default)
node analyze.js --burn-rate # burn rate per account only
node analyze.js --weekly # weekly budget reconstruction
node analyze.js --stagger # reset schedule (next 48h)
node analyze.js --rotation # rotation recommendation only
node analyze.js --json # JSON output (all sections)
node analyze.js --provider team-nadja # filter to one provider
node analyze.js --prune # prune logs older than 30 days
node analyze.js --prune --dry-run # dry run — show what would be pruned
```
## Output sections
**Burn Rate** — delta analysis of 7d utilization over time, projected
exhaustion at current rate. Requires ≥ 2 data points per provider.
**Reset Schedule** — providers resetting within the next 48 hours, sorted
ascending by time to reset.
**Weekly Reconstruction** — peak and average 7d utilization per provider per
ISO week. Shows exhaustion events (status=rejected).
**Rotation Recommendation** — ranked provider list. Rules in priority order:
1. Invalid key → bottom (unusable)
2. Maxed/rejected → deprioritize; soonest reset wins tiebreaker
3. Dormant → reserve for cycle staggering
4. Active: rank by headroom (1 - utilization_7d)
**Underspend Alerts** — active accounts with ≥ 40% of 5h window unused and
< 2h until reset. These tokens expire unused — boost them before the reset.
## Log format
Input: `~/.logs/token-monitor/YYYY-MM-DD.jsonl` — one JSON object per line.
Pruned archives: `~/.logs/token-monitor/weeks/YYYY-WNN.json` — weekly
aggregates with peak/avg utilization and sample counts.
## Cadence
Logs are written by `monitor.js` at each Vigilio wake (max once per 20
minutes — cache guard prevents double-logging within a session). Expected:
~15–20 data points/day per active provider, ~100–140/week.
At target cadence, log footprint is < 1MB/month. `--prune` trims files older
than 30 days after archiving them into weekly summaries.

View file

@ -0,0 +1,54 @@
## Pre-Build Gate
**Verdict: PASS**
**Gate assessed:** 2026-04-05 (assessment #7 — replaces #6)
**Mission:** token-monitor-phase2 (trentuna/token-monitor#1)
**Assessor:** Amy Allen
---
### What I checked
1. **Mission spec (Forgejo #1):** ✅ Complete. Four deliverables with operational outcomes: cache guard, analyze.js with six subcommands, log hygiene with prune, token-status.sh integration.
2. **Architecture (Hannibal's comment on #1):** ✅ Thorough. Function signatures (`getCachedRun`, `loadLogs`, `computeBurnRate`, `reconstructWeekly`, `cycleStagger`, `underspendAlerts`, `rotationRank`), CLI interface, output formats, internal module contracts all specified. Single new file + minimal mods to existing code. Low blast radius.
3. **Objective clarity:** ✅ Unambiguous. Each deliverable has specified behavior and output format.
4. **Success criteria testability:** ✅ Five explicit assertions in Hannibal's architecture — all concrete and automatable:
- `node analyze.js` exits 0 with non-empty output from existing log data
- `node analyze.js --rotation` outputs a ranked list
- Two consecutive `node monitor.js --json` within 20 min: second returns cached data, no new JSONL entry
- `node analyze.js --prune --dry-run` reports files without deleting
- `node test.js` still passes
5. **Test baseline (prior Concern 1):** ✅ **RESOLVED.** `test.js:102` whitelist now includes `'gemini-api'` and `'xai-api'`. All 146 tests pass (verified just now). The "tests still pass" criterion is now meetable.
6. **Recon completeness:** ✅ No external unknowns. All data sources are local JSONL files (102 entries across 2 days — sufficient for immediate testing). No Face recon needed. `token-status.sh` already logs by default (no `--no-log` flag present).
7. **Role assignments:** ✅ Explicit in both the issue and architecture comment.
8. **Brief quality per mission-standards.md:**
- [x] Objective describes operational outcome (trend-line intelligence, not just "a script")
- [x] Success criteria are testable assertions (5 concrete checks)
- [x] Role assignments name who does what
- [x] No agent-affecting changes requiring self-verification
### What's clean
- Spec + architecture together form one of the cleanest briefs I've reviewed
- Function signatures, output formats, internal module contracts all specified
- Low blast radius: one new file (`analyze.js`), two small mods (`logger.js`, `monitor.js`), one external mod (`token-status.sh`)
- No external dependencies or API unknowns
- 102 real log entries in `~/.logs/token-monitor/` for immediate testing
- Stealth constraint clearly specified with sound cache guard design
- Prior blocking concern (test baseline) is now resolved
### Outstanding items
None. B.A. is clear to build.
---
*Triple A reporting. Seventh assessment — the test baseline is fixed, all 146 pass, and the spec remains excellent. No concerns remain. Hannibal's architecture is thorough and B.A. has everything he needs. PASS.*

View file

@ -2,7 +2,7 @@
* logger.js — persistent JSONL log to ~/.logs/token-monitor/YYYY-MM-DD.jsonl
*/
import { appendFileSync, mkdirSync } from 'fs';
import { appendFileSync, mkdirSync, existsSync, readFileSync } from 'fs';
import { homedir } from 'os';
import { join } from 'path';
@ -17,3 +17,31 @@ export function getLogPath() {
const today = new Date().toISOString().slice(0, 10);
return join(homedir(), '.logs', 'token-monitor', `${today}.jsonl`);
}
/**
 * Return the most recent logged run from today's JSONL file if it is no
 * older than maxAgeMinutes; otherwise null.
 *
 * Test/empty entries (no provider carrying a `type`) are skipped, so the
 * newest *real* entry alone decides the outcome. Returns null when today's
 * file does not exist or the newest real entry is too old.
 */
export function getCachedRun(maxAgeMinutes = 20) {
  const dir = join(homedir(), '.logs', 'token-monitor');
  const today = new Date().toISOString().slice(0, 10);
  const file = join(dir, `${today}.jsonl`);
  if (!existsSync(file)) return null;
  const lines = readFileSync(file, 'utf-8').trim().split('\n').filter(Boolean);
  // Walk backwards so the newest entries are examined first.
  for (let i = lines.length - 1; i >= 0; i--) {
    let entry;
    try {
      entry = JSON.parse(lines[i]);
    } catch {
      continue; // skip malformed lines
    }
    const providers = entry.providers || {};
    const isReal = Object.values(providers).some((p) => p && p.type);
    if (!isReal) continue;
    const ageMinutes = (Date.now() - new Date(entry.ts).getTime()) / 60000;
    // First real entry decides: fresh enough → cache hit, otherwise miss.
    return ageMinutes <= maxAgeMinutes ? entry : null;
  }
  return null;
}

View file

@ -58,6 +58,20 @@ async function probeProvider(p) {
}
async function main() {
// Cache guard — return last logged run if within 20 minutes (skip on --summary, --no-log, --provider filter)
if (!noLog && !filterProvider && !isSummaryOnly) {
const { getCachedRun } = await import('./logger.js');
const cached = getCachedRun(20);
if (cached) {
if (isJson) {
console.log(JSON.stringify(cached, null, 2));
} else {
console.log(generateReport(cached));
}
return;
}
}
const allProviders = getProviders();
const providerNames = filterProvider
? [filterProvider]

49
test.js
View file

@ -99,7 +99,7 @@ assert('api-ateam in registry', names.includes('api-ateam'));
assert('zai NOT in registry (not anthropic-messages)', !names.includes('zai'));
for (const [name, p] of Object.entries(providers)) {
assert(`${name} has baseUrl`, typeof p.baseUrl === 'string' && p.baseUrl.length > 0);
assert(`${name} has type`, ['teams-direct', 'shelley-proxy', 'api-direct'].includes(p.type));
assert(`${name} has type`, ['teams-direct', 'shelley-proxy', 'api-direct', 'gemini-api', 'xai-api'].includes(p.type));
}
// ── 5. Teams header parser ───────────────────────────────────────────────────
@ -415,6 +415,53 @@ assert('xai 429: severity = critical', x429.severity === 'critical');
const x401 = parseXaiHeaders(null, 401, 'xai-key');
assert('xai 401: status = invalid_key', x401.status === 'invalid_key');
// ── 13. Cache guard (getCachedRun) ─────────────────────────────────────────
console.log('\n── 13. Cache guard ─────────────────────────────────────────────');
const { getCachedRun } = await import('./logger.js');
assert('getCachedRun(0) returns null (zero-minute threshold)', getCachedRun(0) === null);
const cached20 = getCachedRun(20);
assert('getCachedRun(20) returns null or valid object',
cached20 === null || (typeof cached20 === 'object' && typeof cached20.ts === 'string'));
// ── 14. analyze.js smoke tests ───────────────────────────────────────────────
console.log('\n── 14. analyze.js ──────────────────────────────────────────────');
assert('analyze.js exists', existsSync(join(root, 'analyze.js')));
assert('docs/analyze.md exists', existsSync(join(root, 'docs', 'analyze.md')));
const analyzeResult = runSafe('node analyze.js');
assert('analyze.js exits 0', analyzeResult.code === 0,
`exit code: ${analyzeResult.code}\n${analyzeResult.stderr}`);
assert('analyze.js produces output', analyzeResult.stdout.length > 0);
const analyzeJson = runSafe('node analyze.js --json');
assert('analyze.js --json exits 0', analyzeJson.code === 0,
`exit code: ${analyzeJson.code}\n${analyzeJson.stderr}`);
let analyzeData;
try {
analyzeData = JSON.parse(analyzeJson.stdout);
assert('analyze.js --json is valid JSON', true);
} catch (e) {
assert('analyze.js --json is valid JSON', false, e.message);
}
if (analyzeData) {
assert('analyze.js --json has burn_rates', 'burn_rates' in analyzeData);
assert('analyze.js --json has stagger', 'stagger' in analyzeData);
assert('analyze.js --json has rotation', 'rotation' in analyzeData);
assert('analyze.js --json has weekly', 'weekly' in analyzeData);
assert('analyze.js --json stagger is array', Array.isArray(analyzeData.stagger));
assert('analyze.js --json rotation is array', Array.isArray(analyzeData.rotation));
}
const analyzeRotation = runSafe('node analyze.js --rotation');
assert('analyze.js --rotation exits 0', analyzeRotation.code === 0,
`exit code: ${analyzeRotation.code}\n${analyzeRotation.stderr}`);
const analyzePruneDry = runSafe('node analyze.js --prune --dry-run');
assert('analyze.js --prune --dry-run exits 0', analyzePruneDry.code === 0,
`exit code: ${analyzePruneDry.code}\n${analyzePruneDry.stderr}`);
// ── Results ──────────────────────────────────────────────────────────────────
console.log('\n' + '═'.repeat(50));
console.log(`Tests: ${passed + failed} | Passed: ${passed} | Failed: ${failed}`);