Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
34 changes: 24 additions & 10 deletions internal/files/daemon.go
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,10 @@ type DaemonConfig struct {
FSWatch bool
PollInterval time.Duration
LogFunc func(string, ...interface{})
OnReady func()
// OnReady is called once after the initial generate completes.
OnReady func(GraphStats)
// OnUpdate is called after each incremental update completes.
OnUpdate func(GraphStats)
}

// Daemon watches for file changes and keeps sidecars fresh.
Expand All @@ -35,9 +38,10 @@ type Daemon struct {
cache *Cache
logf func(string, ...interface{})

mu sync.Mutex
ir *api.SidecarIR
notifyCh chan string
mu sync.Mutex
ir *api.SidecarIR
notifyCh chan string
loadedCache bool // true if startup data came from local cache
}

// NewDaemon creates a daemon with the given config and API client.
Expand Down Expand Up @@ -70,7 +74,12 @@ func (d *Daemon) Run(ctx context.Context) error {
if err := d.loadOrGenerate(ctx); err != nil {
return fmt.Errorf("startup: %w", err)
}
d.writeStatus(fmt.Sprintf("ready — %s — %d nodes",

d.mu.Lock()
stats := computeStats(d.ir, d.cache)
stats.FromCache = d.loadedCache
d.mu.Unlock()
d.writeStatus(fmt.Sprintf("ready — %s — %d files",
time.Now().Format(time.RFC3339), len(d.ir.Graph.Nodes)))

d.logf("[step:2] Starting listeners")
Expand All @@ -90,7 +99,7 @@ func (d *Daemon) Run(ctx context.Context) error {
d.logf("[step:3] Ready — listening on UDP :%d (debounce %s)", d.cfg.NotifyPort, d.cfg.Debounce)
}
if d.cfg.OnReady != nil {
d.cfg.OnReady()
d.cfg.OnReady(stats)
}

var (
Expand Down Expand Up @@ -147,6 +156,7 @@ func (d *Daemon) loadOrGenerate(ctx context.Context) error {
d.ir = &ir
d.cache = NewCache()
d.cache.Build(&ir)
d.loadedCache = true
d.mu.Unlock()

files := d.cache.SourceFiles()
Expand Down Expand Up @@ -241,16 +251,20 @@ func (d *Daemon) incrementalUpdate(ctx context.Context, changedFiles []string) {

d.logf("Updated %d sidecars", written)

var nodeCount int
var updateStats GraphStats
func() {
d.mu.Lock()
defer d.mu.Unlock()
d.saveCache()
nodeCount = len(d.ir.Graph.Nodes)
updateStats = computeStats(d.ir, d.cache)
}()

d.writeStatus(fmt.Sprintf("ready — %s — %d nodes",
time.Now().Format(time.RFC3339), nodeCount))
d.writeStatus(fmt.Sprintf("ready — %s — %d files",
time.Now().Format(time.RFC3339), updateStats.SourceFiles))

if d.cfg.OnUpdate != nil {
d.cfg.OnUpdate(updateStats)
}
}

// saveCache writes the current merged SidecarIR to the cache file. Must be called with d.mu held.
Expand Down
28 changes: 28 additions & 0 deletions internal/files/graph.go
Original file line number Diff line number Diff line change
Expand Up @@ -45,6 +45,34 @@ type Cache struct {
FileDomain map[string]string // filePath → domain name
}

// GraphStats summarises what was mapped after a generate or incremental update.
type GraphStats struct {
	SourceFiles   int // count of graph nodes carrying the "File" label
	Functions     int // count of graph nodes carrying the "Function" label
	Relationships int // total number of edges in the graph
	DeadFunctionCount int // functions with no callers (proxy for unreachable code)
	FromCache bool // true when data was loaded from a local cache
}
Comment on lines +48 to +55
Copy link
Copy Markdown

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟠 Major

GraphStats is missing issue-requested cycle metrics.

The linked objective calls for cycle/dead-function visibility, but the new stats shape only covers dead functions (via DeadFunctionCount) alongside files/functions/relationships — cycle visibility is still absent. That leaves the feature partially delivered for the requested summary.

🤖 Prompt for AI Agents
Verify each finding against the current code and only fix it if needed.

In `@internal/files/graph.go` around lines 48 - 54, GraphStats already exposes
dead-function visibility via DeadFunctionCount but is missing cycle metrics;
extend the GraphStats struct by adding fields to capture cycle visibility
(e.g., CycleCount int, and optionally Cycles [][]string as appropriate for
your consumers), then update all places that construct/populate GraphStats
(functions that return or aggregate GraphStats) to compute and set these new
fields and ensure any JSON/serialization usage is updated to include them
(look for usages of the GraphStats type and builders that set
SourceFiles/Functions/Relationships/DeadFunctionCount/FromCache).


// computeStats derives a GraphStats from a SidecarIR and its built Cache.
func computeStats(ir *api.SidecarIR, c *Cache) GraphStats {
	stats := GraphStats{Relationships: len(ir.Graph.Relationships)}
	for _, node := range ir.Graph.Nodes {
		// A node labelled "File" is counted as a source file and nothing else,
		// even if it also carries other labels.
		if node.HasLabel("File") {
			stats.SourceFiles++
			continue
		}
		if !node.HasLabel("Function") {
			continue
		}
		stats.Functions++
		// A function with no recorded callers is treated as dead code.
		if len(c.Callers[node.ID]) == 0 {
			stats.DeadFunctionCount++
		}
	}
	return stats
}
Comment on lines +57 to +74
Copy link
Copy Markdown

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟠 Major

SourceFiles count can drift from actual “source files” shown elsewhere.

At Line 63-66, counting raw "File" labels from IR can include nodes that aren’t renderable source files, while the codebase already defines canonical source-file filtering via Cache.SourceFiles(). This can produce misleading watch summaries.

💡 Proposed fix
 func computeStats(ir *api.SidecarIR, c *Cache) GraphStats {
 	s := GraphStats{
 		Relationships: len(ir.Graph.Relationships),
 	}
-	for _, n := range ir.Graph.Nodes {
+	if c != nil {
+		s.SourceFiles = len(c.SourceFiles())
+	}
+	for _, n := range ir.Graph.Nodes {
 		switch {
-		case n.HasLabel("File"):
-			s.SourceFiles++
 		case n.HasLabel("Function"):
 			s.Functions++
 		}
 	}
-	_ = c // reserved for future per-file breakdown
 	return s
 }
📝 Committable suggestion

‼️ IMPORTANT
Carefully review the code before committing. Ensure that it accurately replaces the highlighted code, contains no missing lines, and has no issues with indentation. Thoroughly test & benchmark the code to ensure it meets the requirements.

Suggested change
// computeStats derives a GraphStats from a SidecarIR and its built Cache.
func computeStats(ir *api.SidecarIR, c *Cache) GraphStats {
s := GraphStats{
Relationships: len(ir.Graph.Relationships),
}
for _, n := range ir.Graph.Nodes {
switch {
case n.HasLabel("File"):
s.SourceFiles++
case n.HasLabel("Function"):
s.Functions++
}
}
_ = c // reserved for future per-file breakdown
return s
}
// computeStats derives a GraphStats from a SidecarIR and its built Cache.
func computeStats(ir *api.SidecarIR, c *Cache) GraphStats {
s := GraphStats{
Relationships: len(ir.Graph.Relationships),
}
if c != nil {
s.SourceFiles = len(c.SourceFiles())
}
for _, n := range ir.Graph.Nodes {
switch {
case n.HasLabel("Function"):
s.Functions++
}
}
return s
}
🤖 Prompt for AI Agents
Verify each finding against the current code and only fix it if needed.

In `@internal/files/graph.go` around lines 56 - 71, computeStats currently counts
nodes with the "File" label directly (in the loop over ir.Graph.Nodes) which can
include non-renderable files; instead use the canonical source-file list from
the cache by calling Cache.SourceFiles() to populate GraphStats.SourceFiles
(e.g., set s.SourceFiles = len(c.SourceFiles())) and remove the manual "File"
label increment; ensure you still handle a nil Cache pointer if c can be nil
before calling c.SourceFiles().


// NewCache creates an empty Cache.
func NewCache() *Cache {
return &Cache{
Expand Down
34 changes: 34 additions & 0 deletions internal/files/handler.go
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,15 @@ import (
"github.com/supermodeltools/cli/internal/ui"
)

// ANSI helpers used only for watch summary output.
const (
ansiReset = "\033[0m"
ansiBold = "\033[1m"
ansiGreen = "\033[32m"
ansiBGreen = "\033[1;32m"
ansiDim = "\033[2m"
)

// GenerateOptions configures the generate command.
type GenerateOptions struct {
Force bool
Expand Down Expand Up @@ -161,6 +170,31 @@ func Watch(ctx context.Context, cfg *config.Config, dir string, opts WatchOption
FSWatch: opts.FSWatch,
PollInterval: pollInterval,
LogFunc: logf,
OnReady: func(s GraphStats) {
src := "fetched"
if s.FromCache {
src = "cached"
}
line := fmt.Sprintf("\n %s✓%s %s%d files%s · %s%d functions%s · %s%d relationships%s",
ansiBGreen, ansiReset,
ansiBold, s.SourceFiles, ansiReset,
ansiBold, s.Functions, ansiReset,
ansiBold, s.Relationships, ansiReset,
)
if s.DeadFunctionCount > 0 {
line += fmt.Sprintf(" · %s%d uncalled%s", ansiBold, s.DeadFunctionCount, ansiReset)
}
line += fmt.Sprintf(" %s(%s)%s\n\n", ansiDim, src, ansiReset)
fmt.Print(line)
},
OnUpdate: func(s GraphStats) {
fmt.Printf(" %s✓%s Updated — %s%d files%s · %s%d functions%s · %s%d relationships%s\n",
ansiGreen, ansiReset,
ansiBold, s.SourceFiles, ansiReset,
ansiBold, s.Functions, ansiReset,
ansiBold, s.Relationships, ansiReset,
)
},
}

d := NewDaemon(daemonCfg, client)
Expand Down
Loading