diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 2895db7..8236713 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -18,7 +18,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v5 with: - go-version: '1.23' + go-version: '1.25' cache: true - name: Download dependencies diff --git a/cmd/prx/main.go b/cmd/prx/main.go index 0d5c561..6532297 100644 --- a/cmd/prx/main.go +++ b/cmd/prx/main.go @@ -1,4 +1,5 @@ -// Package main provides the prx command-line tool for analyzing GitHub pull requests. +// Package main provides the prx command-line tool for analyzing pull requests +// from GitHub, GitLab, and Codeberg/Gitea. package main import ( @@ -7,31 +8,34 @@ import ( "errors" "flag" "fmt" - "log" "log/slog" - "net/url" "os" - "os/exec" - "strconv" - "strings" "time" "github.com/codeGROOVE-dev/fido/pkg/store/null" "github.com/codeGROOVE-dev/prx/pkg/prx" + "github.com/codeGROOVE-dev/prx/pkg/prx/auth" + "github.com/codeGROOVE-dev/prx/pkg/prx/gitea" + "github.com/codeGROOVE-dev/prx/pkg/prx/github" + "github.com/codeGROOVE-dev/prx/pkg/prx/gitlab" ) -const ( - expectedURLParts = 4 - pullPathIndex = 2 - pullPathValue = "pull" +var ( + debug = flag.Bool("debug", false, "Enable debug logging") + noCache = flag.Bool("no-cache", false, "Disable caching") + referenceTimeStr = flag.String("reference-time", "", "Reference time for cache validation (RFC3339 format)") ) func main() { - debug := flag.Bool("debug", false, "Enable debug logging") - noCache := flag.Bool("no-cache", false, "Disable caching") - referenceTimeStr := flag.String("reference-time", "", "Reference time for cache validation (RFC3339 format, e.g., 2025-03-16T06:18:08Z)") flag.Parse() + if err := run(); err != nil { + fmt.Fprintf(os.Stderr, "prx: %v\n", err) + os.Exit(1) + } +} + +func run() error { if *debug { slog.SetDefault(slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{ Level: slog.LevelDebug, @@ -39,9 +43,8 @@ func main() { } if flag.NArg() != 1 { - fmt.Fprintf(os.Stderr, "Usage: %s [--debug] [--no-cache] [--reference-time=TIME] \n", os.Args[0]) - fmt.Fprintf(os.Stderr, "Example: %s https://github.com/golang/go/pull/12345\n", os.Args[0]) - os.Exit(1) + printUsage() + return errors.New("expected exactly one argument (pull request URL)") } // Parse reference time if provided @@ -50,91 +53,89 @@ func main() { var err error referenceTime, err = time.Parse(time.RFC3339, *referenceTimeStr) if err != nil { - log.Printf("Invalid reference time format (use RFC3339, e.g., 2025-03-16T06:18:08Z): %v", err) - os.Exit(1) + return fmt.Errorf("invalid reference time format (use RFC3339, e.g., 2025-03-16T06:18:08Z): %w", err) } } prURL := flag.Arg(0) - owner, repo, prNumber, err := parsePRURL(prURL) + parsed, err := prx.ParseURL(prURL) if err != nil { - log.Printf("Invalid PR URL: %v", err) - os.Exit(1) + return fmt.Errorf("invalid PR URL: %w", err) } - token, err := githubToken() - if err != nil { - log.Printf("Failed to get GitHub token: %v", err) - os.Exit(1) + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) + defer cancel() + + // Resolve token based on platform + resolver := auth.NewResolver() + platform := auth.DetectPlatform(parsed.Platform) + + token, err := resolver.Resolve(ctx, platform, parsed.Host) + // Authentication is optional for public repos on GitLab/Gitea/Codeberg + // Only GitHub strictly requires authentication for most API calls + tokenOptional := parsed.Platform != prx.PlatformGitHub + + if err != nil && !tokenOptional { + return 
fmt.Errorf("authentication failed: %w", err) } - var opts []prx.Option - if *debug { - opts = append(opts, prx.WithLogger(slog.Default())) + var tokenValue string + if token != nil { + tokenValue = token.Value + slog.Debug("Using token", "source", token.Source, "host", token.Host) + } else { + slog.Debug("No authentication token found, proceeding without authentication") } - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) - defer cancel() + // Create platform-specific client + var prxPlatform prx.Platform + switch parsed.Platform { + case prx.PlatformGitHub: + prxPlatform = github.NewPlatform(tokenValue) + case prx.PlatformGitLab: + prxPlatform = gitlab.NewPlatform(tokenValue, gitlab.WithBaseURL("https://"+parsed.Host)) + case prx.PlatformCodeberg: + prxPlatform = gitea.NewCodebergPlatform(tokenValue) + default: + // Self-hosted Gitea + prxPlatform = gitea.NewPlatform(tokenValue, gitea.WithBaseURL("https://"+parsed.Host)) + } // Configure client options + var opts []prx.Option + if *debug { + opts = append(opts, prx.WithLogger(slog.Default())) + } if *noCache { opts = append(opts, prx.WithCacheStore(null.New[string, prx.PullRequestData]())) } - client := prx.NewClient(token, opts...) - data, err := client.PullRequestWithReferenceTime(ctx, owner, repo, prNumber, referenceTime) + client := prx.NewClientWithPlatform(prxPlatform, opts...) + data, err := client.PullRequestWithReferenceTime(ctx, parsed.Owner, parsed.Repo, parsed.Number, referenceTime) if err != nil { - log.Printf("Failed to fetch PR data: %v", err) - cancel() - os.Exit(1) //nolint:gocritic // False positive: cancel() is called immediately before os.Exit() + return fmt.Errorf("failed to fetch PR data: %w", err) } encoder := json.NewEncoder(os.Stdout) if err := encoder.Encode(data); err != nil { - log.Printf("Failed to encode pull request: %v", err) - cancel() - os.Exit(1) - } - - cancel() // Ensure context is cancelled before exit -} - -func githubToken() (string, error) { - cmd := exec.CommandContext(context.Background(), "gh", "auth", "token") - output, err := cmd.Output() - if err != nil { - return "", fmt.Errorf("failed to run 'gh auth token': %w", err) - } - - token := strings.TrimSpace(string(output)) - if token == "" { - return "", errors.New("no token returned by 'gh auth token'") + return fmt.Errorf("failed to encode pull request: %w", err) } - return token, nil + return nil } -//nolint:revive // function-result-limit: function needs all 4 return values -func parsePRURL(prURL string) (owner, repo string, prNumber int, err error) { - u, err := url.Parse(prURL) - if err != nil { - return "", "", 0, err - } - - if u.Host != "github.com" { - return "", "", 0, errors.New("not a GitHub URL") - } - - parts := strings.Split(strings.Trim(u.Path, "/"), "/") - if len(parts) != expectedURLParts || parts[pullPathIndex] != pullPathValue { - return "", "", 0, errors.New("invalid PR URL format") - } - - prNumber, err = strconv.Atoi(parts[3]) - if err != nil { - return "", "", 0, fmt.Errorf("invalid PR number: %w", err) - } - - return parts[0], parts[1], prNumber, nil +func printUsage() { + fmt.Fprintf(os.Stderr, "Usage: %s [options] \n\n", os.Args[0]) + fmt.Fprint(os.Stderr, "Supported platforms:\n") + fmt.Fprint(os.Stderr, " GitHub: https://github.com/owner/repo/pull/123\n") + fmt.Fprint(os.Stderr, " GitLab: https://gitlab.com/owner/repo/-/merge_requests/123\n") + fmt.Fprint(os.Stderr, " Codeberg: https://codeberg.org/owner/repo/pulls/123\n\n") + fmt.Fprint(os.Stderr, "Authentication:\n") + fmt.Fprint(os.Stderr, 
" GitHub: GITHUB_TOKEN env or 'gh auth login'\n") + fmt.Fprint(os.Stderr, " GitLab: GITLAB_TOKEN env or 'glab auth login'\n") + fmt.Fprint(os.Stderr, " Codeberg: CODEBERG_TOKEN env\n") + fmt.Fprint(os.Stderr, " Gitea: GITEA_TOKEN env\n\n") + fmt.Fprint(os.Stderr, "Options:\n") + flag.PrintDefaults() } diff --git a/cmd/prx_compare/main.go b/cmd/prx_compare/main.go index 3ead119..75e2a1a 100644 --- a/cmd/prx_compare/main.go +++ b/cmd/prx_compare/main.go @@ -13,6 +13,7 @@ import ( "strings" "github.com/codeGROOVE-dev/prx/pkg/prx" + "github.com/codeGROOVE-dev/prx/pkg/prx/github" ) const ( @@ -38,7 +39,7 @@ func main() { // Both now use GraphQL, but we'll compare two fetches to ensure consistency fmt.Println("Fetching first time...") - restClient := prx.NewClient(token) + restClient := prx.NewClientWithPlatform(github.NewPlatform(token)) restData, err := restClient.PullRequest(context.TODO(), owner, repo, prNumber) if err != nil { log.Fatalf("First fetch failed: %v", err) @@ -46,7 +47,7 @@ func main() { // Fetch again to compare consistency fmt.Println("Fetching second time...") - graphqlClient := prx.NewClient(token) + graphqlClient := prx.NewClientWithPlatform(github.NewPlatform(token)) graphqlData, err := graphqlClient.PullRequest(context.TODO(), owner, repo, prNumber) if err != nil { log.Fatalf("Second fetch failed: %v", err) diff --git a/go.mod b/go.mod index 47f816d..7757fd4 100644 --- a/go.mod +++ b/go.mod @@ -13,4 +13,5 @@ require ( github.com/codeGROOVE-dev/fido/pkg/store/compress v1.10.0 // indirect github.com/klauspost/compress v1.18.2 // indirect github.com/puzpuzpuz/xsync/v4 v4.2.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/go.sum b/go.sum index fa73ca7..9685683 100644 --- a/go.sum +++ b/go.sum @@ -14,3 +14,6 @@ github.com/pierrec/lz4/v4 v4.1.22 h1:cKFw6uJDK+/gfw5BcDL0JL5aBsAFdsIT18eRtLj7VIU github.com/pierrec/lz4/v4 v4.1.22/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/puzpuzpuz/xsync/v4 v4.2.0 h1:dlxm77dZj2c3rxq0/XNvvUKISAmovoXF4a4qM6Wvkr0= github.com/puzpuzpuz/xsync/v4 v4.2.0/go.mod h1:VJDmTCJMBt8igNxnkQd86r+8KUeN1quSfNKu5bLYFQo= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/pkg/prx/auth/auth.go b/pkg/prx/auth/auth.go new file mode 100644 index 0000000..8148b46 --- /dev/null +++ b/pkg/prx/auth/auth.go @@ -0,0 +1,248 @@ +// Package auth provides token resolution for different git hosting platforms. +// It supports environment variables and CLI tools for GitHub, GitLab, and Gitea/Codeberg. +package auth + +import ( + "context" + "errors" + "fmt" + "os" + "os/exec" + "path/filepath" + "strings" + "time" + + "gopkg.in/yaml.v3" +) + +// Platform identifies a git hosting platform. +type Platform string + +// Platform constants. +const ( + PlatformGitHub Platform = "github" + PlatformGitLab Platform = "gitlab" + PlatformCodeberg Platform = "codeberg" + PlatformGitea Platform = "gitea" +) + +// TokenSource describes where a token was obtained from. +type TokenSource string + +// Token source constants. +const ( + TokenSourceEnv TokenSource = "env" + TokenSourceCLI TokenSource = "cli" + TokenSourceConfig TokenSource = "config" +) + +// Token represents an authentication token with metadata. 
+type Token struct { + Value string + Source TokenSource + Host string // For multi-host platforms like GitLab/Gitea +} + +// ErrNoToken is returned when no token could be found. +var ErrNoToken = errors.New("no authentication token found") + +// Resolver resolves authentication tokens for git hosting platforms. +type Resolver struct { + timeout time.Duration +} + +// NewResolver creates a new token resolver. +func NewResolver() *Resolver { + return &Resolver{ + timeout: 10 * time.Second, + } +} + +// ResolveGitHub returns a GitHub token from GITHUB_TOKEN/GH_TOKEN env or gh CLI. +func (r *Resolver) ResolveGitHub(ctx context.Context) (*Token, error) { + // Check environment variables first + for _, envVar := range []string{"GITHUB_TOKEN", "GH_TOKEN"} { + if token := os.Getenv(envVar); token != "" { + token = strings.TrimSpace(token) + if token != "" { + return &Token{Value: token, Source: TokenSourceEnv, Host: "github.com"}, nil + } + } + } + + // Try gh CLI + token, err := r.runCommand(ctx, "gh", "auth", "token") + if err != nil { + return nil, fmt.Errorf("%w: set GITHUB_TOKEN or run 'gh auth login'", ErrNoToken) + } + + return &Token{Value: token, Source: TokenSourceCLI, Host: "github.com"}, nil +} + +// ResolveGitLab returns a GitLab token from GITLAB_TOKEN/GL_TOKEN env or glab CLI. +func (r *Resolver) ResolveGitLab(ctx context.Context, host string) (*Token, error) { + if host == "" { + host = "gitlab.com" + } + + // Check environment variables first + for _, envVar := range []string{"GITLAB_TOKEN", "GL_TOKEN"} { + if token := os.Getenv(envVar); token != "" { + token = strings.TrimSpace(token) + if token != "" { + return &Token{Value: token, Source: TokenSourceEnv, Host: host}, nil + } + } + } + + // Try glab config get token (most reliable method) + token, err := r.runCommand(ctx, "glab", "config", "get", "token", "--host", host) + if err == nil && token != "" { + return &Token{Value: token, Source: TokenSourceCLI, Host: host}, nil + } + + return nil, fmt.Errorf("%w: set GITLAB_TOKEN or run 'glab auth login'", ErrNoToken) +} + +// ResolveGitea returns a Gitea/Codeberg token from env, tea config, or berg CLI. +func (r *Resolver) ResolveGitea(ctx context.Context, host string) (*Token, error) { + if host == "" { + host = "codeberg.org" + } + + isCodeberg := strings.Contains(host, "codeberg.org") + + // Check environment variables first + envVars := []string{"GITEA_TOKEN"} + if isCodeberg { + envVars = append([]string{"CODEBERG_TOKEN"}, envVars...) + } + + for _, envVar := range envVars { + if token := os.Getenv(envVar); token != "" { + token = strings.TrimSpace(token) + if token != "" { + return &Token{Value: token, Source: TokenSourceEnv, Host: host}, nil + } + } + } + + // Try tea config file (~/.config/tea/config.yml) + if token, err := r.readTeaConfig(host); err == nil && token != "" { + return &Token{Value: token, Source: TokenSourceConfig, Host: host}, nil + } + + // Try berg CLI for Codeberg (codeberg-cli) + if isCodeberg { + if token, err := r.runCommand(ctx, "berg", "auth", "token"); err == nil && token != "" { + return &Token{Value: token, Source: TokenSourceCLI, Host: host}, nil + } + } + + // Build helpful error message + if isCodeberg { + return nil, fmt.Errorf("%w: set CODEBERG_TOKEN, run 'tea login add', or run 'berg auth login'", ErrNoToken) + } + return nil, fmt.Errorf("%w: set GITEA_TOKEN or run 'tea login add'", ErrNoToken) +} + +// teaConfig represents the tea CLI config file structure. 
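// A typical ~/.config/tea/config.yml has the shape below (a sketch inferred from
// the teaConfig/teaLogin fields and the test fixtures; the host and token value
// are illustrative only):
//
//	logins:
//	  - name: codeberg
//	    url: https://codeberg.org
//	    token: example-token
//	    default: true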
+type teaConfig struct { + Logins []teaLogin `yaml:"logins"` +} + +type teaLogin struct { + Name string `yaml:"name"` + URL string `yaml:"url"` + Token string `yaml:"token"` + Default bool `yaml:"default"` +} + +// readTeaConfig reads the tea CLI config file and returns a token for the given host. +// It checks ~/.config/tea/config.yml first (where tea stores its config on all platforms), +// then falls back to the OS-specific config directory. +func (*Resolver) readTeaConfig(host string) (string, error) { + homeDir, err := os.UserHomeDir() + if err != nil { + return "", err + } + + // tea uses ~/.config/tea on all platforms + configPath := filepath.Join(homeDir, ".config", "tea", "config.yml") + data, err := os.ReadFile(configPath) + if err != nil { + return "", err + } + + var config teaConfig + if err := yaml.Unmarshal(data, &config); err != nil { + return "", err + } + + // Normalize host for comparison + host = strings.TrimPrefix(host, "https://") + host = strings.TrimPrefix(host, "http://") + host = strings.TrimSuffix(host, "/") + + // Look for matching login + for i := range config.Logins { + loginHost := strings.TrimPrefix(config.Logins[i].URL, "https://") + loginHost = strings.TrimPrefix(loginHost, "http://") + loginHost = strings.TrimSuffix(loginHost, "/") + + if strings.EqualFold(loginHost, host) && config.Logins[i].Token != "" { + return config.Logins[i].Token, nil + } + } + + // If no exact match, try default login + for i := range config.Logins { + if config.Logins[i].Default && config.Logins[i].Token != "" { + return config.Logins[i].Token, nil + } + } + + return "", errors.New("no matching login found in tea config") +} + +// Resolve automatically detects the platform and returns the appropriate token. +func (r *Resolver) Resolve(ctx context.Context, platform Platform, host string) (*Token, error) { + switch platform { + case PlatformGitHub: + return r.ResolveGitHub(ctx) + case PlatformGitLab: + return r.ResolveGitLab(ctx, host) + case PlatformCodeberg, PlatformGitea: + return r.ResolveGitea(ctx, host) + default: + return nil, fmt.Errorf("unknown platform: %s", platform) + } +} + +// runCommand executes a command and returns trimmed stdout. +func (r *Resolver) runCommand(ctx context.Context, name string, args ...string) (string, error) { + ctx, cancel := context.WithTimeout(ctx, r.timeout) + defer cancel() + + cmd := exec.CommandContext(ctx, name, args...) + output, err := cmd.Output() + if err != nil { + return "", err + } + + return strings.TrimSpace(string(output)), nil +} + +// DetectPlatform converts a prx platform string to an auth Platform. 
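// A minimal resolution sketch, mirroring how cmd/prx wires these pieces together
// (parsed is assumed to come from prx.ParseURL):
//
//	resolver := auth.NewResolver()
//	platform := auth.DetectPlatform(parsed.Platform)
//	token, err := resolver.Resolve(ctx, platform, parsed.Host)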
+func DetectPlatform(platform string) Platform { + switch platform { + case "github": + return PlatformGitHub + case "gitlab": + return PlatformGitLab + case "codeberg": + return PlatformCodeberg + default: + return PlatformGitea + } +} diff --git a/pkg/prx/auth/auth_test.go b/pkg/prx/auth/auth_test.go new file mode 100644 index 0000000..ec72f04 --- /dev/null +++ b/pkg/prx/auth/auth_test.go @@ -0,0 +1,492 @@ +//nolint:errcheck // os.Unsetenv errors are not critical in tests +package auth + +import ( + "context" + "os" + "path/filepath" + "testing" +) + +func TestDetectPlatform(t *testing.T) { + tests := []struct { + input string + want Platform + }{ + {"github", PlatformGitHub}, + {"gitlab", PlatformGitLab}, + {"codeberg", PlatformCodeberg}, + {"gitea", PlatformGitea}, + {"unknown", PlatformGitea}, // Default to Gitea for unknown + } + + for _, tt := range tests { + t.Run(tt.input, func(t *testing.T) { + got := DetectPlatform(tt.input) + if got != tt.want { + t.Errorf("DetectPlatform(%q) = %v, want %v", tt.input, got, tt.want) + } + }) + } +} + +func TestResolver_ResolveGitHub_EnvVar(t *testing.T) { + tests := []struct { + name string + envVar string + envValue string + wantToken string + wantErr bool + }{ + { + name: "GITHUB_TOKEN set", + envVar: "GITHUB_TOKEN", + envValue: "ghp_test123", + wantToken: "ghp_test123", + }, + { + name: "GH_TOKEN set", + envVar: "GH_TOKEN", + envValue: "ghp_fallback456", + wantToken: "ghp_fallback456", + }, + { + name: "token with whitespace", + envVar: "GITHUB_TOKEN", + envValue: " ghp_trimmed \n", + wantToken: "ghp_trimmed", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + os.Unsetenv("GITHUB_TOKEN") + os.Unsetenv("GH_TOKEN") + t.Setenv(tt.envVar, tt.envValue) + + resolver := NewResolver() + token, err := resolver.ResolveGitHub(context.Background()) + + if tt.wantErr { + if err == nil { + t.Error("Expected error, got nil") + } + return + } + + if err != nil { + t.Errorf("Unexpected error: %v", err) + return + } + + if token.Value != tt.wantToken { + t.Errorf("Token = %q, want %q", token.Value, tt.wantToken) + } + if token.Source != TokenSourceEnv { + t.Errorf("Source = %v, want %v", token.Source, TokenSourceEnv) + } + if token.Host != "github.com" { + t.Errorf("Host = %q, want %q", token.Host, "github.com") + } + }) + } +} + +func TestResolver_ResolveGitLab_EnvVar(t *testing.T) { + tests := []struct { + name string + host string + envVar string + envValue string + wantToken string + wantHost string + }{ + { + name: "GITLAB_TOKEN set", + host: "", + envVar: "GITLAB_TOKEN", + envValue: "glpat-test123", + wantToken: "glpat-test123", + wantHost: "gitlab.com", + }, + { + name: "GL_TOKEN set", + host: "", + envVar: "GL_TOKEN", + envValue: "glpat-fallback", + wantToken: "glpat-fallback", + wantHost: "gitlab.com", + }, + { + name: "custom host", + host: "gitlab.example.com", + envVar: "GITLAB_TOKEN", + envValue: "glpat-custom", + wantToken: "glpat-custom", + wantHost: "gitlab.example.com", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + os.Unsetenv("GITLAB_TOKEN") + os.Unsetenv("GL_TOKEN") + t.Setenv(tt.envVar, tt.envValue) + + resolver := NewResolver() + token, err := resolver.ResolveGitLab(context.Background(), tt.host) + if err != nil { + t.Errorf("Unexpected error: %v", err) + return + } + + if token.Value != tt.wantToken { + t.Errorf("Token = %q, want %q", token.Value, tt.wantToken) + } + if token.Source != TokenSourceEnv { + t.Errorf("Source = %v, want %v", token.Source, TokenSourceEnv) + } + if 
token.Host != tt.wantHost { + t.Errorf("Host = %q, want %q", token.Host, tt.wantHost) + } + }) + } +} + +func TestResolver_ResolveGitea_EnvVar(t *testing.T) { + tests := []struct { + name string + host string + envVar string + envValue string + wantToken string + wantHost string + }{ + { + name: "CODEBERG_TOKEN for codeberg.org", + host: "codeberg.org", + envVar: "CODEBERG_TOKEN", + envValue: "codeberg123", + wantToken: "codeberg123", + wantHost: "codeberg.org", + }, + { + name: "GITEA_TOKEN for codeberg.org", + host: "codeberg.org", + envVar: "GITEA_TOKEN", + envValue: "gitea456", + wantToken: "gitea456", + wantHost: "codeberg.org", + }, + { + name: "GITEA_TOKEN for self-hosted", + host: "gitea.example.com", + envVar: "GITEA_TOKEN", + envValue: "selfhosted789", + wantToken: "selfhosted789", + wantHost: "gitea.example.com", + }, + { + name: "empty host defaults to codeberg", + host: "", + envVar: "CODEBERG_TOKEN", + envValue: "default123", + wantToken: "default123", + wantHost: "codeberg.org", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + os.Unsetenv("CODEBERG_TOKEN") + os.Unsetenv("GITEA_TOKEN") + t.Setenv(tt.envVar, tt.envValue) + + resolver := NewResolver() + token, err := resolver.ResolveGitea(context.Background(), tt.host) + if err != nil { + t.Errorf("Unexpected error: %v", err) + return + } + + if token.Value != tt.wantToken { + t.Errorf("Token = %q, want %q", token.Value, tt.wantToken) + } + if token.Source != TokenSourceEnv { + t.Errorf("Source = %v, want %v", token.Source, TokenSourceEnv) + } + if token.Host != tt.wantHost { + t.Errorf("Host = %q, want %q", token.Host, tt.wantHost) + } + }) + } +} + +func TestResolver_ResolveGitea_NoToken(t *testing.T) { + os.Unsetenv("CODEBERG_TOKEN") + os.Unsetenv("GITEA_TOKEN") + + resolver := NewResolver() + + t.Run("codeberg without token", func(t *testing.T) { + _, err := resolver.ResolveGitea(context.Background(), "codeberg.org") + if err == nil { + t.Error("Expected error for missing Codeberg token") + } + }) + + t.Run("gitea without token", func(t *testing.T) { + _, err := resolver.ResolveGitea(context.Background(), "gitea.example.com") + if err == nil { + t.Error("Expected error for missing Gitea token") + } + }) +} + +func TestResolver_Resolve(t *testing.T) { + t.Setenv("GITHUB_TOKEN", "gh_token") + t.Setenv("GITLAB_TOKEN", "gl_token") + t.Setenv("CODEBERG_TOKEN", "cb_token") + + resolver := NewResolver() + ctx := context.Background() + + tests := []struct { + platform Platform + host string + wantToken string + }{ + {PlatformGitHub, "", "gh_token"}, + {PlatformGitLab, "", "gl_token"}, + {PlatformCodeberg, "", "cb_token"}, + {PlatformGitea, "codeberg.org", "cb_token"}, + } + + for _, tt := range tests { + t.Run(string(tt.platform), func(t *testing.T) { + token, err := resolver.Resolve(ctx, tt.platform, tt.host) + if err != nil { + t.Errorf("Unexpected error: %v", err) + return + } + if token.Value != tt.wantToken { + t.Errorf("Token = %q, want %q", token.Value, tt.wantToken) + } + }) + } +} + +func TestResolver_Resolve_UnknownPlatform(t *testing.T) { + resolver := NewResolver() + _, err := resolver.Resolve(context.Background(), "unknown_platform", "") + if err == nil { + t.Error("Expected error for unknown platform") + } +} + +func TestResolver_readTeaConfig(t *testing.T) { + // Create a temp home directory with .config/tea structure + tempDir := t.TempDir() + teaDir := filepath.Join(tempDir, ".config", "tea") + if err := os.MkdirAll(teaDir, 0o755); err != nil { + t.Fatalf("Failed to create tea config 
dir: %v", err) + } + + // Override HOME so UserHomeDir returns our temp dir + t.Setenv("HOME", tempDir) + + resolver := NewResolver() + + // Write a test config file + configPath := filepath.Join(teaDir, "config.yml") + configContent := `logins: + - name: codeberg + url: https://codeberg.org + token: cb_tea_token + default: true + - name: gitea-selfhosted + url: https://gitea.example.com + token: gitea_tea_token + default: false +` + if err := os.WriteFile(configPath, []byte(configContent), 0o644); err != nil { + t.Fatalf("Failed to write config file: %v", err) + } + + t.Run("exact host match", func(t *testing.T) { + token, err := resolver.readTeaConfig("codeberg.org") + if err != nil { + t.Errorf("Unexpected error: %v", err) + return + } + if token != "cb_tea_token" { + t.Errorf("Token = %q, want %q", token, "cb_tea_token") + } + }) + + t.Run("exact host match with https prefix", func(t *testing.T) { + token, err := resolver.readTeaConfig("https://codeberg.org") + if err != nil { + t.Errorf("Unexpected error: %v", err) + return + } + if token != "cb_tea_token" { + t.Errorf("Token = %q, want %q", token, "cb_tea_token") + } + }) + + t.Run("self-hosted gitea match", func(t *testing.T) { + token, err := resolver.readTeaConfig("gitea.example.com") + if err != nil { + t.Errorf("Unexpected error: %v", err) + return + } + if token != "gitea_tea_token" { + t.Errorf("Token = %q, want %q", token, "gitea_tea_token") + } + }) + + t.Run("case insensitive host match", func(t *testing.T) { + token, err := resolver.readTeaConfig("CODEBERG.ORG") + if err != nil { + t.Errorf("Unexpected error: %v", err) + return + } + if token != "cb_tea_token" { + t.Errorf("Token = %q, want %q", token, "cb_tea_token") + } + }) + + t.Run("no matching host uses default", func(t *testing.T) { + // The config has a default login, so unknown hosts should fall back to it + token, err := resolver.readTeaConfig("unknown.example.com") + if err != nil { + t.Errorf("Unexpected error: %v", err) + return + } + if token != "cb_tea_token" { + t.Errorf("Token = %q, want %q (default)", token, "cb_tea_token") + } + }) +} + +func TestResolver_readTeaConfig_DefaultFallback(t *testing.T) { + tempDir := t.TempDir() + teaDir := filepath.Join(tempDir, ".config", "tea") + if err := os.MkdirAll(teaDir, 0o755); err != nil { + t.Fatalf("Failed to create tea config dir: %v", err) + } + t.Setenv("HOME", tempDir) + + // Config with only a default login + configPath := filepath.Join(teaDir, "config.yml") + configContent := `logins: + - name: default-instance + url: https://gitea.default.com + token: default_token + default: true +` + if err := os.WriteFile(configPath, []byte(configContent), 0o644); err != nil { + t.Fatalf("Failed to write config file: %v", err) + } + + resolver := NewResolver() + + // When host doesn't match but there's a default, should return default token + token, err := resolver.readTeaConfig("unknown.host.com") + if err != nil { + t.Errorf("Unexpected error: %v", err) + return + } + if token != "default_token" { + t.Errorf("Token = %q, want %q", token, "default_token") + } +} + +func TestResolver_readTeaConfig_NoDefault(t *testing.T) { + tempDir := t.TempDir() + teaDir := filepath.Join(tempDir, ".config", "tea") + if err := os.MkdirAll(teaDir, 0o755); err != nil { + t.Fatalf("Failed to create tea config dir: %v", err) + } + t.Setenv("HOME", tempDir) + + // Config with NO default login + configPath := filepath.Join(teaDir, "config.yml") + configContent := `logins: + - name: specific-instance + url: https://gitea.specific.com + token: 
specific_token + default: false +` + if err := os.WriteFile(configPath, []byte(configContent), 0o644); err != nil { + t.Fatalf("Failed to write config file: %v", err) + } + + resolver := NewResolver() + + // When host doesn't match and there's no default, should return error + _, err := resolver.readTeaConfig("unknown.host.com") + if err == nil { + t.Error("Expected error when no matching host and no default") + } +} + +func TestResolver_readTeaConfig_InvalidYAML(t *testing.T) { + tempDir := t.TempDir() + teaDir := filepath.Join(tempDir, ".config", "tea") + if err := os.MkdirAll(teaDir, 0o755); err != nil { + t.Fatalf("Failed to create tea config dir: %v", err) + } + t.Setenv("HOME", tempDir) + + // Write invalid YAML + configPath := filepath.Join(teaDir, "config.yml") + if err := os.WriteFile(configPath, []byte("invalid: yaml: content: ["), 0o644); err != nil { + t.Fatalf("Failed to write config file: %v", err) + } + + resolver := NewResolver() + _, err := resolver.readTeaConfig("codeberg.org") + if err == nil { + t.Error("Expected error for invalid YAML") + } +} + +func TestResolver_ResolveGitea_TeaConfig(t *testing.T) { + // Clear env vars + os.Unsetenv("CODEBERG_TOKEN") + os.Unsetenv("GITEA_TOKEN") + + tempDir := t.TempDir() + teaDir := filepath.Join(tempDir, ".config", "tea") + if err := os.MkdirAll(teaDir, 0o755); err != nil { + t.Fatalf("Failed to create tea config dir: %v", err) + } + t.Setenv("HOME", tempDir) + + configPath := filepath.Join(teaDir, "config.yml") + configContent := `logins: + - name: codeberg + url: https://codeberg.org + token: tea_config_token + default: false +` + if err := os.WriteFile(configPath, []byte(configContent), 0o644); err != nil { + t.Fatalf("Failed to write config file: %v", err) + } + + resolver := NewResolver() + token, err := resolver.ResolveGitea(context.Background(), "codeberg.org") + if err != nil { + t.Errorf("Unexpected error: %v", err) + return + } + + if token.Value != "tea_config_token" { + t.Errorf("Token = %q, want %q", token.Value, "tea_config_token") + } + if token.Source != TokenSourceConfig { + t.Errorf("Source = %v, want %v", token.Source, TokenSourceConfig) + } +} diff --git a/pkg/prx/client.go b/pkg/prx/client.go index fa74671..30e3aad 100644 --- a/pkg/prx/client.go +++ b/pkg/prx/client.go @@ -1,6 +1,6 @@ -// Package prx provides a client for fetching GitHub pull request events. -// It includes support for caching API responses to improve performance and -// reduce API rate limit consumption. +// Package prx provides a client for fetching pull request events from code hosting platforms. +// It supports GitHub, GitLab, and Codeberg (Gitea), with caching to improve performance +// and reduce API rate limit consumption. package prx import ( @@ -10,7 +10,6 @@ import ( "errors" "fmt" "log/slog" - "net/http" "os" "path/filepath" "strconv" @@ -19,41 +18,24 @@ import ( "github.com/codeGROOVE-dev/fido" "github.com/codeGROOVE-dev/fido/pkg/store/localfs" - "github.com/codeGROOVE-dev/prx/pkg/prx/github" ) const ( - // HTTP client configuration constants. - maxIdleConns = 100 - maxIdleConnsPerHost = 10 - idleConnTimeoutSec = 90 - // Cache TTL constants. 
prCacheTTL = 20 * 24 * time.Hour // 20 days - validity checked against reference time - checkRunsCacheTTL = 20 * 24 * time.Hour // 20 days - validity checked against reference time collaboratorsCacheTTL = 3 * time.Hour // 3 hours - repo-level, simple TTL rulesetsCacheTTL = 3 * time.Hour // 3 hours - repo-level, simple TTL ) -// cachedCheckRuns stores check run events with a timestamp for cache validation. -type cachedCheckRuns struct { - CachedAt time.Time - Events []Event -} - // PRStore is the interface for PR cache storage backends. // This is an alias for fido.Store with the appropriate type parameters. type PRStore = fido.Store[string, PullRequestData] -// Client provides methods to fetch GitHub pull request events. +// Client provides methods to fetch pull request events from various platforms. type Client struct { - github *github.Client - logger *slog.Logger - collaboratorsCache *fido.Cache[string, map[string]string] - rulesetsCache *fido.Cache[string, []string] - checkRunsCache *fido.Cache[string, cachedCheckRuns] - prCache *fido.TieredCache[string, PullRequestData] - token string // Store token for recreating client with new transport + platform Platform + logger *slog.Logger + prCache *fido.TieredCache[string, PullRequestData] } // Option is a function that configures a Client. @@ -66,19 +48,6 @@ func WithLogger(logger *slog.Logger) Option { } } -// WithHTTPClient sets a custom HTTP client. -func WithHTTPClient(httpClient *http.Client) Option { - return func(c *Client) { - // Wrap the transport with retry logic if not already wrapped - if httpClient.Transport == nil { - httpClient.Transport = &github.Transport{Base: http.DefaultTransport} - } else if _, ok := httpClient.Transport.(*github.Transport); !ok { - httpClient.Transport = &github.Transport{Base: httpClient.Transport} - } - c.github = newGitHubClient(httpClient, c.token, github.API) - } -} - // WithCacheStore sets a custom cache store for PR data. // Use null.New[string, prx.PullRequestData]() to disable persistence. func WithCacheStore(store PRStore) Option { @@ -92,39 +61,28 @@ func WithCacheStore(store PRStore) Option { } } -// NewClient creates a new Client with the given GitHub token. -// Caching is enabled by default with disk persistence. -// Use WithCacheStore to provide a custom store (including null.New() to disable persistence). -// If token is empty, WithHTTPClient option must be provided. -func NewClient(token string, opts ...Option) *Client { - transport := &http.Transport{ - MaxIdleConns: maxIdleConns, - MaxIdleConnsPerHost: maxIdleConnsPerHost, - IdleConnTimeout: idleConnTimeoutSec * time.Second, - DisableCompression: false, - DisableKeepAlives: false, - } +// NewClient creates a new Client with the given platform. +// +// Deprecated: Use NewClientWithPlatform with an explicit platform instead. +// For GitHub: NewClientWithPlatform(github.NewPlatform(token), opts...) +// For GitLab: NewClientWithPlatform(gitlab.NewPlatform(token), opts...) +// For Gitea: NewClientWithPlatform(gitea.NewPlatform(token), opts...) +func NewClient(platform Platform, opts ...Option) *Client { + return NewClientWithPlatform(platform, opts...) +} + +// NewClientWithPlatform creates a new Client with the given platform. +// Use this to create clients for GitLab, Codeberg, or other platforms. 
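// For example, a self-hosted GitLab client might be built as below (a sketch based
// on cmd/prx; the base URL is illustrative):
//
//	client := NewClientWithPlatform(gitlab.NewPlatform(token, gitlab.WithBaseURL("https://gitlab.example.com")))
//	data, err := client.PullRequest(ctx, owner, repo, number)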
+func NewClientWithPlatform(platform Platform, opts ...Option) *Client { c := &Client{ - logger: slog.Default(), - token: token, - collaboratorsCache: fido.New[string, map[string]string](fido.TTL(collaboratorsCacheTTL)), - rulesetsCache: fido.New[string, []string](fido.TTL(rulesetsCacheTTL)), - checkRunsCache: fido.New[string, cachedCheckRuns](fido.TTL(checkRunsCacheTTL)), - github: newGitHubClient( - &http.Client{ - Transport: &github.Transport{Base: transport}, - Timeout: 30 * time.Second, - }, - token, - github.API, - ), + platform: platform, + logger: slog.Default(), } for _, opt := range opts { opt(c) } - // Set up default cache if none was configured via options if c.prCache == nil { c.prCache = createDefaultCache(c.logger) } @@ -168,32 +126,32 @@ func (c *Client) PullRequestWithReferenceTime( refTime time.Time, ) (*PullRequestData, error) { if c.prCache == nil { - return c.pullRequestViaGraphQL(ctx, owner, repo, pr, refTime) + return c.platform.FetchPR(ctx, owner, repo, pr, refTime) } - key := prCacheKey(owner, repo, pr) + key := prCacheKey(c.platform.Name(), owner, repo, pr) if cached, found, err := c.prCache.Get(ctx, key); err != nil { c.logger.WarnContext(ctx, "cache get error", "error", err) } else if found { if !cached.CachedAt.Before(refTime) { - c.logger.InfoContext(ctx, "cache hit: GraphQL pull request", - "owner", owner, "repo", repo, "pr", pr, "cached_at", cached.CachedAt) + c.logger.InfoContext(ctx, "cache hit: pull request", + "platform", c.platform.Name(), "owner", owner, "repo", repo, "pr", pr, "cached_at", cached.CachedAt) return &cached, nil } - c.logger.InfoContext(ctx, "cache miss: GraphQL pull request expired", - "owner", owner, "repo", repo, "pr", pr, + c.logger.InfoContext(ctx, "cache miss: pull request expired", + "platform", c.platform.Name(), "owner", owner, "repo", repo, "pr", pr, "cached_at", cached.CachedAt, "reference_time", refTime) if err := c.prCache.Delete(ctx, key); err != nil { c.logger.WarnContext(ctx, "failed to delete stale cache entry", "error", err) } } else { - c.logger.InfoContext(ctx, "cache miss: GraphQL pull request not in cache", - "owner", owner, "repo", repo, "pr", pr) + c.logger.InfoContext(ctx, "cache miss: pull request not in cache", + "platform", c.platform.Name(), "owner", owner, "repo", repo, "pr", pr) } result, err := c.prCache.Fetch(ctx, key, func(ctx context.Context) (PullRequestData, error) { - data, err := c.pullRequestViaGraphQL(ctx, owner, repo, pr, refTime) + data, err := c.platform.FetchPR(ctx, owner, repo, pr, refTime) if err != nil { return PullRequestData{}, err } @@ -232,8 +190,8 @@ func NewCacheStore(dir string) (PRStore, error) { } // prCacheKey generates a cache key for PR data. -func prCacheKey(owner, repo string, prNumber int) string { - key := strings.Join([]string{"graphql", "pr_graphql", owner, repo, strconv.Itoa(prNumber)}, "/") +func prCacheKey(platform, owner, repo string, prNumber int) string { + key := strings.Join([]string{platform, "pr", owner, repo, strconv.Itoa(prNumber)}, "/") hash := sha256.Sum256([]byte(key)) return hex.EncodeToString(hash[:]) } @@ -247,8 +205,3 @@ func collaboratorsCacheKey(owner, repo string) string { func rulesetsCacheKey(owner, repo string) string { return fmt.Sprintf("%s/%s", owner, repo) } - -// checkRunsCacheKey generates a cache key for check runs data. 
-func checkRunsCacheKey(owner, repo, sha string) string { - return fmt.Sprintf("%s/%s/%s", owner, repo, sha) -} diff --git a/pkg/prx/client_cache_internal_test.go b/pkg/prx/client_cache_internal_test.go new file mode 100644 index 0000000..07c7975 --- /dev/null +++ b/pkg/prx/client_cache_internal_test.go @@ -0,0 +1,77 @@ +package prx + +import ( + "testing" +) + +func TestCacheKeyGeneration(t *testing.T) { + // Test that cache keys are consistent + key1 := prCacheKey("github", "owner", "repo", 123) + key2 := prCacheKey("github", "owner", "repo", 123) + + if key1 != key2 { + t.Error("Cache keys should be consistent for same inputs") + } + + // Test that different inputs produce different keys + key3 := prCacheKey("github", "owner", "repo", 456) + if key1 == key3 { + t.Error("Different inputs should produce different cache keys") + } + + // Test that different platforms produce different keys + key4 := prCacheKey("gitlab", "owner", "repo", 123) + if key1 == key4 { + t.Error("Different platforms should produce different cache keys") + } + + // Verify key format (should be 64 char hex string) + if len(key1) != 64 { + t.Errorf("Cache key should be 64 characters, got %d", len(key1)) + } + + if !isHexString(key1) { + t.Error("Cache key should be a hex string") + } +} + +func TestIsHexString(t *testing.T) { + tests := []struct { + input string + expected bool + }{ + {"0123456789abcdef", true}, + {"ABCDEF", true}, + {"0123456789ABCDEF", true}, + {"xyz", false}, + {"12g4", false}, + {"", true}, // Empty string is technically all hex + } + + for _, tt := range tests { + result := isHexString(tt.input) + if result != tt.expected { + t.Errorf("isHexString(%q) = %v, want %v", tt.input, result, tt.expected) + } + } +} + +func TestRulesetsCacheKey(t *testing.T) { + key1 := rulesetsCacheKey("owner", "repo") + key2 := rulesetsCacheKey("owner", "repo") + + if key1 != key2 { + t.Errorf("Same inputs produced different keys: %s vs %s", key1, key2) + } + + key3 := rulesetsCacheKey("other", "repo") + if key1 == key3 { + t.Error("Different inputs produced same key") + } + + // Verify format + expected := "owner/repo" + if key1 != expected { + t.Errorf("Expected key %q, got %q", expected, key1) + } +} diff --git a/pkg/prx/client_cache_test.go b/pkg/prx/client_cache_test.go index dde5f8d..9bd1718 100644 --- a/pkg/prx/client_cache_test.go +++ b/pkg/prx/client_cache_test.go @@ -1,4 +1,4 @@ -package prx +package prx_test import ( "context" @@ -8,6 +8,9 @@ import ( "os" "testing" "time" + + "github.com/codeGROOVE-dev/prx/pkg/prx" + "github.com/codeGROOVE-dev/prx/pkg/prx/github" ) func TestCacheClient(t *testing.T) { @@ -80,14 +83,14 @@ func TestCacheClient(t *testing.T) { defer server.Close() // Create cache store and client with test server - store, err := NewCacheStore(cacheDir) + store, err := prx.NewCacheStore(cacheDir) if err != nil { t.Fatalf("Failed to create cache store: %v", err) } - client := NewClient("test-token", - WithCacheStore(store), - WithHTTPClient(&http.Client{Transport: &http.Transport{}}), - WithLogger(slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{Level: slog.LevelDebug}))), + platform := github.NewTestPlatform("test-token", server.URL) + client := prx.NewClientWithPlatform(platform, + prx.WithCacheStore(store), + prx.WithLogger(slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{Level: slog.LevelDebug}))), ) defer func() { if closeErr := client.Close(); closeErr != nil { @@ -95,9 +98,6 @@ func TestCacheClient(t *testing.T) { } }() - // Override the GitHub client to use test 
server - client.github = newTestGitHubClient(&http.Client{Transport: &http.Transport{}}, "test-token", server.URL) - ctx := context.Background() // First request - should hit the API @@ -146,72 +146,6 @@ func TestCacheClient(t *testing.T) { } } -func TestCacheKeyGeneration(t *testing.T) { - // Test that cache keys are consistent - key1 := prCacheKey("owner", "repo", 123) - key2 := prCacheKey("owner", "repo", 123) - - if key1 != key2 { - t.Error("Cache keys should be consistent for same inputs") - } - - // Test that different inputs produce different keys - key3 := prCacheKey("owner", "repo", 456) - if key1 == key3 { - t.Error("Different inputs should produce different cache keys") - } - - // Verify key format (should be 64 char hex string) - if len(key1) != 64 { - t.Errorf("Cache key should be 64 characters, got %d", len(key1)) - } - - if !isHexString(key1) { - t.Error("Cache key should be a hex string") - } -} - -func TestIsHexString(t *testing.T) { - tests := []struct { - input string - expected bool - }{ - {"0123456789abcdef", true}, - {"ABCDEF", true}, - {"0123456789ABCDEF", true}, - {"xyz", false}, - {"12g4", false}, - {"", true}, // Empty string is technically all hex - } - - for _, tt := range tests { - result := isHexString(tt.input) - if result != tt.expected { - t.Errorf("isHexString(%q) = %v, want %v", tt.input, result, tt.expected) - } - } -} - -func TestRulesetsCacheKey(t *testing.T) { - key1 := rulesetsCacheKey("owner", "repo") - key2 := rulesetsCacheKey("owner", "repo") - - if key1 != key2 { - t.Errorf("Same inputs produced different keys: %s vs %s", key1, key2) - } - - key3 := rulesetsCacheKey("other", "repo") - if key1 == key3 { - t.Error("Different inputs produced same key") - } - - // Verify format - expected := "owner/repo" - if key1 != expected { - t.Errorf("Expected key %q, got %q", expected, key1) - } -} - func TestRulesetsCache(t *testing.T) { // Track API calls to rulesets endpoint rulesetsAPICallCount := 0 @@ -274,22 +208,13 @@ func TestRulesetsCache(t *testing.T) { })) defer server.Close() - client := NewClient("test-token", - WithHTTPClient(&http.Client{Transport: &http.Transport{}}), - WithLogger(slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{Level: slog.LevelDebug}))), - ) - client.github = newTestGitHubClient(&http.Client{Transport: &http.Transport{}}, "test-token", server.URL) - defer func() { - if err := client.Close(); err != nil { - t.Errorf("Failed to close client: %v", err) - } - }() + platform := github.NewTestPlatform("test-token", server.URL) ctx := context.Background() refTime := time.Now() // First request - should call rulesets API - _, err := client.pullRequestViaGraphQL(ctx, "test", "repo", 1, refTime) + _, err := platform.FetchPR(ctx, "test", "repo", 1, refTime) if err != nil { t.Fatalf("First request failed: %v", err) } @@ -299,7 +224,7 @@ func TestRulesetsCache(t *testing.T) { } // Second request - should use cached rulesets - _, err = client.pullRequestViaGraphQL(ctx, "test", "repo", 1, refTime) + _, err = platform.FetchPR(ctx, "test", "repo", 1, refTime) if err != nil { t.Fatalf("Second request failed: %v", err) } @@ -309,7 +234,7 @@ func TestRulesetsCache(t *testing.T) { } // Third request for same repo - should still use cache - _, err = client.pullRequestViaGraphQL(ctx, "test", "repo", 2, refTime) + _, err = platform.FetchPR(ctx, "test", "repo", 2, refTime) if err != nil { t.Fatalf("Third request failed: %v", err) } diff --git a/pkg/prx/client_cachestore_test.go b/pkg/prx/client_cachestore_test.go index 56b2d44..11c85da 
100644 --- a/pkg/prx/client_cachestore_test.go +++ b/pkg/prx/client_cachestore_test.go @@ -67,8 +67,8 @@ func TestPRCacheKey(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - key1 := prCacheKey(tt.owner, tt.repo, tt.prNumber) - key2 := prCacheKey(tt.owner, tt.repo, tt.prNumber) + key1 := prCacheKey("github", tt.owner, tt.repo, tt.prNumber) + key2 := prCacheKey("github", tt.owner, tt.repo, tt.prNumber) // Same inputs should produce same key if key1 != key2 { @@ -83,8 +83,8 @@ func TestPRCacheKey(t *testing.T) { } // Different inputs should produce different keys - key1 := prCacheKey("owner", "repo", 1) - key2 := prCacheKey("owner", "repo", 2) + key1 := prCacheKey("github", "owner", "repo", 1) + key2 := prCacheKey("github", "owner", "repo", 2) if key1 == key2 { t.Errorf("Different inputs produced same key") } @@ -103,22 +103,3 @@ func TestCollaboratorsCacheKey(t *testing.T) { t.Errorf("Different inputs produced same key") } } - -func TestClientClose(t *testing.T) { - tmpDir := t.TempDir() - store, err := NewCacheStore(tmpDir) - if err != nil { - t.Fatalf("Failed to create cache store: %v", err) - } - client := NewClient("test-token", WithCacheStore(store)) - - // Close should not error - if err := client.Close(); err != nil { - t.Errorf("Close returned error: %v", err) - } - - // Closing again should be safe - if err := client.Close(); err != nil { - t.Errorf("Second close returned error: %v", err) - } -} diff --git a/pkg/prx/client_close_test.go b/pkg/prx/client_close_test.go new file mode 100644 index 0000000..1001057 --- /dev/null +++ b/pkg/prx/client_close_test.go @@ -0,0 +1,27 @@ +package prx_test + +import ( + "testing" + + "github.com/codeGROOVE-dev/prx/pkg/prx" + "github.com/codeGROOVE-dev/prx/pkg/prx/github" +) + +func TestClientClose(t *testing.T) { + tmpDir := t.TempDir() + store, err := prx.NewCacheStore(tmpDir) + if err != nil { + t.Fatalf("Failed to create cache store: %v", err) + } + client := prx.NewClientWithPlatform(github.NewTestPlatform("test-token", ""), prx.WithCacheStore(store)) + + // Close should not error + if err := client.Close(); err != nil { + t.Errorf("Close returned error: %v", err) + } + + // Closing again should be safe + if err := client.Close(); err != nil { + t.Errorf("Second close returned error: %v", err) + } +} diff --git a/pkg/prx/client_graphql.go b/pkg/prx/client_graphql.go deleted file mode 100644 index f235b57..0000000 --- a/pkg/prx/client_graphql.go +++ /dev/null @@ -1,323 +0,0 @@ -package prx - -import ( - "context" - "fmt" - "slices" - "sort" - "strings" - "time" - - "github.com/codeGROOVE-dev/prx/pkg/prx/github" -) - -// pullRequestViaGraphQL fetches pull request data using GraphQL with minimal REST fallbacks. -// This hybrid approach reduces API calls from 13+ to ~3-4 while maintaining complete data fidelity. -// The refTime parameter is used for cache validation of sub-requests like check runs. -func (c *Client) pullRequestViaGraphQL(ctx context.Context, owner, repo string, prNumber int, refTime time.Time) (*PullRequestData, error) { - c.logger.InfoContext(ctx, "fetching pull request via GraphQL", "owner", owner, "repo", repo, "pr", prNumber) - - // Main GraphQL query - gets 90% of the data in one call - prData, err := c.fetchPullRequestCompleteViaGraphQL(ctx, owner, repo, prNumber) - if err != nil { - // Don't fall back to REST - fail with the GraphQL error - return nil, fmt.Errorf("GraphQL query failed: %w", err) - } - - // REST API calls for missing data (minimal) - // 1. 
Fetch rulesets (not available in GraphQL) - additionalRequired, err := c.fetchRulesetsREST(ctx, owner, repo) - if err != nil { - c.logger.WarnContext(ctx, "failed to fetch rulesets", "error", err) - } else if prData.PullRequest.CheckSummary != nil && len(additionalRequired) > 0 { - // Add to existing required checks - // Would need to recalculate with new required checks - c.logger.InfoContext(ctx, "added required checks from rulesets", "count", len(additionalRequired)) - } - - // Get existing required checks from GraphQL - existingRequired := c.existingRequiredChecks(prData) - - // Combine with additional required checks from rulesets - existingRequired = append(existingRequired, additionalRequired...) - - // 2. Fetch check runs via REST for all commits (GraphQL's statusCheckRollup is often null) - // This ensures we capture check run history including failures from earlier commits - checkRunEvents := c.fetchAllCheckRunsREST(ctx, owner, repo, prData, refTime) - - // Mark check runs as required based on combined list - for i := range checkRunEvents { - if slices.Contains(existingRequired, checkRunEvents[i].Body) { - checkRunEvents[i].Required = true - } - } - - // Add check run events to the events list - prData.Events = append(prData.Events, checkRunEvents...) - - // Recalculate check summary with the new check run data - if len(checkRunEvents) > 0 { - c.recalculateCheckSummaryWithCheckRuns(ctx, prData, checkRunEvents) - } - - c.logger.InfoContext(ctx, "fetched check runs via REST", "count", len(checkRunEvents)) - - // Sort all events chronologically (oldest to newest) - sort.Slice(prData.Events, func(i, j int) bool { - return prData.Events[i].Timestamp.Before(prData.Events[j].Timestamp) - }) - - apiCallsUsed := 2 // GraphQL + rulesets - if len(checkRunEvents) > 0 { - apiCallsUsed++ // + check runs - } - - c.logger.InfoContext(ctx, "successfully fetched pull request via hybrid GraphQL+REST", - "owner", owner, "repo", repo, "pr", prNumber, - "event_count", len(prData.Events), - "api_calls_made", fmt.Sprintf("%d (vs 13+ with REST)", apiCallsUsed)) - - return prData, nil -} - -// fetchRulesetsREST fetches repository rulesets via REST API (not available in GraphQL). -// Results are cached for 3 hours to reduce API calls. Uses Fetch to prevent thundering herds. -func (c *Client) fetchRulesetsREST(ctx context.Context, owner, repo string) ([]string, error) { - cacheKey := rulesetsCacheKey(owner, repo) - - return c.rulesetsCache.Fetch(cacheKey, func() ([]string, error) { - path := fmt.Sprintf("/repos/%s/%s/rulesets", owner, repo) - var rulesets []github.Ruleset - - if _, err := c.github.Get(ctx, path, &rulesets); err != nil { - return nil, err - } - - var required []string - for _, rs := range rulesets { - if rs.Target != "branch" { - continue - } - for _, rule := range rs.Rules { - if rule.Type == "required_status_checks" && rule.Parameters.RequiredStatusChecks != nil { - for _, chk := range rule.Parameters.RequiredStatusChecks { - required = append(required, chk.Context) - } - } - } - } - - c.logger.InfoContext(ctx, "fetched required checks from rulesets", - "owner", owner, "repo", repo, "count", len(required), "checks", required) - - return required, nil - }) -} - -// truncateSHA returns the first 7 characters of a SHA, or the full string if shorter. -func truncateSHA(sha string) string { - if len(sha) > 7 { - return sha[:7] - } - return sha -} - -// fetchCheckRunsREST fetches check runs via REST API for a specific commit. -// Results are cached and validated against refTime. 
-func (c *Client) fetchCheckRunsREST(ctx context.Context, owner, repo, sha string, refTime time.Time) ([]Event, error) { - if sha == "" { - return nil, nil - } - - cacheKey := checkRunsCacheKey(owner, repo, sha) - - // Check cache with reference time validation - if cached, ok := c.checkRunsCache.Get(cacheKey); ok { - if !cached.CachedAt.Before(refTime) { - c.logger.InfoContext(ctx, "cache hit: check runs", - "owner", owner, "repo", repo, "sha", truncateSHA(sha), "count", len(cached.Events)) - return cached.Events, nil - } - c.logger.InfoContext(ctx, "cache miss: check runs expired", - "owner", owner, "repo", repo, "sha", truncateSHA(sha), - "cached_at", cached.CachedAt, "reference_time", refTime) - } - - path := fmt.Sprintf("/repos/%s/%s/commits/%s/check-runs?per_page=100", owner, repo, sha) - var checkRuns github.CheckRuns - if _, err := c.github.Get(ctx, path, &checkRuns); err != nil { - return nil, fmt.Errorf("fetching check runs: %w", err) - } - - var events []Event - for _, run := range checkRuns.CheckRuns { - if run == nil { - continue - } - - var timestamp time.Time - var outcome string - - switch { - case !run.CompletedAt.IsZero(): - timestamp = run.CompletedAt - outcome = strings.ToLower(run.Conclusion) - case !run.StartedAt.IsZero(): - timestamp = run.StartedAt - outcome = strings.ToLower(run.Status) - default: - // No timestamp available, skip this check run - continue - } - - event := Event{ - Kind: EventKindCheckRun, - Timestamp: timestamp, - Actor: "github", - Bot: true, - Body: run.Name, - Outcome: outcome, - } - - // Build description from output - switch { - case run.Output.Title != "" && run.Output.Summary != "": - event.Description = fmt.Sprintf("%s: %s", run.Output.Title, run.Output.Summary) - case run.Output.Title != "": - event.Description = run.Output.Title - case run.Output.Summary != "": - event.Description = run.Output.Summary - default: - // No description available - } - - events = append(events, event) - } - - // Cache the results - c.checkRunsCache.Set(cacheKey, cachedCheckRuns{ - Events: events, - CachedAt: time.Now(), - }) - - c.logger.InfoContext(ctx, "fetched check runs from API", - "owner", owner, "repo", repo, "sha", truncateSHA(sha), "count", len(events)) - - return events, nil -} - -// fetchAllCheckRunsREST fetches check runs for all commits in the PR. -// This ensures we capture the full history including failures from earlier commits -// that may have been superseded by successful runs on later commits. -// Errors fetching individual commits are logged but don't stop the overall process. -// The refTime parameter is used for cache validation. 
-func (c *Client) fetchAllCheckRunsREST(ctx context.Context, owner, repo string, prData *PullRequestData, refTime time.Time) []Event { - // Collect all unique commit SHAs from the PR - shas := make(map[string]bool) - - // Add HEAD SHA (most important) - if prData.PullRequest.HeadSHA != "" { - shas[prData.PullRequest.HeadSHA] = true - } - - // Add all other commit SHAs from commit events - for i := range prData.Events { - e := &prData.Events[i] - if e.Kind == EventKindCommit && e.Body != "" { - shas[e.Body] = true - } - } - - // Fetch check runs for each unique commit - var all []Event - seen := make(map[string]bool) // Track unique check runs by "name:timestamp" - - for sha := range shas { - events, err := c.fetchCheckRunsREST(ctx, owner, repo, sha, refTime) - if err != nil { - c.logger.WarnContext(ctx, "failed to fetch check runs for commit", "sha", sha, "error", err) - continue - } - - // Add only unique check runs (same check can run on multiple commits) - for i := range events { - ev := &events[i] - key := fmt.Sprintf("%s:%s", ev.Body, ev.Timestamp.Format(time.RFC3339Nano)) - if !seen[key] { - seen[key] = true - ev.Target = sha - all = append(all, *ev) - } - } - } - - return all -} - -// existingRequiredChecks extracts required checks that were already identified. -func (*Client) existingRequiredChecks(prData *PullRequestData) []string { - var required []string - - // Extract from existing events that are marked as required - for i := range prData.Events { - e := &prData.Events[i] - if e.Required && (e.Kind == EventKindCheckRun || e.Kind == EventKindStatusCheck) { - required = append(required, e.Body) - } - } - - // Also extract from pending checks in check summary (these are required but haven't run) - if prData.PullRequest.CheckSummary != nil { - for chk := range prData.PullRequest.CheckSummary.Pending { - if !slices.Contains(required, chk) { - required = append(required, chk) - } - } - } - - return required -} - -// recalculateCheckSummaryWithCheckRuns updates the check summary with REST-fetched check runs. -// This recalculates the entire check summary from ALL events to ensure we have the latest state. -func (c *Client) recalculateCheckSummaryWithCheckRuns(_ /* ctx */ context.Context, prData *PullRequestData, _ /* checkRunEvents */ []Event) { - // Get existing required checks before we overwrite the summary - var required []string - if prData.PullRequest.CheckSummary != nil { - for chk := range prData.PullRequest.CheckSummary.Pending { - required = append(required, chk) - } - } - - // Recalculate the entire check summary from ALL events (including the new check runs) - // This ensures we get the latest state based on timestamps - prData.PullRequest.CheckSummary = calculateCheckSummary(prData.Events, required) - - // Update test state based on the recalculated check summary - prData.PullRequest.TestState = c.calculateTestStateFromCheckSummary(prData.PullRequest.CheckSummary) -} - -// calculateTestStateFromCheckSummary determines test state from a CheckSummary. -// This looks at the LATEST state of checks (after deduplication) rather than all events. 
-func (*Client) calculateTestStateFromCheckSummary(summary *CheckSummary) string { - if summary == nil { - return TestStateNone - } - - // Any failing checks means tests are failing - if len(summary.Failing) > 0 { - return TestStateFailing - } - - // Any pending checks means tests are pending - if len(summary.Pending) > 0 { - return TestStatePending - } - - // If we have successful checks and nothing failing/pending, tests are passing - if len(summary.Success) > 0 { - return TestStatePassing - } - - return TestStateNone -} diff --git a/pkg/prx/client_pullrequest_test.go b/pkg/prx/client_pullrequest_test.go index e864ebf..cc7520b 100644 --- a/pkg/prx/client_pullrequest_test.go +++ b/pkg/prx/client_pullrequest_test.go @@ -1,5 +1,5 @@ //nolint:errcheck,gocritic // Test handlers don't need to check w.Write errors; if-else chains are fine for URL routing -package prx +package prx_test import ( "context" @@ -8,6 +8,9 @@ import ( "strings" "testing" "time" + + "github.com/codeGROOVE-dev/prx/pkg/prx" + "github.com/codeGROOVE-dev/prx/pkg/prx/github" ) func TestClient_PullRequest(t *testing.T) { @@ -57,11 +60,8 @@ func TestClient_PullRequest(t *testing.T) { })) defer server.Close() - httpClient := &http.Client{Transport: http.DefaultTransport} - client := NewClient("test-token", WithHTTPClient(httpClient)) - - // Override the API URL to point to our test server - client.github = newTestGitHubClient(httpClient, "test-token", server.URL) + platform := github.NewTestPlatform("test-token", server.URL) + client := prx.NewClientWithPlatform(platform) ctx := context.Background() prData, err := client.PullRequest(ctx, "testowner", "testrepo", 123) @@ -130,15 +130,12 @@ func TestClient_PullRequestWithCache(t *testing.T) { })) defer server.Close() - httpClient := &http.Client{Transport: http.DefaultTransport} - store, err := NewCacheStore(tmpDir) + store, err := prx.NewCacheStore(tmpDir) if err != nil { t.Fatalf("Failed to create cache store: %v", err) } - client := NewClient("test-token", WithCacheStore(store), WithHTTPClient(httpClient)) - - // Override the API URL - client.github = newTestGitHubClient(httpClient, "test-token", server.URL) + platform := github.NewTestPlatform("test-token", server.URL) + client := prx.NewClientWithPlatform(platform, prx.WithCacheStore(store)) ctx := context.Background() refTime := time.Now() diff --git a/pkg/prx/event_processing.go b/pkg/prx/event_processing.go index 7f6e02a..29514fa 100644 --- a/pkg/prx/event_processing.go +++ b/pkg/prx/event_processing.go @@ -4,9 +4,9 @@ import ( "time" ) -// filterEvents removes non-essential events to reduce noise. +// FilterEvents removes non-essential events to reduce noise. // Currently filters out successful status_check events (keeps failures). -func filterEvents(events []Event) []Event { +func FilterEvents(events []Event) []Event { filtered := make([]Event, 0, len(events)) for i := range events { @@ -26,9 +26,9 @@ func filterEvents(events []Event) []Event { return filtered } -// upgradeWriteAccess scans through events and upgrades write_access from 1 (likely) to 2 (definitely) +// UpgradeWriteAccess scans through events and upgrades write_access from 1 (likely) to 2 (definitely) // for actors who have performed actions that require write access. 
-func upgradeWriteAccess(events []Event) { +func UpgradeWriteAccess(events []Event) { // Track actors who have definitely demonstrated write access confirmed := make(map[string]bool) @@ -57,8 +57,8 @@ func upgradeWriteAccess(events []Event) { } } -// calculateCheckSummary analyzes check/status events and categorizes them by outcome. -func calculateCheckSummary(events []Event, requiredChecks []string) *CheckSummary { +// CalculateCheckSummary analyzes check/status events and categorizes them by outcome. +func CalculateCheckSummary(events []Event, requiredChecks []string) *CheckSummary { summary := &CheckSummary{ Success: make(map[string]string), Failing: make(map[string]string), @@ -143,8 +143,8 @@ func calculateCheckSummary(events []Event, requiredChecks []string) *CheckSummar return summary } -// calculateApprovalSummary analyzes review events and categorizes approvals by reviewer's write access. -func calculateApprovalSummary(events []Event) *ApprovalSummary { +// CalculateApprovalSummary analyzes review events and categorizes approvals by reviewer's write access. +func CalculateApprovalSummary(events []Event) *ApprovalSummary { summary := &ApprovalSummary{} // Track the latest review state from each user @@ -184,9 +184,9 @@ func calculateApprovalSummary(events []Event) *ApprovalSummary { return summary } -// calculateParticipantAccess builds a map of all PR participants to their write access levels. +// CalculateParticipantAccess builds a map of all PR participants to their write access levels. // Includes the PR author, assignees, reviewers, and all event actors. -func calculateParticipantAccess(events []Event, pr *PullRequest) map[string]int { +func CalculateParticipantAccess(events []Event, pr *PullRequest) map[string]int { participants := make(map[string]int) // Add the PR author diff --git a/pkg/prx/example_test.go b/pkg/prx/example_test.go index 723e988..76676df 100644 --- a/pkg/prx/example_test.go +++ b/pkg/prx/example_test.go @@ -7,6 +7,7 @@ import ( "os" "github.com/codeGROOVE-dev/prx/pkg/prx" + "github.com/codeGROOVE-dev/prx/pkg/prx/github" ) func Example() { @@ -16,7 +17,7 @@ func Example() { log.Fatal("GITHUB_TOKEN environment variable not set") } - client := prx.NewClient(token) + client := prx.NewClientWithPlatform(github.NewPlatform(token)) // Fetch events for a pull request ctx := context.Background() @@ -44,7 +45,7 @@ func Example() { func ExampleClient_PullRequest() { // Create a client with custom logger token := os.Getenv("GITHUB_TOKEN") - client := prx.NewClient(token) + client := prx.NewClientWithPlatform(github.NewPlatform(token)) // Fetch all events for PR #123 ctx := context.Background() diff --git a/pkg/prx/gitea/platform.go b/pkg/prx/gitea/platform.go new file mode 100644 index 0000000..ef3ad94 --- /dev/null +++ b/pkg/prx/gitea/platform.go @@ -0,0 +1,737 @@ +// Package gitea provides a Gitea/Codeberg platform implementation for fetching +// pull request data from Gitea-based forges. +package gitea + +import ( + "context" + "encoding/json" + "fmt" + "io" + "log/slog" + "net/http" + "sort" + "strings" + "time" + + "github.com/codeGROOVE-dev/fido" + "github.com/codeGROOVE-dev/prx/pkg/prx" +) + +// Cache TTL constants. +const ( + prDataCacheTTL = 20 * 24 * time.Hour // 20 days - validity checked against reference time +) + +// Cached data types with timestamps for reference time validation. 
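Combined with the platform-based client constructor used in the updated tests and examples, calling the new Gitea backend from library code would look roughly like the sketch below; the repository coordinates and token handling are placeholders, and errors are simply fatal for brevity:

package main

import (
	"context"
	"fmt"
	"log"
	"os"
	"time"

	"github.com/codeGROOVE-dev/prx/pkg/prx"
	"github.com/codeGROOVE-dev/prx/pkg/prx/gitea"
)

func main() {
	// NewCodebergPlatform targets codeberg.org; WithBaseURL switches the same
	// implementation to a self-hosted Gitea instance instead.
	platform := gitea.NewCodebergPlatform(os.Getenv("CODEBERG_TOKEN"))

	client := prx.NewClientWithPlatform(platform)

	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
	defer cancel()

	data, err := client.PullRequest(ctx, "owner", "repo", 1) // placeholder coordinates
	if err != nil {
		log.Fatalf("fetch PR: %v", err)
	}
	fmt.Printf("%s: %d events\n", data.PullRequest.Title, len(data.Events))
}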
+// +//nolint:govet // fieldalignment: cache structs prioritize readability over memory layout +type cachedReviews struct { + Data []review + CachedAt time.Time +} + +//nolint:govet // fieldalignment: cache structs prioritize readability over memory layout +type cachedComments struct { + Data []comment + CachedAt time.Time +} + +//nolint:govet // fieldalignment: cache structs prioritize readability over memory layout +type cachedCommits struct { + Data []commit + CachedAt time.Time +} + +//nolint:govet // fieldalignment: cache structs prioritize readability over memory layout +type cachedTimeline struct { + Data []timelineEvent + CachedAt time.Time +} + +// Platform implements the prx.Platform interface for Gitea-based forges (Codeberg, self-hosted Gitea). +// +//nolint:govet // fieldalignment: struct fields ordered for clarity +type Platform struct { + logger *slog.Logger + httpClient *http.Client + token string + baseURL string + reviewsCache *fido.Cache[string, cachedReviews] + commentsCache *fido.Cache[string, cachedComments] + commitsCache *fido.Cache[string, cachedCommits] + timelineCache *fido.Cache[string, cachedTimeline] +} + +// Option configures a Platform. +type Option func(*Platform) + +// WithLogger sets a custom logger for the Gitea platform. +func WithLogger(logger *slog.Logger) Option { + return func(p *Platform) { + p.logger = logger + } +} + +// WithHTTPClient sets a custom HTTP client for the Gitea platform. +func WithHTTPClient(client *http.Client) Option { + return func(p *Platform) { + p.httpClient = client + } +} + +// WithBaseURL sets a custom base URL for self-hosted Gitea instances. +func WithBaseURL(baseURL string) Option { + return func(p *Platform) { + p.baseURL = strings.TrimSuffix(baseURL, "/") + } +} + +// NewPlatform creates a new Gitea platform client. +// For Codeberg, use NewCodebergPlatform instead. +func NewPlatform(token string, opts ...Option) *Platform { + p := &Platform{ + httpClient: &http.Client{Timeout: 30 * time.Second}, + token: token, + baseURL: "https://codeberg.org", // Default to Codeberg + logger: slog.Default(), + reviewsCache: fido.New[string, cachedReviews](fido.TTL(prDataCacheTTL)), + commentsCache: fido.New[string, cachedComments](fido.TTL(prDataCacheTTL)), + commitsCache: fido.New[string, cachedCommits](fido.TTL(prDataCacheTTL)), + timelineCache: fido.New[string, cachedTimeline](fido.TTL(prDataCacheTTL)), + } + + for _, opt := range opts { + opt(p) + } + + return p +} + +// NewCodebergPlatform creates a new Codeberg platform client. +func NewCodebergPlatform(token string, opts ...Option) *Platform { + return NewPlatform(token, opts...) +} + +// Name returns the platform identifier. +func (p *Platform) Name() string { + if strings.Contains(p.baseURL, "codeberg.org") { + return prx.PlatformCodeberg + } + return "gitea" +} + +// FetchPR retrieves a pull request with all events and metadata. +func (p *Platform) FetchPR(ctx context.Context, owner, repo string, number int, refTime time.Time) (*prx.PullRequestData, error) { + p.logger.Info("fetching pull request via Gitea REST API", + "owner", owner, "repo", repo, "pr", number) + + // Fetch pull request details (not cached - contains updatedAt for reference). + pr, err := p.fetchPullRequest(ctx, owner, repo, number) + if err != nil { + return nil, fmt.Errorf("fetch pull request: %w", err) + } + + // Fetch reviews (cached with reference time validation). 
+ reviews, err := p.fetchReviews(ctx, owner, repo, number, refTime) + if err != nil { + p.logger.Warn("failed to fetch reviews", "error", err) + } + + // Fetch comments (cached with reference time validation). + comments, err := p.fetchComments(ctx, owner, repo, number, refTime) + if err != nil { + p.logger.Warn("failed to fetch comments", "error", err) + } + + // Fetch commits (cached with reference time validation). + commits, err := p.fetchCommits(ctx, owner, repo, number, refTime) + if err != nil { + p.logger.Warn("failed to fetch commits", "error", err) + } + + // Fetch timeline events (cached with reference time validation). + timeline, err := p.fetchTimeline(ctx, owner, repo, number, refTime) + if err != nil { + p.logger.Warn("failed to fetch timeline", "error", err) + } + + // Convert to neutral format. + pullRequest := convertPullRequest(pr, reviews) + events := convertToEvents(pr, reviews, comments, commits, timeline) + + // Sort events by timestamp. + sort.Slice(events, func(i, j int) bool { + return events[i].Timestamp.Before(events[j].Timestamp) + }) + + // Finalize with calculated summaries. + prx.FinalizePullRequest(&pullRequest, events, nil, "") + + return &prx.PullRequestData{ + CachedAt: time.Now(), + PullRequest: pullRequest, + Events: events, + }, nil +} + +// Gitea API response types. +// See: https://docs.gitea.com/api/1.20/ + +//nolint:govet // fieldalignment: struct fields ordered for JSON clarity and API compatibility +type pullRequest struct { + User user `json:"user"` + Assignee *user `json:"assignee"` + MergedBy *user `json:"merged_by"` + Head branch `json:"head"` + Base branch `json:"base"` + MergedAt *time.Time `json:"merged_at"` + ClosedAt *time.Time `json:"closed_at"` + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` + Assignees []user `json:"assignees"` + RequestedReviewers []user `json:"requested_reviewers"` + Labels []label `json:"labels"` + Title string `json:"title"` + Body string `json:"body"` + State string `json:"state"` // "open", "closed" + HTMLURL string `json:"html_url"` + DiffURL string `json:"diff_url"` + PatchURL string `json:"patch_url"` + MergeBase string `json:"merge_base"` + Mergeable bool `json:"mergeable"` + Merged bool `json:"merged"` + ID int64 `json:"id"` + Number int `json:"number"` + Additions int `json:"additions"` + Deletions int `json:"deletions"` + ChangedFiles int `json:"changed_files"` + Draft bool `json:"draft"` +} + +type user struct { + Login string `json:"login"` + FullName string `json:"full_name"` + Email string `json:"email"` + AvatarURL string `json:"avatar_url"` + ID int64 `json:"id"` + IsAdmin bool `json:"is_admin"` +} + +type branch struct { + Repo *repo `json:"repo"` + Ref string `json:"ref"` + SHA string `json:"sha"` +} + +type repo struct { + Name string `json:"name"` + FullName string `json:"full_name"` + HTMLURL string `json:"html_url"` + ID int64 `json:"id"` +} + +type label struct { + Name string `json:"name"` + Color string `json:"color"` + ID int64 `json:"id"` +} + +//nolint:govet // fieldalignment: JSON API structs prioritize readability over memory layout +type review struct { + User user `json:"user"` + SubmittedAt time.Time `json:"submitted_at"` + Body string `json:"body"` + State string `json:"state"` // "APPROVED", "REQUEST_CHANGES", "COMMENT", "PENDING" + HTMLURL string `json:"html_url"` + ID int64 `json:"id"` + Official bool `json:"official"` + Stale bool `json:"stale"` + Dismissed bool `json:"dismissed"` +} + +//nolint:govet // fieldalignment: JSON API structs 
prioritize readability over memory layout +type comment struct { + User user `json:"user"` + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` + Body string `json:"body"` + HTMLURL string `json:"html_url"` + ID int64 `json:"id"` +} + +type commit struct { + Author *user `json:"author"` + Commit commitInfo `json:"commit"` + SHA string `json:"sha"` + HTMLURL string `json:"html_url"` +} + +type commitInfo struct { + Author commitAuthor `json:"author"` + Committer commitAuthor `json:"committer"` + Message string `json:"message"` +} + +type commitAuthor struct { + Date time.Time `json:"date"` + Name string `json:"name"` + Email string `json:"email"` +} + +type timelineEvent struct { + User *user `json:"user"` + Assignee *user `json:"assignee"` + Label *label `json:"label"` + OldRef string `json:"old_ref"` + NewRef string `json:"new_ref"` + RefAction string `json:"ref_action"` + CreatedAt time.Time `json:"created_at"` + Type string `json:"type"` + Body string `json:"body"` + ID int64 `json:"id"` +} + +// API fetch methods. + +func (p *Platform) fetchPullRequest(ctx context.Context, owner, repoName string, number int) (*pullRequest, error) { + url := fmt.Sprintf("%s/api/v1/repos/%s/%s/pulls/%d", p.baseURL, owner, repoName, number) + + var pr pullRequest + if err := p.doRequest(ctx, url, &pr); err != nil { + return nil, err + } + return &pr, nil +} + +func (p *Platform) fetchReviews(ctx context.Context, owner, repoName string, number int, refTime time.Time) ([]review, error) { + cacheKey := fmt.Sprintf("%s/%s/%d/reviews", owner, repoName, number) + + if cached, ok := p.reviewsCache.Get(cacheKey); ok { + if !cached.CachedAt.Before(refTime) { + p.logger.DebugContext(ctx, "cache hit: reviews", "owner", owner, "repo", repoName, "pr", number, "count", len(cached.Data)) + return cached.Data, nil + } + p.logger.DebugContext(ctx, "cache miss: reviews expired", + "owner", owner, "repo", repoName, "pr", number, + "cached_at", cached.CachedAt, "reference_time", refTime) + } + + url := fmt.Sprintf("%s/api/v1/repos/%s/%s/pulls/%d/reviews", p.baseURL, owner, repoName, number) + + var reviews []review + if err := p.doRequest(ctx, url, &reviews); err != nil { + return nil, err + } + + p.reviewsCache.Set(cacheKey, cachedReviews{Data: reviews, CachedAt: time.Now()}) + return reviews, nil +} + +func (p *Platform) fetchComments(ctx context.Context, owner, repoName string, number int, refTime time.Time) ([]comment, error) { + cacheKey := fmt.Sprintf("%s/%s/%d/comments", owner, repoName, number) + + if cached, ok := p.commentsCache.Get(cacheKey); ok { + if !cached.CachedAt.Before(refTime) { + p.logger.DebugContext(ctx, "cache hit: comments", "owner", owner, "repo", repoName, "pr", number, "count", len(cached.Data)) + return cached.Data, nil + } + p.logger.DebugContext(ctx, "cache miss: comments expired", + "owner", owner, "repo", repoName, "pr", number, + "cached_at", cached.CachedAt, "reference_time", refTime) + } + + url := fmt.Sprintf("%s/api/v1/repos/%s/%s/issues/%d/comments", p.baseURL, owner, repoName, number) + + var comments []comment + if err := p.doRequest(ctx, url, &comments); err != nil { + return nil, err + } + + p.commentsCache.Set(cacheKey, cachedComments{Data: comments, CachedAt: time.Now()}) + return comments, nil +} + +func (p *Platform) fetchCommits(ctx context.Context, owner, repoName string, number int, refTime time.Time) ([]commit, error) { + cacheKey := fmt.Sprintf("%s/%s/%d/commits", owner, repoName, number) + + if cached, ok := p.commitsCache.Get(cacheKey); ok { 
+ if !cached.CachedAt.Before(refTime) { + p.logger.DebugContext(ctx, "cache hit: commits", "owner", owner, "repo", repoName, "pr", number, "count", len(cached.Data)) + return cached.Data, nil + } + p.logger.DebugContext(ctx, "cache miss: commits expired", + "owner", owner, "repo", repoName, "pr", number, + "cached_at", cached.CachedAt, "reference_time", refTime) + } + + url := fmt.Sprintf("%s/api/v1/repos/%s/%s/pulls/%d/commits", p.baseURL, owner, repoName, number) + + var commits []commit + if err := p.doRequest(ctx, url, &commits); err != nil { + return nil, err + } + + p.commitsCache.Set(cacheKey, cachedCommits{Data: commits, CachedAt: time.Now()}) + return commits, nil +} + +func (p *Platform) fetchTimeline(ctx context.Context, owner, repoName string, number int, refTime time.Time) ([]timelineEvent, error) { + cacheKey := fmt.Sprintf("%s/%s/%d/timeline", owner, repoName, number) + + if cached, ok := p.timelineCache.Get(cacheKey); ok { + if !cached.CachedAt.Before(refTime) { + p.logger.DebugContext(ctx, "cache hit: timeline", "owner", owner, "repo", repoName, "pr", number, "count", len(cached.Data)) + return cached.Data, nil + } + p.logger.DebugContext(ctx, "cache miss: timeline expired", + "owner", owner, "repo", repoName, "pr", number, + "cached_at", cached.CachedAt, "reference_time", refTime) + } + + url := fmt.Sprintf("%s/api/v1/repos/%s/%s/issues/%d/timeline", p.baseURL, owner, repoName, number) + + var timeline []timelineEvent + if err := p.doRequest(ctx, url, &timeline); err != nil { + return nil, err + } + + p.timelineCache.Set(cacheKey, cachedTimeline{Data: timeline, CachedAt: time.Now()}) + return timeline, nil +} + +func (p *Platform) doRequest(ctx context.Context, url string, result any) (err error) { + req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, http.NoBody) + if err != nil { + return fmt.Errorf("create request: %w", err) + } + + if p.token != "" { + req.Header.Set("Authorization", "token "+p.token) + } + req.Header.Set("Accept", "application/json") + + p.logger.Debug("Gitea API request", "url", url) + + resp, err := p.httpClient.Do(req) + if err != nil { + return fmt.Errorf("execute request: %w", err) + } + defer func() { + if cerr := resp.Body.Close(); cerr != nil && err == nil { + err = fmt.Errorf("close response body: %w", cerr) + } + }() + + if resp.StatusCode != http.StatusOK { + body, readErr := io.ReadAll(resp.Body) + if readErr != nil { + return fmt.Errorf("gitea API error: %d %s (failed to read body: %w)", resp.StatusCode, resp.Status, readErr) + } + return fmt.Errorf("gitea API error: %d %s: %s", resp.StatusCode, resp.Status, string(body)) + } + + if err := json.NewDecoder(resp.Body).Decode(result); err != nil { + return fmt.Errorf("decode response: %w", err) + } + + return nil +} + +// Conversion methods. + +func convertPullRequest(pr *pullRequest, reviews []review) prx.PullRequest { + result := prx.PullRequest{ + Number: pr.Number, + Title: pr.Title, + Body: pr.Body, + Author: pr.User.Login, + State: pr.State, + Draft: pr.Draft, + CreatedAt: pr.CreatedAt, + UpdatedAt: pr.UpdatedAt, + ClosedAt: pr.ClosedAt, + MergedAt: pr.MergedAt, + Merged: pr.Merged, + Additions: pr.Additions, + Deletions: pr.Deletions, + ChangedFiles: pr.ChangedFiles, + } + + // Set head SHA. + result.HeadSHA = pr.Head.SHA + + // Set merged by. + if pr.MergedBy != nil { + result.MergedBy = pr.MergedBy.Login + } + + // Set assignees. + for _, a := range pr.Assignees { + result.Assignees = append(result.Assignees, a.Login) + } + + // Set labels. 
+ for _, l := range pr.Labels { + result.Labels = append(result.Labels, l.Name) + } + + // Set reviewers with their states. + result.Reviewers = make(map[string]prx.ReviewState) + for _, r := range pr.RequestedReviewers { + result.Reviewers[r.Login] = prx.ReviewStatePending + } + + // Update reviewer states from reviews. + for i := range reviews { + if reviews[i].Dismissed || reviews[i].Stale { + continue + } + result.Reviewers[reviews[i].User.Login] = convertReviewState(reviews[i].State) + } + + // Set mergeable state. + result.Mergeable = &pr.Mergeable + switch { + case pr.Mergeable: + result.MergeableState = "clean" + case pr.Draft: + result.MergeableState = "draft" + default: + result.MergeableState = "blocked" + } + + return result +} + +func convertReviewState(state string) prx.ReviewState { + switch strings.ToUpper(state) { + case "APPROVED": + return prx.ReviewStateApproved + case "REQUEST_CHANGES": + return prx.ReviewStateChangesRequested + case "COMMENT": + return prx.ReviewStateCommented + default: + return prx.ReviewStatePending + } +} + +func convertToEvents( + pr *pullRequest, + reviews []review, + comments []comment, + commits []commit, + timeline []timelineEvent, +) []prx.Event { + var events []prx.Event + + // Add PR opened event. + events = append(events, prx.Event{ + Timestamp: pr.CreatedAt, + Kind: prx.EventKindPROpened, + Actor: pr.User.Login, + }) + + // Add commit events. + for i := range commits { + actor := commits[i].Commit.Author.Name + if commits[i].Author != nil { + actor = commits[i].Author.Login + } + events = append(events, prx.Event{ + Timestamp: commits[i].Commit.Author.Date, + Kind: prx.EventKindCommit, + Actor: actor, + Body: commits[i].SHA[:7], + Description: firstLine(commits[i].Commit.Message), + }) + } + + // Add review events. + for i := range reviews { + events = append(events, prx.Event{ + Timestamp: reviews[i].SubmittedAt, + Kind: prx.EventKindReview, + Actor: reviews[i].User.Login, + Outcome: convertReviewOutcome(reviews[i].State), + Body: reviews[i].Body, + Question: prx.ContainsQuestion(reviews[i].Body), + Outdated: reviews[i].Stale || reviews[i].Dismissed, + }) + } + + // Add comment events. + for i := range comments { + events = append(events, prx.Event{ + Timestamp: comments[i].CreatedAt, + Kind: prx.EventKindComment, + Actor: comments[i].User.Login, + Body: comments[i].Body, + Question: prx.ContainsQuestion(comments[i].Body), + }) + } + + // Add timeline events. + for i := range timeline { + event := convertTimelineEvent(&timeline[i]) + if event != nil { + events = append(events, *event) + } + } + + // Add closed/merged events. 
+ if pr.MergedAt != nil { + actor := "" + if pr.MergedBy != nil { + actor = pr.MergedBy.Login + } + events = append(events, prx.Event{ + Timestamp: *pr.MergedAt, + Kind: prx.EventKindPRMerged, + Actor: actor, + }) + } else if pr.ClosedAt != nil && pr.State == "closed" { + events = append(events, prx.Event{ + Timestamp: *pr.ClosedAt, + Kind: prx.EventKindPRClosed, + }) + } + + return events +} + +func convertReviewOutcome(state string) string { + switch strings.ToUpper(state) { + case "APPROVED": + return "approved" + case "REQUEST_CHANGES": + return "changes_requested" + case "COMMENT": + return "commented" + default: + return "pending" + } +} + +func convertTimelineEvent(event *timelineEvent) *prx.Event { + actor := "" + if event.User != nil { + actor = event.User.Login + } + + switch event.Type { + case "label": + if event.Label == nil { + return nil + } + return &prx.Event{ + Timestamp: event.CreatedAt, + Kind: prx.EventKindLabeled, + Actor: actor, + Description: event.Label.Name, + } + case "unlabel": + if event.Label == nil { + return nil + } + return &prx.Event{ + Timestamp: event.CreatedAt, + Kind: prx.EventKindUnlabeled, + Actor: actor, + Description: event.Label.Name, + } + case "assignees": + if event.Assignee == nil { + return nil + } + return &prx.Event{ + Timestamp: event.CreatedAt, + Kind: prx.EventKindAssigned, + Actor: actor, + Target: event.Assignee.Login, + } + case "unassignees": + if event.Assignee == nil { + return nil + } + return &prx.Event{ + Timestamp: event.CreatedAt, + Kind: prx.EventKindUnassigned, + Actor: actor, + Target: event.Assignee.Login, + } + case "review_requested": + target := "" + if event.Assignee != nil { + target = event.Assignee.Login + } + return &prx.Event{ + Timestamp: event.CreatedAt, + Kind: prx.EventKindReviewRequested, + Actor: actor, + Target: target, + } + case "review_request_removed": + target := "" + if event.Assignee != nil { + target = event.Assignee.Login + } + return &prx.Event{ + Timestamp: event.CreatedAt, + Kind: prx.EventKindReviewRequestRemoved, + Actor: actor, + Target: target, + } + case "close": + return &prx.Event{ + Timestamp: event.CreatedAt, + Kind: prx.EventKindClosed, + Actor: actor, + } + case "reopen": + return &prx.Event{ + Timestamp: event.CreatedAt, + Kind: prx.EventKindReopened, + Actor: actor, + } + case "change_title": + return &prx.Event{ + Timestamp: event.CreatedAt, + Kind: prx.EventKindRenamedTitle, + Actor: actor, + Description: event.Body, + } + case "change_ref": + return &prx.Event{ + Timestamp: event.CreatedAt, + Kind: prx.EventKindBaseRefChanged, + Actor: actor, + Description: fmt.Sprintf("%s -> %s", event.OldRef, event.NewRef), + } + case "merge": + return &prx.Event{ + Timestamp: event.CreatedAt, + Kind: prx.EventKindMerged, + Actor: actor, + } + case "comment_ref", "issue_ref", "pull_ref": + return &prx.Event{ + Timestamp: event.CreatedAt, + Kind: prx.EventKindCrossReferenced, + Actor: actor, + Description: event.Body, + } + default: + // Unknown timeline event type - skip it. + return nil + } +} + +// Helper functions. 
+ +func firstLine(s string) string { + if idx := strings.IndexByte(s, '\n'); idx >= 0 { + return s[:idx] + } + return s +} diff --git a/pkg/prx/gitea/platform_test.go b/pkg/prx/gitea/platform_test.go new file mode 100644 index 0000000..e9b5bf3 --- /dev/null +++ b/pkg/prx/gitea/platform_test.go @@ -0,0 +1,710 @@ +//nolint:errcheck // Test handlers don't need to check w.Write errors +package gitea + +import ( + "context" + "net/http" + "net/http/httptest" + "strings" + "testing" + "time" + + "github.com/codeGROOVE-dev/prx/pkg/prx" +) + +func TestPlatform_Name(t *testing.T) { + tests := []struct { + name string + baseURL string + want string + }{ + { + name: "codeberg", + baseURL: "https://codeberg.org", + want: prx.PlatformCodeberg, + }, + { + name: "self-hosted gitea", + baseURL: "https://gitea.example.com", + want: "gitea", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + p := NewPlatform("token", WithBaseURL(tt.baseURL)) + if got := p.Name(); got != tt.want { + t.Errorf("Name() = %q, want %q", got, tt.want) + } + }) + } +} + +func TestNewCodebergPlatform(t *testing.T) { + p := NewCodebergPlatform("test-token") + if p.Name() != prx.PlatformCodeberg { + t.Errorf("NewCodebergPlatform().Name() = %q, want %q", p.Name(), prx.PlatformCodeberg) + } +} + +func TestPlatform_FetchPR(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + + switch { + case strings.HasSuffix(r.URL.Path, "/pulls/123") && !strings.Contains(r.URL.Path, "/reviews") && !strings.Contains(r.URL.Path, "/commits"): + w.WriteHeader(http.StatusOK) + _, _ = w.Write([]byte(`{ + "id": 1, + "number": 123, + "title": "Test PR", + "body": "Test description", + "state": "open", + "draft": false, + "mergeable": true, + "merged": false, + "additions": 100, + "deletions": 50, + "changed_files": 5, + "created_at": "2024-01-01T10:00:00Z", + "updated_at": "2024-01-02T12:00:00Z", + "user": { + "id": 1, + "login": "testauthor", + "full_name": "Test Author" + }, + "head": { + "ref": "feature-branch", + "sha": "abc123def456789" + }, + "base": { + "ref": "main", + "sha": "base123456789" + }, + "labels": [ + {"id": 1, "name": "bug", "color": "ff0000"} + ], + "assignees": [ + {"id": 2, "login": "assignee1"} + ], + "requested_reviewers": [ + {"id": 3, "login": "reviewer1"} + ] + }`)) + + case strings.Contains(r.URL.Path, "/reviews"): + w.WriteHeader(http.StatusOK) + _, _ = w.Write([]byte(`[ + { + "id": 1, + "user": {"id": 4, "login": "reviewer2"}, + "state": "APPROVED", + "body": "LGTM!", + "submitted_at": "2024-01-02T10:00:00Z", + "official": true, + "stale": false, + "dismissed": false + } + ]`)) + + case strings.Contains(r.URL.Path, "/comments"): + w.WriteHeader(http.StatusOK) + _, _ = w.Write([]byte(`[ + { + "id": 1, + "user": {"id": 5, "login": "commenter1"}, + "body": "Can you clarify this?", + "created_at": "2024-01-01T14:00:00Z", + "updated_at": "2024-01-01T14:00:00Z" + } + ]`)) + + case strings.Contains(r.URL.Path, "/commits"): + w.WriteHeader(http.StatusOK) + _, _ = w.Write([]byte(`[ + { + "sha": "abc123def456789", + "commit": { + "message": "Initial commit\n\nWith more details", + "author": { + "name": "Test Author", + "email": "test@example.com", + "date": "2024-01-01T09:00:00Z" + }, + "committer": { + "name": "Test Author", + "email": "test@example.com", + "date": "2024-01-01T09:00:00Z" + } + }, + "author": {"id": 1, "login": "testauthor"} + } + ]`)) + + case strings.Contains(r.URL.Path, 
"/timeline"): + w.WriteHeader(http.StatusOK) + _, _ = w.Write([]byte(`[ + { + "id": 1, + "type": "label", + "created_at": "2024-01-01T11:00:00Z", + "user": {"id": 1, "login": "testauthor"}, + "label": {"id": 1, "name": "bug"} + } + ]`)) + + default: + w.WriteHeader(http.StatusNotFound) + _, _ = w.Write([]byte(`{"message": "not found"}`)) + } + })) + defer server.Close() + + p := NewPlatform("test-token", WithBaseURL(server.URL)) + ctx := context.Background() + + data, err := p.FetchPR(ctx, "owner", "repo", 123, time.Now()) + if err != nil { + t.Fatalf("FetchPR() error = %v", err) + } + + pr := data.PullRequest + + // Verify basic PR fields + if pr.Number != 123 { + t.Errorf("Number = %d, want 123", pr.Number) + } + if pr.Title != "Test PR" { + t.Errorf("Title = %q, want %q", pr.Title, "Test PR") + } + if pr.Author != "testauthor" { + t.Errorf("Author = %q, want %q", pr.Author, "testauthor") + } + if pr.State != "open" { + t.Errorf("State = %q, want %q", pr.State, "open") + } + if pr.Draft { + t.Error("Draft = true, want false") + } + if !*pr.Mergeable { + t.Error("Mergeable = false, want true") + } + if pr.MergeableState != "clean" { + t.Errorf("MergeableState = %q, want %q", pr.MergeableState, "clean") + } + if pr.Additions != 100 { + t.Errorf("Additions = %d, want 100", pr.Additions) + } + if pr.Deletions != 50 { + t.Errorf("Deletions = %d, want 50", pr.Deletions) + } + if pr.ChangedFiles != 5 { + t.Errorf("ChangedFiles = %d, want 5", pr.ChangedFiles) + } + if pr.HeadSHA != "abc123def456789" { + t.Errorf("HeadSHA = %q, want %q", pr.HeadSHA, "abc123def456789") + } + + // Verify labels + if len(pr.Labels) != 1 || pr.Labels[0] != "bug" { + t.Errorf("Labels = %v, want [bug]", pr.Labels) + } + + // Verify assignees + if len(pr.Assignees) != 1 || pr.Assignees[0] != "assignee1" { + t.Errorf("Assignees = %v, want [assignee1]", pr.Assignees) + } + + // Verify reviewers (requested + approved) + if len(pr.Reviewers) != 2 { + t.Errorf("len(Reviewers) = %d, want 2", len(pr.Reviewers)) + } + if pr.Reviewers["reviewer1"] != prx.ReviewStatePending { + t.Errorf("Reviewers[reviewer1] = %v, want %v", pr.Reviewers["reviewer1"], prx.ReviewStatePending) + } + if pr.Reviewers["reviewer2"] != prx.ReviewStateApproved { + t.Errorf("Reviewers[reviewer2] = %v, want %v", pr.Reviewers["reviewer2"], prx.ReviewStateApproved) + } + + // Verify events + if len(data.Events) < 4 { + t.Errorf("len(Events) = %d, want at least 4 (pr_opened, commit, review, comment)", len(data.Events)) + } + + // Check for expected event types + eventTypes := make(map[string]bool) + for _, e := range data.Events { + eventTypes[e.Kind] = true + } + expectedTypes := []string{prx.EventKindPROpened, prx.EventKindCommit, prx.EventKindReview, prx.EventKindComment, prx.EventKindLabeled} + for _, et := range expectedTypes { + if !eventTypes[et] { + t.Errorf("Missing event type %q in events", et) + } + } +} + +func TestPlatform_FetchPR_Merged(t *testing.T) { + mergedAt := "2024-01-03T15:00:00Z" + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + + switch { + case strings.HasSuffix(r.URL.Path, "/pulls/456") && !strings.Contains(r.URL.Path, "/reviews") && !strings.Contains(r.URL.Path, "/commits"): + w.WriteHeader(http.StatusOK) + _, _ = w.Write([]byte(`{ + "id": 2, + "number": 456, + "title": "Merged PR", + "body": "", + "state": "closed", + "draft": false, + "mergeable": false, + "merged": true, + "merged_at": "` + mergedAt + `", + "closed_at": "` + 
mergedAt + `", + "created_at": "2024-01-01T10:00:00Z", + "updated_at": "2024-01-03T15:00:00Z", + "user": {"id": 1, "login": "author"}, + "merged_by": {"id": 2, "login": "merger"}, + "head": {"ref": "feature", "sha": "merged123"}, + "base": {"ref": "main", "sha": "base456"}, + "labels": [], + "assignees": [], + "requested_reviewers": [] + }`)) + case strings.Contains(r.URL.Path, "/reviews"): + w.WriteHeader(http.StatusOK) + _, _ = w.Write([]byte(`[]`)) + case strings.Contains(r.URL.Path, "/comments"): + w.WriteHeader(http.StatusOK) + _, _ = w.Write([]byte(`[]`)) + case strings.Contains(r.URL.Path, "/commits"): + w.WriteHeader(http.StatusOK) + _, _ = w.Write([]byte(`[]`)) + case strings.Contains(r.URL.Path, "/timeline"): + w.WriteHeader(http.StatusOK) + _, _ = w.Write([]byte(`[]`)) + default: + w.WriteHeader(http.StatusNotFound) + } + })) + defer server.Close() + + p := NewPlatform("test-token", WithBaseURL(server.URL)) + ctx := context.Background() + + data, err := p.FetchPR(ctx, "owner", "repo", 456, time.Now()) + if err != nil { + t.Fatalf("FetchPR() error = %v", err) + } + + pr := data.PullRequest + + if !pr.Merged { + t.Error("Merged = false, want true") + } + if pr.MergedBy != "merger" { + t.Errorf("MergedBy = %q, want %q", pr.MergedBy, "merger") + } + if pr.MergedAt == nil { + t.Error("MergedAt = nil, want non-nil") + } + + // Check for merged event + hasMergedEvent := false + for _, e := range data.Events { + if e.Kind == prx.EventKindPRMerged { + hasMergedEvent = true + if e.Actor != "merger" { + t.Errorf("Merged event actor = %q, want %q", e.Actor, "merger") + } + } + } + if !hasMergedEvent { + t.Error("Missing pr_merged event") + } +} + +func TestPlatform_FetchPR_Draft(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + + switch { + case strings.HasSuffix(r.URL.Path, "/pulls/789") && !strings.Contains(r.URL.Path, "/reviews") && !strings.Contains(r.URL.Path, "/commits"): + w.WriteHeader(http.StatusOK) + _, _ = w.Write([]byte(`{ + "id": 3, + "number": 789, + "title": "WIP: Draft PR", + "body": "", + "state": "open", + "draft": true, + "mergeable": false, + "merged": false, + "created_at": "2024-01-01T10:00:00Z", + "updated_at": "2024-01-01T10:00:00Z", + "user": {"id": 1, "login": "author"}, + "head": {"ref": "wip", "sha": "draft123"}, + "base": {"ref": "main", "sha": "base789"}, + "labels": [], + "assignees": [], + "requested_reviewers": [] + }`)) + case strings.Contains(r.URL.Path, "/reviews"): + w.WriteHeader(http.StatusOK) + _, _ = w.Write([]byte(`[]`)) + case strings.Contains(r.URL.Path, "/comments"): + w.WriteHeader(http.StatusOK) + _, _ = w.Write([]byte(`[]`)) + case strings.Contains(r.URL.Path, "/commits"): + w.WriteHeader(http.StatusOK) + _, _ = w.Write([]byte(`[]`)) + case strings.Contains(r.URL.Path, "/timeline"): + w.WriteHeader(http.StatusOK) + _, _ = w.Write([]byte(`[]`)) + default: + w.WriteHeader(http.StatusNotFound) + } + })) + defer server.Close() + + p := NewPlatform("test-token", WithBaseURL(server.URL)) + ctx := context.Background() + + data, err := p.FetchPR(ctx, "owner", "repo", 789, time.Now()) + if err != nil { + t.Fatalf("FetchPR() error = %v", err) + } + + pr := data.PullRequest + + if !pr.Draft { + t.Error("Draft = false, want true") + } + if pr.MergeableState != "draft" { + t.Errorf("MergeableState = %q, want %q", pr.MergeableState, "draft") + } +} + +func TestPlatform_FetchPR_APIError(t *testing.T) { + server := 
httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusNotFound) + _, _ = w.Write([]byte(`{"message": "pull request not found"}`)) + })) + defer server.Close() + + p := NewPlatform("test-token", WithBaseURL(server.URL)) + ctx := context.Background() + + _, err := p.FetchPR(ctx, "owner", "repo", 999, time.Now()) + if err == nil { + t.Fatal("FetchPR() expected error for 404, got nil") + } + if !strings.Contains(err.Error(), "404") { + t.Errorf("Error should contain 404, got: %v", err) + } +} + +func TestConvertReviewState(t *testing.T) { + tests := []struct { + input string + want prx.ReviewState + }{ + {"APPROVED", prx.ReviewStateApproved}, + {"approved", prx.ReviewStateApproved}, + {"REQUEST_CHANGES", prx.ReviewStateChangesRequested}, + {"request_changes", prx.ReviewStateChangesRequested}, + {"COMMENT", prx.ReviewStateCommented}, + {"comment", prx.ReviewStateCommented}, + {"PENDING", prx.ReviewStatePending}, + {"pending", prx.ReviewStatePending}, + {"unknown", prx.ReviewStatePending}, + {"", prx.ReviewStatePending}, + } + + for _, tt := range tests { + t.Run(tt.input, func(t *testing.T) { + got := convertReviewState(tt.input) + if got != tt.want { + t.Errorf("convertReviewState(%q) = %v, want %v", tt.input, got, tt.want) + } + }) + } +} + +func TestConvertReviewOutcome(t *testing.T) { + tests := []struct { + input string + want string + }{ + {"APPROVED", "approved"}, + {"approved", "approved"}, + {"REQUEST_CHANGES", "changes_requested"}, + {"COMMENT", "commented"}, + {"PENDING", "pending"}, + {"unknown", "pending"}, + } + + for _, tt := range tests { + t.Run(tt.input, func(t *testing.T) { + got := convertReviewOutcome(tt.input) + if got != tt.want { + t.Errorf("convertReviewOutcome(%q) = %q, want %q", tt.input, got, tt.want) + } + }) + } +} + +func TestConvertTimelineEvent(t *testing.T) { + now := time.Now() + testUser := &user{Login: "testuser"} + testAssignee := &user{Login: "assignee"} + testLabel := &label{Name: "bug"} + + tests := []struct { + name string + event timelineEvent + wantKind string + wantActor string + wantNil bool + }{ + { + name: "label event", + event: timelineEvent{Type: "label", CreatedAt: now, User: testUser, Label: testLabel}, + wantKind: prx.EventKindLabeled, + wantActor: "testuser", + }, + { + name: "unlabel event", + event: timelineEvent{Type: "unlabel", CreatedAt: now, User: testUser, Label: testLabel}, + wantKind: prx.EventKindUnlabeled, + wantActor: "testuser", + }, + { + name: "label event without label", + event: timelineEvent{Type: "label", CreatedAt: now, User: testUser}, + wantNil: true, + }, + { + name: "assignees event", + event: timelineEvent{Type: "assignees", CreatedAt: now, User: testUser, Assignee: testAssignee}, + wantKind: prx.EventKindAssigned, + wantActor: "testuser", + }, + { + name: "unassignees event", + event: timelineEvent{Type: "unassignees", CreatedAt: now, User: testUser, Assignee: testAssignee}, + wantKind: prx.EventKindUnassigned, + wantActor: "testuser", + }, + { + name: "review_requested event", + event: timelineEvent{Type: "review_requested", CreatedAt: now, User: testUser, Assignee: testAssignee}, + wantKind: prx.EventKindReviewRequested, + wantActor: "testuser", + }, + { + name: "close event", + event: timelineEvent{Type: "close", CreatedAt: now, User: testUser}, + wantKind: prx.EventKindClosed, + wantActor: "testuser", + }, + { + name: "reopen event", + event: timelineEvent{Type: "reopen", CreatedAt: now, User: testUser}, + wantKind: prx.EventKindReopened, + wantActor: 
"testuser", + }, + { + name: "change_title event", + event: timelineEvent{Type: "change_title", CreatedAt: now, User: testUser, Body: "old -> new"}, + wantKind: prx.EventKindRenamedTitle, + wantActor: "testuser", + }, + { + name: "change_ref event", + event: timelineEvent{Type: "change_ref", CreatedAt: now, User: testUser, OldRef: "old", NewRef: "new"}, + wantKind: prx.EventKindBaseRefChanged, + wantActor: "testuser", + }, + { + name: "merge event", + event: timelineEvent{Type: "merge", CreatedAt: now, User: testUser}, + wantKind: prx.EventKindMerged, + wantActor: "testuser", + }, + { + name: "comment_ref event", + event: timelineEvent{Type: "comment_ref", CreatedAt: now, User: testUser}, + wantKind: prx.EventKindCrossReferenced, + wantActor: "testuser", + }, + { + name: "unknown event", + event: timelineEvent{Type: "unknown_type", CreatedAt: now, User: testUser}, + wantNil: true, + }, + { + name: "event without user", + event: timelineEvent{Type: "close", CreatedAt: now}, + wantKind: prx.EventKindClosed, + wantActor: "", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := convertTimelineEvent(&tt.event) + + if tt.wantNil { + if got != nil { + t.Errorf("convertTimelineEvent() = %+v, want nil", got) + } + return + } + + if got == nil { + t.Fatal("convertTimelineEvent() = nil, want non-nil") + } + if got.Kind != tt.wantKind { + t.Errorf("Kind = %q, want %q", got.Kind, tt.wantKind) + } + if got.Actor != tt.wantActor { + t.Errorf("Actor = %q, want %q", got.Actor, tt.wantActor) + } + }) + } +} + +func TestFirstLine(t *testing.T) { + tests := []struct { + input string + want string + }{ + {"single line", "single line"}, + {"first\nsecond", "first"}, + {"first\nsecond\nthird", "first"}, + {"", ""}, + {"\nstarting with newline", ""}, + } + + for _, tt := range tests { + t.Run(tt.input, func(t *testing.T) { + got := firstLine(tt.input) + if got != tt.want { + t.Errorf("firstLine(%q) = %q, want %q", tt.input, got, tt.want) + } + }) + } +} + +func TestPlatform_WithOptions(t *testing.T) { + t.Run("WithLogger", func(t *testing.T) { + p := NewPlatform("token", WithLogger(nil)) + if p == nil { + t.Error("NewPlatform returned nil") + } + }) + + t.Run("WithHTTPClient", func(t *testing.T) { + customClient := &http.Client{Timeout: 60 * time.Second} + p := NewPlatform("token", WithHTTPClient(customClient)) + if p.httpClient != customClient { + t.Error("Custom HTTP client not set") + } + }) + + t.Run("WithBaseURL", func(t *testing.T) { + p := NewPlatform("token", WithBaseURL("https://gitea.example.com/")) + if p.baseURL != "https://gitea.example.com" { + t.Errorf("baseURL = %q, want %q", p.baseURL, "https://gitea.example.com") + } + }) +} + +func TestConvertPullRequest(t *testing.T) { + now := time.Now() + pr := &pullRequest{ + Number: 42, + Title: "Test PR", + Body: "Description", + State: "open", + Draft: false, + Mergeable: true, + Merged: false, + Additions: 10, + Deletions: 5, + ChangedFiles: 2, + CreatedAt: now, + UpdatedAt: now, + User: user{Login: "author"}, + Head: branch{SHA: "abc123"}, + Labels: []label{{Name: "enhancement"}}, + Assignees: []user{{Login: "dev1"}, {Login: "dev2"}}, + RequestedReviewers: []user{{Login: "reviewer"}}, + } + + reviews := []review{ + {User: user{Login: "approver"}, State: "APPROVED", SubmittedAt: now}, + } + + result := convertPullRequest(pr, reviews) + + if result.Number != 42 { + t.Errorf("Number = %d, want 42", result.Number) + } + if result.Title != "Test PR" { + t.Errorf("Title = %q, want %q", result.Title, "Test PR") + } + if 
result.Author != "author" { + t.Errorf("Author = %q, want %q", result.Author, "author") + } + if len(result.Labels) != 1 || result.Labels[0] != "enhancement" { + t.Errorf("Labels = %v, want [enhancement]", result.Labels) + } + if len(result.Assignees) != 2 { + t.Errorf("len(Assignees) = %d, want 2", len(result.Assignees)) + } + if result.Reviewers["reviewer"] != prx.ReviewStatePending { + t.Errorf("Reviewers[reviewer] = %v, want pending", result.Reviewers["reviewer"]) + } + if result.Reviewers["approver"] != prx.ReviewStateApproved { + t.Errorf("Reviewers[approver] = %v, want approved", result.Reviewers["approver"]) + } + if result.MergeableState != "clean" { + t.Errorf("MergeableState = %q, want clean", result.MergeableState) + } +} + +func TestConvertPullRequest_StaleReview(t *testing.T) { + now := time.Now() + pr := &pullRequest{ + Number: 1, + State: "open", + User: user{Login: "author"}, + Head: branch{SHA: "abc"}, + CreatedAt: now, + UpdatedAt: now, + } + + reviews := []review{ + {User: user{Login: "reviewer1"}, State: "APPROVED", Stale: true}, + {User: user{Login: "reviewer2"}, State: "APPROVED", Dismissed: true}, + {User: user{Login: "reviewer3"}, State: "APPROVED", Stale: false, Dismissed: false}, + } + + result := convertPullRequest(pr, reviews) + + // Stale and dismissed reviews should not update reviewer state + if _, exists := result.Reviewers["reviewer1"]; exists { + t.Error("Stale review should not update reviewer state") + } + if _, exists := result.Reviewers["reviewer2"]; exists { + t.Error("Dismissed review should not update reviewer state") + } + if result.Reviewers["reviewer3"] != prx.ReviewStateApproved { + t.Errorf("Reviewers[reviewer3] = %v, want approved", result.Reviewers["reviewer3"]) + } +} diff --git a/pkg/prx/github.go b/pkg/prx/github.go deleted file mode 100644 index 83772cb..0000000 --- a/pkg/prx/github.go +++ /dev/null @@ -1,23 +0,0 @@ -package prx - -import ( - "net/http" - - "github.com/codeGROOVE-dev/prx/pkg/prx/github" -) - -// newGitHubClient creates a new github.Client with the given configuration. -func newGitHubClient(httpClient *http.Client, token, baseURL string) *github.Client { - return &github.Client{ - HTTPClient: httpClient, - Token: token, - BaseURL: baseURL, - } -} - -// newTestGitHubClient creates a github.Client for testing with custom HTTP client and base URL. 
-// -//nolint:unparam // token is always "test-token" in tests but should remain a parameter for flexibility -func newTestGitHubClient(httpClient *http.Client, token, baseURL string) *github.Client { - return newGitHubClient(httpClient, token, baseURL) -} diff --git a/pkg/prx/check_run_history_test.go b/pkg/prx/github/check_run_history_test.go similarity index 92% rename from pkg/prx/check_run_history_test.go rename to pkg/prx/github/check_run_history_test.go index cde892a..96fc62c 100644 --- a/pkg/prx/check_run_history_test.go +++ b/pkg/prx/github/check_run_history_test.go @@ -1,5 +1,5 @@ //nolint:errcheck,gocritic // Test handlers don't need to check w.Write errors; if-else chains are fine for URL routing -package prx +package github import ( "context" @@ -9,6 +9,8 @@ import ( "strings" "testing" "time" + + "github.com/codeGROOVE-dev/prx/pkg/prx" ) // TestCheckRunHistory_MultipleCommits tests that we capture check run failures @@ -162,9 +164,8 @@ func TestCheckRunHistory_MultipleCommits(t *testing.T) { })) defer server.Close() - httpClient := &http.Client{Transport: http.DefaultTransport} - client := NewClient("test-token", WithHTTPClient(httpClient)) - client.github = newTestGitHubClient(httpClient, "test-token", server.URL) + platform := NewTestPlatform("test-token", server.URL) + client := prx.NewClientWithPlatform(platform) ctx := context.Background() prData, err := client.PullRequest(ctx, "codeGROOVE-dev", "slacker", 66) @@ -179,7 +180,7 @@ func TestCheckRunHistory_MultipleCommits(t *testing.T) { checkRunCount := 0 failureCount := 0 successCount := 0 - var checkRunEvents []Event + var checkRunEvents []prx.Event for _, event := range prData.Events { if event.Kind == "check_run" { @@ -219,9 +220,9 @@ func TestCheckRunHistory_MultipleCommits(t *testing.T) { } } - // Verify the CheckSummary shows the LATEST state (both checks passing) + // Verify the prx.CheckSummary shows the LATEST state (both checks passing) if prData.PullRequest.CheckSummary == nil { - t.Fatal("Expected CheckSummary to be set") + t.Fatal("Expected prx.CheckSummary to be set") } if len(prData.PullRequest.CheckSummary.Success) != 2 { @@ -313,9 +314,8 @@ func TestCheckRunHistory_CommitSHAPreservation(t *testing.T) { })) defer server.Close() - httpClient := &http.Client{Transport: http.DefaultTransport} - client := NewClient("test-token", WithHTTPClient(httpClient)) - client.github = newTestGitHubClient(httpClient, "test-token", server.URL) + platform := NewTestPlatform("test-token", server.URL) + client := prx.NewClientWithPlatform(platform) ctx := context.Background() prData, err := client.PullRequest(ctx, "testowner", "testrepo", 100) @@ -324,7 +324,7 @@ func TestCheckRunHistory_CommitSHAPreservation(t *testing.T) { } // Find the commit event - var commitEvent *Event + var commitEvent *prx.Event for i := range prData.Events { if prData.Events[i].Kind == "commit" { commitEvent = &prData.Events[i] @@ -347,7 +347,7 @@ func TestCheckRunHistory_CommitSHAPreservation(t *testing.T) { } // Find the check_run event - var checkRunEvent *Event + var checkRunEvent *prx.Event for i := range prData.Events { if prData.Events[i].Kind == "check_run" { checkRunEvent = &prData.Events[i] @@ -370,11 +370,11 @@ func TestCheckRunHistory_CommitSHAPreservation(t *testing.T) { } } -// TestCheckRunHistory_LatestStateCalculation tests that calculateCheckSummary +// TestCheckRunHistory_LatestStateCalculation tests that Calculateprx.CheckSummary // correctly identifies the latest state when multiple runs exist for the same check. 
func TestCheckRunHistory_LatestStateCalculation(t *testing.T) { // Create events with multiple runs of the same check at different times - events := []Event{ + events := []prx.Event{ { Kind: "check_run", Timestamp: time.Date(2025, 1, 1, 10, 0, 0, 0, time.UTC), @@ -398,7 +398,7 @@ func TestCheckRunHistory_LatestStateCalculation(t *testing.T) { }, } - summary := calculateCheckSummary(events, nil) + summary := prx.CalculateCheckSummary(events, nil) // The latest run (12:00) was successful, so the check should be in Success if len(summary.Success) != 1 { @@ -418,7 +418,7 @@ func TestCheckRunHistory_LatestStateCalculation(t *testing.T) { // correctly handles events that arrive out of chronological order. func TestCheckRunHistory_OutOfOrderEvents(t *testing.T) { // Events intentionally out of order - older success should not override newer failure - events := []Event{ + events := []prx.Event{ { Kind: "check_run", Timestamp: time.Date(2025, 1, 1, 12, 0, 0, 0, time.UTC), // Newest (failure) @@ -442,7 +442,7 @@ func TestCheckRunHistory_OutOfOrderEvents(t *testing.T) { }, } - summary := calculateCheckSummary(events, nil) + summary := prx.CalculateCheckSummary(events, nil) // The latest run (12:00) failed, so the check should be in Failing if len(summary.Failing) != 1 { @@ -460,68 +460,68 @@ func TestCheckRunHistory_OutOfOrderEvents(t *testing.T) { // TestCalculateTestStateFromCheckSummary tests the calculateTestStateFromCheckSummary function. func TestCalculateTestStateFromCheckSummary(t *testing.T) { - client := &Client{} + platform := &Platform{} tests := []struct { name string - summary *CheckSummary + summary *prx.CheckSummary wantState string }{ { name: "nil summary returns none", summary: nil, - wantState: TestStateNone, + wantState: prx.TestStateNone, }, { name: "failing checks returns failing", - summary: &CheckSummary{ + summary: &prx.CheckSummary{ Success: map[string]string{"test1": "passed"}, Failing: map[string]string{"test2": "failed"}, Pending: map[string]string{}, }, - wantState: TestStateFailing, + wantState: prx.TestStateFailing, }, { name: "only pending checks returns pending", - summary: &CheckSummary{ + summary: &prx.CheckSummary{ Success: map[string]string{}, Failing: map[string]string{}, Pending: map[string]string{"test1": "waiting"}, }, - wantState: TestStatePending, + wantState: prx.TestStatePending, }, { name: "only successful checks returns passing", - summary: &CheckSummary{ + summary: &prx.CheckSummary{ Success: map[string]string{"test1": "passed", "test2": "passed"}, Failing: map[string]string{}, Pending: map[string]string{}, }, - wantState: TestStatePassing, + wantState: prx.TestStatePassing, }, { name: "no checks returns none", - summary: &CheckSummary{ + summary: &prx.CheckSummary{ Success: map[string]string{}, Failing: map[string]string{}, Pending: map[string]string{}, }, - wantState: TestStateNone, + wantState: prx.TestStateNone, }, { name: "failing takes precedence over pending", - summary: &CheckSummary{ + summary: &prx.CheckSummary{ Success: map[string]string{}, Failing: map[string]string{"test1": "failed"}, Pending: map[string]string{"test2": "waiting"}, }, - wantState: TestStateFailing, + wantState: prx.TestStateFailing, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got := client.calculateTestStateFromCheckSummary(tt.summary) + got := platform.calculateTestStateFromCheckSummary(tt.summary) if got != tt.wantState { t.Errorf("calculateTestStateFromCheckSummary() = %v, want %v", got, tt.wantState) } diff --git a/pkg/prx/checks_test.go 
b/pkg/prx/github/checks_test.go similarity index 94% rename from pkg/prx/checks_test.go rename to pkg/prx/github/checks_test.go index da3fbd5..3f68ca9 100644 --- a/pkg/prx/checks_test.go +++ b/pkg/prx/github/checks_test.go @@ -1,5 +1,5 @@ //nolint:errcheck,gocritic // Test handlers don't need to check w.Write errors; if-else chains are fine for URL routing -package prx +package github import ( "context" @@ -7,6 +7,8 @@ import ( "net/http/httptest" "strings" "testing" + + "github.com/codeGROOVE-dev/prx/pkg/prx" ) func TestClient_PullRequestWithCheckRuns(t *testing.T) { @@ -125,9 +127,8 @@ func TestClient_PullRequestWithCheckRuns(t *testing.T) { })) defer server.Close() - httpClient := &http.Client{Transport: http.DefaultTransport} - client := NewClient("test-token", WithHTTPClient(httpClient)) - client.github = newTestGitHubClient(httpClient, "test-token", server.URL) + platform := NewTestPlatform("test-token", server.URL) + client := prx.NewClientWithPlatform(platform) ctx := context.Background() prData, err := client.PullRequest(ctx, "testowner", "testrepo", 555) @@ -237,9 +238,8 @@ func TestClient_PullRequestWithBranchProtection(t *testing.T) { })) defer server.Close() - httpClient := &http.Client{Transport: http.DefaultTransport} - client := NewClient("test-token", WithHTTPClient(httpClient)) - client.github = newTestGitHubClient(httpClient, "test-token", server.URL) + platform := NewTestPlatform("test-token", server.URL) + client := prx.NewClientWithPlatform(platform) ctx := context.Background() prData, err := client.PullRequest(ctx, "testowner", "testrepo", 666) diff --git a/pkg/prx/github/client_test.go b/pkg/prx/github/client_test.go index f913d31..f9d15d6 100644 --- a/pkg/prx/github/client_test.go +++ b/pkg/prx/github/client_test.go @@ -436,3 +436,307 @@ func TestClient_ContextCancellation(t *testing.T) { t.Error("Expected context cancellation error but got none") } } + +func TestClient_GraphQL(t *testing.T) { + tests := []struct { + name string + serverHandler http.HandlerFunc + query string + variables map[string]any + wantErr bool + wantErrStatus int + }{ + { + name: "successful query", + serverHandler: func(w http.ResponseWriter, r *http.Request) { + if r.Method != http.MethodPost { + t.Errorf("Expected POST, got %s", r.Method) + } + if r.Header.Get("Authorization") != "Bearer test-token" { + t.Errorf("Expected Authorization header") + } + if r.Header.Get("Content-Type") != "application/json" { + t.Errorf("Expected Content-Type application/json") + } + if r.Header.Get("Accept") != "application/vnd.github.v4+json" { + t.Errorf("Expected Accept header for GraphQL") + } + w.WriteHeader(http.StatusOK) + _, _ = w.Write([]byte(`{"data": {"repository": {"name": "test-repo"}}}`)) + }, + query: "query { repository(owner: $owner, name: $name) { name } }", + variables: map[string]any{"owner": "testowner", "name": "testrepo"}, + wantErr: false, + }, + { + name: "api error", + serverHandler: func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusUnauthorized) + _, _ = w.Write([]byte(`{"message": "Bad credentials"}`)) + }, + query: "query { viewer { login } }", + wantErr: true, + wantErrStatus: http.StatusUnauthorized, + }, + { + name: "server error", + serverHandler: func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusInternalServerError) + _, _ = w.Write([]byte(`{"message": "Internal Server Error"}`)) + }, + query: "query { viewer { login } }", + wantErr: true, + wantErrStatus: http.StatusInternalServerError, + }, + } + + for _, tt := range tests { 
+ t.Run(tt.name, func(t *testing.T) { + server := httptest.NewServer(tt.serverHandler) + defer server.Close() + + client := &Client{ + HTTPClient: server.Client(), + Token: "test-token", + BaseURL: server.URL, + } + + var result map[string]any + err := client.GraphQL(context.Background(), tt.query, tt.variables, &result) + + if tt.wantErr { + if err == nil { + t.Error("Expected error but got none") + } + var apiErr *Error + if errors.As(err, &apiErr) && tt.wantErrStatus != 0 { + if apiErr.StatusCode != tt.wantErrStatus { + t.Errorf("Expected status %d, got %d", tt.wantErrStatus, apiErr.StatusCode) + } + } + } else if err != nil { + t.Errorf("Unexpected error: %v", err) + } + }) + } +} + +func TestClient_GraphQL_DefaultBaseURL(t *testing.T) { + // This test verifies that the default GitHub API URL is used when BaseURL is empty + // We can't actually test against real API, but we test the logic path + client := &Client{ + HTTPClient: &http.Client{Timeout: 1 * time.Millisecond}, // Very short timeout to fail fast + Token: "test-token", + BaseURL: "", // Empty to trigger default + } + + var result map[string]any + err := client.GraphQL(context.Background(), "query { viewer { login } }", nil, &result) + // Should fail due to timeout/connection, but the point is it tried the default URL + if err == nil { + t.Error("Expected error (timeout) but got none") + } +} + +func TestTransport_RoundTrip(t *testing.T) { + tests := []struct { + name string + serverHandler http.HandlerFunc + wantErr bool + wantStatusCode int + }{ + { + name: "successful request", + serverHandler: func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + _, _ = w.Write([]byte(`{"test": "data"}`)) + }, + wantErr: false, + wantStatusCode: http.StatusOK, + }, + { + name: "successful request with empty body", + serverHandler: func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) // Retry paths are covered by the dedicated retry tests below + _, _ = w.Write([]byte(`{}`)) + }, + wantErr: false, + wantStatusCode: http.StatusOK, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + server := httptest.NewServer(tt.serverHandler) + defer server.Close() + + transport := &Transport{Base: http.DefaultTransport} + client := &http.Client{Transport: transport} + + req, _ := http.NewRequestWithContext(context.Background(), http.MethodGet, server.URL, http.NoBody) + resp, err := client.Do(req) + + if tt.wantErr { + if err == nil { + t.Error("Expected error but got none") + } + } else { + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + if resp != nil { + defer resp.Body.Close() + if resp.StatusCode != tt.wantStatusCode { + t.Errorf("Expected status %d, got %d", tt.wantStatusCode, resp.StatusCode) + } + } + } + }) + } +} + +func TestTransport_RoundTrip_WithBody(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + _, _ = w.Write([]byte(`{}`)) + })) + defer server.Close() + + transport := &Transport{Base: http.DefaultTransport} + client := &http.Client{Transport: transport} + + body := strings.NewReader(`{"test": "data"}`) + req, _ := http.NewRequestWithContext(context.Background(), http.MethodPost, server.URL, body) + resp, err := client.Do(req) + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + if resp != nil { + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + t.Errorf("Expected status 200, got %d", resp.StatusCode) + } + } +} + +func
TestTransport_RoundTrip_NilBase(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + _, _ = w.Write([]byte(`{}`)) + })) + defer server.Close() + + transport := &Transport{Base: nil} // Will default to http.DefaultTransport + client := &http.Client{Transport: transport} + + req, _ := http.NewRequestWithContext(context.Background(), http.MethodGet, server.URL, http.NoBody) + resp, err := client.Do(req) + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + if resp != nil { + defer resp.Body.Close() + } +} + +func TestRetryableError_Error(t *testing.T) { + err := &retryableError{StatusCode: http.StatusTooManyRequests} + errMsg := err.Error() + if errMsg != "Too Many Requests" { + t.Errorf("Expected 'Too Many Requests', got %q", errMsg) + } +} + +func TestTransport_RateLimitRetry(t *testing.T) { + callCount := 0 + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + callCount++ + if callCount == 1 { + // First call returns rate limit + w.Header().Set("X-Ratelimit-Remaining", "0") + w.WriteHeader(http.StatusForbidden) + _, _ = w.Write([]byte(`{"message": "rate limit exceeded"}`)) + return + } + // Second call succeeds + w.WriteHeader(http.StatusOK) + _, _ = w.Write([]byte(`{}`)) + })) + defer server.Close() + + transport := &Transport{Base: http.DefaultTransport} + client := &http.Client{Transport: transport} + + req, _ := http.NewRequestWithContext(context.Background(), http.MethodGet, server.URL, http.NoBody) + resp, err := client.Do(req) + + // The retry logic should kick in for rate limit + if err != nil && callCount < 2 { + t.Logf("Request failed after %d calls: %v", callCount, err) + } + if resp != nil { + defer resp.Body.Close() + } +} + +func TestTransport_ServerErrorRetry(t *testing.T) { + callCount := 0 + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + callCount++ + if callCount == 1 { + // First call returns 500 + w.WriteHeader(http.StatusInternalServerError) + _, _ = w.Write([]byte(`{"message": "server error"}`)) + return + } + // Second call succeeds + w.WriteHeader(http.StatusOK) + _, _ = w.Write([]byte(`{}`)) + })) + defer server.Close() + + transport := &Transport{Base: http.DefaultTransport} + client := &http.Client{Transport: transport} + + req, _ := http.NewRequestWithContext(context.Background(), http.MethodGet, server.URL, http.NoBody) + resp, err := client.Do(req) + + // The retry logic should kick in for 5xx errors + if err != nil && callCount < 2 { + t.Logf("Request failed after %d calls: %v", callCount, err) + } + if resp != nil { + defer resp.Body.Close() + } +} + +func TestTransport_TooManyRequestsRetry(t *testing.T) { + callCount := 0 + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + callCount++ + if callCount == 1 { + // First call returns 429 + w.WriteHeader(http.StatusTooManyRequests) + _, _ = w.Write([]byte(`{"message": "too many requests"}`)) + return + } + // Second call succeeds + w.WriteHeader(http.StatusOK) + _, _ = w.Write([]byte(`{}`)) + })) + defer server.Close() + + transport := &Transport{Base: http.DefaultTransport} + client := &http.Client{Transport: transport} + + req, _ := http.NewRequestWithContext(context.Background(), http.MethodGet, server.URL, http.NoBody) + resp, err := client.Do(req) + + // The retry logic should kick in for 429 errors + if err != nil && callCount < 2 { + t.Logf("Request failed after %d calls: 
%v", callCount, err) + } + if resp != nil { + defer resp.Body.Close() + } +} diff --git a/pkg/prx/collaborators_test.go b/pkg/prx/github/collaborators_test.go similarity index 74% rename from pkg/prx/collaborators_test.go rename to pkg/prx/github/collaborators_test.go index f68ec3f..30242c6 100644 --- a/pkg/prx/collaborators_test.go +++ b/pkg/prx/github/collaborators_test.go @@ -1,10 +1,12 @@ -package prx +package github import ( "context" "log/slog" "testing" + "github.com/codeGROOVE-dev/prx/pkg/prx" + "github.com/codeGROOVE-dev/fido" ) @@ -14,16 +16,16 @@ func TestPermissionToWriteAccess(t *testing.T) { permission string expected int }{ - {"admin", WriteAccessDefinitely}, - {"maintain", WriteAccessDefinitely}, - {"write", WriteAccessDefinitely}, - {"read", WriteAccessNo}, - {"triage", WriteAccessNo}, - {"none", WriteAccessNo}, - {"", WriteAccessUnlikely}, // Not in collaborators list - {"unknown", WriteAccessUnlikely}, // Unknown permission - {"ADMIN", WriteAccessUnlikely}, // Case sensitive - not matched - {"something", WriteAccessUnlikely}, // Invalid permission + {"admin", prx.WriteAccessDefinitely}, + {"maintain", prx.WriteAccessDefinitely}, + {"write", prx.WriteAccessDefinitely}, + {"read", prx.WriteAccessNo}, + {"triage", prx.WriteAccessNo}, + {"none", prx.WriteAccessNo}, + {"", prx.WriteAccessUnlikely}, // Not in collaborators list + {"unknown", prx.WriteAccessUnlikely}, // Unknown permission + {"ADMIN", prx.WriteAccessUnlikely}, // Case sensitive - not matched + {"something", prx.WriteAccessUnlikely}, // Invalid permission } for _, tt := range tests { @@ -32,11 +34,11 @@ func TestPermissionToWriteAccess(t *testing.T) { var result int switch tt.permission { case "admin", "maintain", "write": - result = WriteAccessDefinitely + result = prx.WriteAccessDefinitely case "read", "triage", "none": - result = WriteAccessNo + result = prx.WriteAccessNo default: - result = WriteAccessUnlikely + result = prx.WriteAccessUnlikely } if result != tt.expected { t.Errorf("permission mapping for %q = %d, want %d", @@ -97,37 +99,37 @@ func TestWriteAccessFromAssociationWithCache(t *testing.T) { name: "member with admin permission", user: "alice", permission: "admin", - expected: WriteAccessDefinitely, + expected: prx.WriteAccessDefinitely, }, { name: "member with write permission", user: "bob", permission: "write", - expected: WriteAccessDefinitely, + expected: prx.WriteAccessDefinitely, }, { name: "member with maintain permission", user: "charlie", permission: "maintain", - expected: WriteAccessDefinitely, + expected: prx.WriteAccessDefinitely, }, { name: "member with read permission", user: "david", permission: "read", - expected: WriteAccessNo, + expected: prx.WriteAccessNo, }, { name: "member with triage permission", user: "eve", permission: "triage", - expected: WriteAccessNo, + expected: prx.WriteAccessNo, }, { name: "member not in collaborators list", user: "frank", permission: "", // Not in the cache - expected: WriteAccessUnlikely, + expected: prx.WriteAccessUnlikely, }, } @@ -150,13 +152,13 @@ func TestWriteAccessFromAssociationWithCache(t *testing.T) { cacheKey := collaboratorsCacheKey("owner", "repo") cache.Set(cacheKey, collabs) - // Create client with cache - c := &Client{ + // Create platform with cache + p := &Platform{ logger: slog.Default(), collaboratorsCache: cache, } - result := c.writeAccessFromAssociation(ctx, "owner", "repo", tt.user, "MEMBER") + result := p.writeAccessFromAssociation(ctx, "owner", "repo", tt.user, "MEMBER") if result != tt.expected { 
t.Errorf("writeAccessFromAssociation(MEMBER, %s) = %d, want %d", tt.user, result, tt.expected) @@ -180,18 +182,18 @@ func TestWriteAccessFromAssociationCacheHit(t *testing.T) { cacheKey := collaboratorsCacheKey("codeGROOVE-dev", "goose") cache.Set(cacheKey, collabs) - // Create client with cache but without a real GitHub client + // Create platform with cache but without a real HTTP client // This tests that we use the cache and don't try to call the API - c := &Client{ + p := &Platform{ logger: slog.Default(), collaboratorsCache: cache, - github: nil, // No GitHub client - would fail if API called + client: nil, // No HTTP client - would fail if API called } - result := c.writeAccessFromAssociation(ctx, "codeGROOVE-dev", "goose", "tstromberg", "MEMBER") - if result != WriteAccessDefinitely { + result := p.writeAccessFromAssociation(ctx, "codeGROOVE-dev", "goose", "tstromberg", "MEMBER") + if result != prx.WriteAccessDefinitely { t.Errorf("writeAccessFromAssociation(MEMBER, tstromberg) = %d, want %d", - result, WriteAccessDefinitely) + result, prx.WriteAccessDefinitely) } } @@ -202,7 +204,7 @@ func TestWriteAccessFromAssociationNonMember(t *testing.T) { // Empty cache cache := fido.New[string, map[string]string](fido.TTL(collaboratorsCacheTTL)) - c := &Client{ + p := &Platform{ logger: slog.Default(), collaboratorsCache: cache, } @@ -211,17 +213,17 @@ func TestWriteAccessFromAssociationNonMember(t *testing.T) { association string expected int }{ - {"OWNER", WriteAccessDefinitely}, - {"COLLABORATOR", WriteAccessDefinitely}, - {"CONTRIBUTOR", WriteAccessUnlikely}, - {"NONE", WriteAccessUnlikely}, - {"FIRST_TIME_CONTRIBUTOR", WriteAccessUnlikely}, - {"FIRST_TIMER", WriteAccessUnlikely}, + {"OWNER", prx.WriteAccessDefinitely}, + {"COLLABORATOR", prx.WriteAccessDefinitely}, + {"CONTRIBUTOR", prx.WriteAccessUnlikely}, + {"NONE", prx.WriteAccessUnlikely}, + {"FIRST_TIME_CONTRIBUTOR", prx.WriteAccessUnlikely}, + {"FIRST_TIMER", prx.WriteAccessUnlikely}, } for _, tt := range tests { t.Run(tt.association, func(t *testing.T) { - result := c.writeAccessFromAssociation(ctx, "owner", "repo", "user", tt.association) + result := p.writeAccessFromAssociation(ctx, "owner", "repo", "user", tt.association) if result != tt.expected { t.Errorf("writeAccessFromAssociation(%s) = %d, want %d", tt.association, result, tt.expected) diff --git a/pkg/prx/graphql_complete_test.go b/pkg/prx/github/graphql_complete_test.go similarity index 96% rename from pkg/prx/graphql_complete_test.go rename to pkg/prx/github/graphql_complete_test.go index ac16ec4..1511aad 100644 --- a/pkg/prx/graphql_complete_test.go +++ b/pkg/prx/github/graphql_complete_test.go @@ -1,4 +1,4 @@ -package prx +package github import ( "context" @@ -9,6 +9,7 @@ import ( "time" "github.com/codeGROOVE-dev/fido" + "github.com/codeGROOVE-dev/prx/pkg/prx" ) func TestIsBot(t *testing.T) { @@ -150,10 +151,14 @@ func TestConvertGraphQLReviewCommentsWithOutdated(t *testing.T) { })) defer server.Close() - client := &Client{ + platform := &Platform{ logger: slog.Default(), collaboratorsCache: fido.New[string, map[string]string](fido.TTL(collaboratorsCacheTTL)), - github: newTestGitHubClient(&http.Client{}, "test-token", server.URL), + client: &Client{ + HTTPClient: &http.Client{}, + Token: "test-token", + BaseURL: server.URL, + }, } ctx := context.Background() @@ -266,10 +271,10 @@ func TestConvertGraphQLReviewCommentsWithOutdated(t *testing.T) { } // Convert GraphQL data to events - events := client.convertGraphQLToEventsComplete(ctx, data, "testowner", 
"testrepo") + events := platform.convertGraphQLToEventsComplete(ctx, data, "testowner", "testrepo") // Filter to only review_comment events - var reviewComments []Event + var reviewComments []prx.Event for _, event := range events { if event.Kind == "review_comment" { reviewComments = append(reviewComments, event) diff --git a/pkg/prx/graphql_events_test.go b/pkg/prx/github/graphql_events_test.go similarity index 95% rename from pkg/prx/graphql_events_test.go rename to pkg/prx/github/graphql_events_test.go index ae694fa..45d6078 100644 --- a/pkg/prx/graphql_events_test.go +++ b/pkg/prx/github/graphql_events_test.go @@ -1,5 +1,5 @@ //nolint:errcheck,gocritic // Test handlers don't need to check w.Write errors; if-else chains are fine for URL routing -package prx +package github import ( "context" @@ -7,6 +7,8 @@ import ( "net/http/httptest" "strings" "testing" + + "github.com/codeGROOVE-dev/prx/pkg/prx" ) func TestClient_PullRequestWithReviews(t *testing.T) { @@ -144,9 +146,8 @@ func TestClient_PullRequestWithReviews(t *testing.T) { })) defer server.Close() - httpClient := &http.Client{Transport: http.DefaultTransport} - client := NewClient("test-token", WithHTTPClient(httpClient)) - client.github = newTestGitHubClient(httpClient, "test-token", server.URL) + platform := NewTestPlatform("test-token", server.URL) + client := prx.NewClientWithPlatform(platform) ctx := context.Background() prData, err := client.PullRequest(ctx, "testowner", "testrepo", 789) @@ -260,9 +261,8 @@ func TestClient_PullRequestWithBots(t *testing.T) { })) defer server.Close() - httpClient := &http.Client{Transport: http.DefaultTransport} - client := NewClient("test-token", WithHTTPClient(httpClient)) - client.github = newTestGitHubClient(httpClient, "test-token", server.URL) + platform := NewTestPlatform("test-token", server.URL) + client := prx.NewClientWithPlatform(platform) ctx := context.Background() prData, err := client.PullRequest(ctx, "testowner", "testrepo", 999) diff --git a/pkg/prx/graphql_parity_test.go b/pkg/prx/github/graphql_parity_test.go similarity index 88% rename from pkg/prx/graphql_parity_test.go rename to pkg/prx/github/graphql_parity_test.go index a64403c..19a18bc 100644 --- a/pkg/prx/graphql_parity_test.go +++ b/pkg/prx/github/graphql_parity_test.go @@ -1,4 +1,4 @@ -package prx +package github import ( "context" @@ -10,6 +10,8 @@ import ( "testing" "time" + "github.com/codeGROOVE-dev/prx/pkg/prx" + "github.com/codeGROOVE-dev/fido" ) @@ -20,7 +22,7 @@ func TestGraphQLParity(t *testing.T) { t.Skip("Requires real GitHub API access for full parity testing") ctx := context.Background() - client := &Client{} // Would need proper initialization + platform := &Platform{} // Would need proper initialization refTime := time.Now() // Test data @@ -29,13 +31,13 @@ func TestGraphQLParity(t *testing.T) { prNumber := 1 // Fetch via direct call (non-cached) - restData, err := client.pullRequestViaGraphQL(ctx, owner, repo, prNumber, refTime) + restData, err := platform.FetchPR(ctx, owner, repo, prNumber, refTime) if err != nil { t.Fatalf("Direct fetch failed: %v", err) } // Fetch via GraphQL - graphqlData, err := client.pullRequestViaGraphQL(ctx, owner, repo, prNumber, refTime) + graphqlData, err := platform.FetchPR(ctx, owner, repo, prNumber, refTime) if err != nil { t.Fatalf("GraphQL fetch failed: %v", err) } @@ -45,7 +47,7 @@ func TestGraphQLParity(t *testing.T) { } // comparePullRequestData compares REST and GraphQL results -func comparePullRequestData(t *testing.T, rest, graphql *PullRequestData) { +func 
comparePullRequestData(t *testing.T, rest, graphql *prx.PullRequestData) { t.Helper() // Compare PullRequest fields pr1 := rest.PullRequest @@ -89,7 +91,7 @@ func comparePullRequestData(t *testing.T, rest, graphql *PullRequestData) { } // countEventsByType counts events by their Kind -func countEventsByType(events []Event) map[string]int { +func countEventsByType(events []prx.Event) map[string]int { counts := make(map[string]int) for i := range events { counts[events[i].Kind]++ @@ -98,7 +100,7 @@ func countEventsByType(events []Event) map[string]int { } // compareEvents compares event details -func compareEvents(t *testing.T, restEvents, graphqlEvents []Event) { +func compareEvents(t *testing.T, restEvents, graphqlEvents []prx.Event) { t.Helper() // Sort events by timestamp and kind for comparison sort.Slice(restEvents, func(i, j int) bool { @@ -128,7 +130,7 @@ func compareEvents(t *testing.T, restEvents, graphqlEvents []Event) { } // For events with write access, ensure it's preserved - if rest.WriteAccess != WriteAccessNA && graphql.WriteAccess == WriteAccessNA { + if rest.WriteAccess != prx.WriteAccessNA && graphql.WriteAccess == prx.WriteAccessNA { t.Errorf("WriteAccess lost for event %s by %s: REST=%d, GraphQL=%d", rest.Kind, rest.Actor, rest.WriteAccess, graphql.WriteAccess) } @@ -265,29 +267,29 @@ func TestWriteAccessMapping(t *testing.T) { })) defer server.Close() - c := &Client{ + p := &Platform{ logger: slog.Default(), collaboratorsCache: fido.New[string, map[string]string](fido.TTL(collaboratorsCacheTTL)), - github: newTestGitHubClient(&http.Client{}, "test-token", server.URL), + client: &Client{HTTPClient: &http.Client{}, Token: "test-token", BaseURL: server.URL}, } tests := []struct { association string expected int }{ - {"OWNER", WriteAccessDefinitely}, - {"COLLABORATOR", WriteAccessDefinitely}, - {"MEMBER", WriteAccessLikely}, // Falls back to likely when collaborators API unavailable - {"CONTRIBUTOR", WriteAccessUnlikely}, - {"NONE", WriteAccessUnlikely}, - {"FIRST_TIME_CONTRIBUTOR", WriteAccessUnlikely}, - {"FIRST_TIMER", WriteAccessUnlikely}, - {"UNKNOWN", WriteAccessNA}, + {"OWNER", prx.WriteAccessDefinitely}, + {"COLLABORATOR", prx.WriteAccessDefinitely}, + {"MEMBER", prx.WriteAccessLikely}, // Falls back to likely when collaborators API unavailable + {"CONTRIBUTOR", prx.WriteAccessUnlikely}, + {"NONE", prx.WriteAccessUnlikely}, + {"FIRST_TIME_CONTRIBUTOR", prx.WriteAccessUnlikely}, + {"FIRST_TIMER", prx.WriteAccessUnlikely}, + {"UNKNOWN", prx.WriteAccessNA}, } for _, tt := range tests { t.Run(tt.association, func(t *testing.T) { - result := c.writeAccessFromAssociation(ctx, "owner", "repo", "user", tt.association) + result := p.writeAccessFromAssociation(ctx, "owner", "repo", "user", tt.association) if result != tt.expected { t.Errorf("writeAccessFromAssociation(%s) = %d, want %d", tt.association, result, tt.expected) @@ -328,8 +330,8 @@ func TestRequiredChecksExtraction(t *testing.T) { }, } - c := &Client{} - checks := c.extractRequiredChecksFromGraphQL(data) + p := &Platform{} + checks := p.extractRequiredChecksFromGraphQL(data) // Should deduplicate and contain all unique checks expectedChecks := map[string]bool{ diff --git a/pkg/prx/graphql_query.go b/pkg/prx/github/graphql_query.go similarity index 99% rename from pkg/prx/graphql_query.go rename to pkg/prx/github/graphql_query.go index 99a95ce..4d0e9e7 100644 --- a/pkg/prx/graphql_query.go +++ b/pkg/prx/github/graphql_query.go @@ -1,4 +1,4 @@ -package prx +package github // completeGraphQLQuery is the GraphQL 
query that fetches all PR data. // This replaces 13+ REST API calls with a single comprehensive query. diff --git a/pkg/prx/graphql_types.go b/pkg/prx/github/graphql_types.go similarity index 99% rename from pkg/prx/graphql_types.go rename to pkg/prx/github/graphql_types.go index b540d87..4c62ae3 100644 --- a/pkg/prx/graphql_types.go +++ b/pkg/prx/github/graphql_types.go @@ -1,4 +1,4 @@ -package prx +package github import ( "strings" diff --git a/pkg/prx/github/platform.go b/pkg/prx/github/platform.go new file mode 100644 index 0000000..7874424 --- /dev/null +++ b/pkg/prx/github/platform.go @@ -0,0 +1,1107 @@ +package github + +import ( + "context" + "fmt" + "log/slog" + "net/http" + "slices" + "sort" + "strings" + "time" + + "github.com/codeGROOVE-dev/fido" + "github.com/codeGROOVE-dev/prx/pkg/prx" +) + +const ( + // HTTP client configuration constants. + maxIdleConns = 100 + maxIdleConnsPerHost = 10 + idleConnTimeoutSec = 90 + + // Cache TTL constants. + checkRunsCacheTTL = 20 * 24 * time.Hour // 20 days - validity checked against reference time + collaboratorsCacheTTL = 3 * time.Hour // 3 hours - repo-level, simple TTL + rulesetsCacheTTL = 3 * time.Hour // 3 hours - repo-level, simple TTL +) + +// cachedCheckRuns stores check run events with a timestamp for cache validation. +type cachedCheckRuns struct { + CachedAt time.Time + Events []prx.Event +} + +// Platform implements the prx.Platform interface for GitHub. +type Platform struct { + client *Client + logger *slog.Logger + collaboratorsCache *fido.Cache[string, map[string]string] + rulesetsCache *fido.Cache[string, []string] + checkRunsCache *fido.Cache[string, cachedCheckRuns] +} + +// Option configures a Platform. +type Option func(*Platform) + +// WithLogger sets a custom logger for the GitHub platform. +func WithLogger(logger *slog.Logger) Option { + return func(p *Platform) { + p.logger = logger + } +} + +// WithHTTPClient sets a custom HTTP client for the GitHub platform. +func WithHTTPClient(httpClient *http.Client) Option { + return func(p *Platform) { + if httpClient.Transport == nil { + httpClient.Transport = &Transport{Base: http.DefaultTransport} + } else if _, ok := httpClient.Transport.(*Transport); !ok { + httpClient.Transport = &Transport{Base: httpClient.Transport} + } + p.client = &Client{ + HTTPClient: httpClient, + Token: p.client.Token, + BaseURL: p.client.BaseURL, + } + } +} + +// WithBaseURL sets a custom base URL for the GitHub API. +func WithBaseURL(baseURL string) Option { + return func(p *Platform) { + p.client.BaseURL = baseURL + } +} + +// NewTestPlatform creates a Platform for testing with a custom base URL. +// Exported for use in prx package tests. +func NewTestPlatform(token, baseURL string) *Platform { + return NewPlatform(token, WithBaseURL(baseURL)) +} + +// NewPlatform creates a new GitHub platform client. 
+func NewPlatform(token string, opts ...Option) *Platform { + transport := &http.Transport{ + MaxIdleConns: maxIdleConns, + MaxIdleConnsPerHost: maxIdleConnsPerHost, + IdleConnTimeout: idleConnTimeoutSec * time.Second, + DisableCompression: false, + DisableKeepAlives: false, + } + + p := &Platform{ + client: &Client{ + HTTPClient: &http.Client{ + Transport: &Transport{Base: transport}, + Timeout: 30 * time.Second, + }, + Token: token, + BaseURL: API, + }, + logger: slog.Default(), + collaboratorsCache: fido.New[string, map[string]string](fido.TTL(collaboratorsCacheTTL)), + rulesetsCache: fido.New[string, []string](fido.TTL(rulesetsCacheTTL)), + checkRunsCache: fido.New[string, cachedCheckRuns](fido.TTL(checkRunsCacheTTL)), + } + + for _, opt := range opts { + opt(p) + } + + return p +} + +// Name returns the platform identifier. +func (*Platform) Name() string { + return "github" +} + +// FetchPR retrieves a pull request with all events and metadata. +func (p *Platform) FetchPR(ctx context.Context, owner, repo string, number int, refTime time.Time) (*prx.PullRequestData, error) { + p.logger.InfoContext(ctx, "fetching pull request via GraphQL", "owner", owner, "repo", repo, "pr", number) + + prData, err := p.fetchPullRequestCompleteViaGraphQL(ctx, owner, repo, number) + if err != nil { + return nil, fmt.Errorf("GraphQL query failed: %w", err) + } + + additionalRequired, err := p.fetchRulesetsREST(ctx, owner, repo) + if err != nil { + p.logger.WarnContext(ctx, "failed to fetch rulesets", "error", err) + } else if prData.PullRequest.CheckSummary != nil && len(additionalRequired) > 0 { + p.logger.InfoContext(ctx, "added required checks from rulesets", "count", len(additionalRequired)) + } + + existingRequired := p.existingRequiredChecks(prData) + existingRequired = append(existingRequired, additionalRequired...) + + checkRunEvents := p.fetchAllCheckRunsREST(ctx, owner, repo, prData, refTime) + + for i := range checkRunEvents { + if slices.Contains(existingRequired, checkRunEvents[i].Body) { + checkRunEvents[i].Required = true + } + } + + prData.Events = append(prData.Events, checkRunEvents...) + + if len(checkRunEvents) > 0 { + p.recalculateCheckSummaryWithCheckRuns(ctx, prData, checkRunEvents) + } + + p.logger.InfoContext(ctx, "fetched check runs via REST", "count", len(checkRunEvents)) + + sort.Slice(prData.Events, func(i, j int) bool { + return prData.Events[i].Timestamp.Before(prData.Events[j].Timestamp) + }) + + apiCallsUsed := 2 + if len(checkRunEvents) > 0 { + apiCallsUsed++ + } + + p.logger.InfoContext(ctx, "successfully fetched pull request via hybrid GraphQL+REST", + "owner", owner, "repo", repo, "pr", number, + "event_count", len(prData.Events), + "api_calls_made", fmt.Sprintf("%d (vs 13+ with REST)", apiCallsUsed)) + + return prData, nil +} + +// fetchPullRequestCompleteViaGraphQL fetches all PR data in a single GraphQL query. 
+func (p *Platform) fetchPullRequestCompleteViaGraphQL(ctx context.Context, owner, repo string, prNumber int) (*prx.PullRequestData, error) { + data, err := p.executeGraphQL(ctx, owner, repo, prNumber) + if err != nil { + return nil, err + } + + pr := p.convertGraphQLToPullRequest(ctx, data, owner, repo) + events := p.convertGraphQLToEventsComplete(ctx, data, owner, repo) + requiredChecks := p.extractRequiredChecksFromGraphQL(data) + + events = prx.FilterEvents(events) + sort.Slice(events, func(i, j int) bool { + return events[i].Timestamp.Before(events[j].Timestamp) + }) + prx.UpgradeWriteAccess(events) + + testState := p.calculateTestStateFromGraphQL(data) + prx.FinalizePullRequest(&pr, events, requiredChecks, testState) + + return &prx.PullRequestData{ + PullRequest: pr, + Events: events, + }, nil +} + +// executeGraphQL executes the GraphQL query and handles errors. +func (p *Platform) executeGraphQL(ctx context.Context, owner, repo string, prNumber int) (*graphQLPullRequestComplete, error) { + variables := map[string]any{ + "owner": owner, + "repo": repo, + "number": prNumber, + } + + var result graphQLCompleteResponse + if err := p.client.GraphQL(ctx, completeGraphQLQuery, variables, &result); err != nil { + return nil, err + } + + if len(result.Errors) > 0 { + var errMsgs []string + var hasPermissionError bool + for _, e := range result.Errors { + errMsgs = append(errMsgs, e.Message) + msg := strings.ToLower(e.Message) + if strings.Contains(msg, "not accessible by integration") || + strings.Contains(msg, "resource not accessible") || + strings.Contains(msg, "forbidden") || + strings.Contains(msg, "insufficient permissions") || + strings.Contains(msg, "requires authentication") { + hasPermissionError = true + } + } + + errStr := strings.Join(errMsgs, "; ") + if result.Data.Repository.PullRequest.Number == 0 { + if hasPermissionError { + return nil, fmt.Errorf( + "fetching PR %s/%s#%d via GraphQL failed due to insufficient permissions: %s "+ + "(note: some fields like branchProtectionRule or refUpdateRule may require push access "+ + "even on public repositories; check token scopes or try using a token with 'repo' or 'public_repo' scope)", + owner, repo, prNumber, errStr) + } + return nil, fmt.Errorf("fetching PR %s/%s#%d via GraphQL: %s", owner, repo, prNumber, errStr) + } + + if hasPermissionError { + p.logger.WarnContext(ctx, "GraphQL query returned permission errors but PR data was retrieved - some fields may be missing", + "owner", owner, + "repo", repo, + "pr", prNumber, + "errors", errStr, + "note", "fields like branchProtectionRule or refUpdateRule require push access") + } else { + p.logger.WarnContext(ctx, "GraphQL query returned errors but PR data was retrieved", + "owner", owner, + "repo", repo, + "pr", prNumber, + "errors", errStr) + } + } + + return &result.Data.Repository.PullRequest, nil +} + +// convertGraphQLToPullRequest converts GraphQL data to PullRequest. 
+func (p *Platform) convertGraphQLToPullRequest(ctx context.Context, data *graphQLPullRequestComplete, owner, repo string) prx.PullRequest { + pr := prx.PullRequest{ + Number: data.Number, + Title: data.Title, + Body: prx.Truncate(data.Body), + Author: data.Author.Login, + State: strings.ToLower(data.State), + CreatedAt: data.CreatedAt, + UpdatedAt: data.UpdatedAt, + Draft: data.IsDraft, + Additions: data.Additions, + Deletions: data.Deletions, + ChangedFiles: data.ChangedFiles, + HeadSHA: data.HeadRef.Target.OID, + } + + if data.ClosedAt != nil { + pr.ClosedAt = data.ClosedAt + } + if data.MergedAt != nil { + pr.MergedAt = data.MergedAt + pr.Merged = true + } + if data.MergedBy != nil { + pr.MergedBy = data.MergedBy.Login + } + + switch data.MergeStateStatus { + case "CLEAN": + pr.MergeableState = "clean" + case "UNSTABLE": + pr.MergeableState = "unstable" + case "BLOCKED": + pr.MergeableState = "blocked" + case "BEHIND": + pr.MergeableState = "behind" + case "DIRTY": + pr.MergeableState = "dirty" + default: + pr.MergeableState = strings.ToLower(data.MergeStateStatus) + } + + if data.Author.Login != "" { + pr.AuthorWriteAccess = p.writeAccessFromAssociation(ctx, owner, repo, data.Author.Login, data.AuthorAssociation) + pr.AuthorBot = isBot(data.Author) + } + + pr.Assignees = make([]string, 0) + for _, assignee := range data.Assignees.Nodes { + pr.Assignees = append(pr.Assignees, assignee.Login) + } + + for _, label := range data.Labels.Nodes { + pr.Labels = append(pr.Labels, label.Name) + } + + for _, node := range data.Commits.Nodes { + pr.Commits = append(pr.Commits, node.Commit.OID) + } + + pr.Reviewers = buildReviewersMap(data) + + return pr +} + +// convertGraphQLToEventsComplete converts GraphQL data to Events. +func (p *Platform) convertGraphQLToEventsComplete(ctx context.Context, data *graphQLPullRequestComplete, owner, repo string) []prx.Event { + var events []prx.Event + + events = append(events, prx.Event{ + Kind: prx.EventKindPROpened, + Timestamp: data.CreatedAt, + Actor: data.Author.Login, + Body: prx.Truncate(data.Body), + Bot: isBot(data.Author), + WriteAccess: p.writeAccessFromAssociation(ctx, owner, repo, data.Author.Login, data.AuthorAssociation), + }) + + for _, node := range data.Commits.Nodes { + event := prx.Event{ + Kind: prx.EventKindCommit, + Timestamp: node.Commit.CommittedDate, + Body: node.Commit.OID, + Description: prx.Truncate(node.Commit.Message), + } + if node.Commit.Author.User != nil { + event.Actor = node.Commit.Author.User.Login + event.Bot = isBot(*node.Commit.Author.User) + } else { + event.Actor = node.Commit.Author.Name + } + events = append(events, event) + } + + for i := range data.Reviews.Nodes { + review := &data.Reviews.Nodes[i] + if review.State == "" { + continue + } + timestamp := review.CreatedAt + if review.SubmittedAt != nil { + timestamp = *review.SubmittedAt + } + event := prx.Event{ + Kind: prx.EventKindReview, + Timestamp: timestamp, + Actor: review.Author.Login, + Body: prx.Truncate(review.Body), + Outcome: strings.ToLower(review.State), + Question: prx.ContainsQuestion(review.Body), + Bot: isBot(review.Author), + WriteAccess: p.writeAccessFromAssociation(ctx, owner, repo, review.Author.Login, review.AuthorAssociation), + } + events = append(events, event) + } + + for i := range data.ReviewThreads.Nodes { + thread := &data.ReviewThreads.Nodes[i] + for j := range thread.Comments.Nodes { + comment := &thread.Comments.Nodes[j] + event := prx.Event{ + Kind: prx.EventKindReviewComment, + Timestamp: comment.CreatedAt, + Actor: 
comment.Author.Login, + Body: prx.Truncate(comment.Body), + Question: prx.ContainsQuestion(comment.Body), + Bot: isBot(comment.Author), + WriteAccess: p.writeAccessFromAssociation(ctx, owner, repo, comment.Author.Login, comment.AuthorAssociation), + Outdated: comment.Outdated, + } + events = append(events, event) + } + } + + for _, comment := range data.Comments.Nodes { + event := prx.Event{ + Kind: prx.EventKindComment, + Timestamp: comment.CreatedAt, + Actor: comment.Author.Login, + Body: prx.Truncate(comment.Body), + Question: prx.ContainsQuestion(comment.Body), + Bot: isBot(comment.Author), + WriteAccess: p.writeAccessFromAssociation(ctx, owner, repo, comment.Author.Login, comment.AuthorAssociation), + } + events = append(events, event) + } + + if data.HeadRef.Target.StatusCheckRollup != nil { + for i := range data.HeadRef.Target.StatusCheckRollup.Contexts.Nodes { + node := &data.HeadRef.Target.StatusCheckRollup.Contexts.Nodes[i] + switch node.TypeName { + case "CheckRun": + var description string + switch { + case node.Title != "" && node.Summary != "": + description = fmt.Sprintf("%s: %s", node.Title, node.Summary) + case node.Title != "": + description = node.Title + case node.Summary != "": + description = node.Summary + default: + // No description available + } + + if node.StartedAt != nil { + events = append(events, prx.Event{ + Kind: prx.EventKindCheckRun, + Timestamp: *node.StartedAt, + Body: node.Name, + Outcome: strings.ToLower(node.Status), + Bot: true, + Description: description, + }) + } + + if node.CompletedAt != nil { + events = append(events, prx.Event{ + Kind: prx.EventKindCheckRun, + Timestamp: *node.CompletedAt, + Body: node.Name, + Outcome: strings.ToLower(node.Conclusion), + Bot: true, + Description: description, + }) + } + + case "StatusContext": + if node.CreatedAt == nil { + continue + } + event := prx.Event{ + Kind: prx.EventKindStatusCheck, + Timestamp: *node.CreatedAt, + Outcome: strings.ToLower(node.State), + Body: node.Context, + Description: node.Description, + } + if node.Creator != nil { + event.Actor = node.Creator.Login + event.Bot = isBot(*node.Creator) + } + events = append(events, event) + default: + // Skip unknown status check types + } + } + } + + for _, item := range data.TimelineItems.Nodes { + event := p.parseGraphQLTimelineEvent(ctx, item, owner, repo) + if event != nil { + events = append(events, *event) + } + } + + if data.ClosedAt != nil && !data.IsDraft { + event := prx.Event{ + Kind: prx.EventKindPRClosed, + Timestamp: *data.ClosedAt, + } + if data.MergedBy != nil { + event.Actor = data.MergedBy.Login + event.Kind = prx.EventKindPRMerged + event.Bot = isBot(*data.MergedBy) + } + events = append(events, event) + } + + return events +} + +// parseGraphQLTimelineEvent parses a single timeline event. 
+// +//nolint:gocognit,maintidx,revive // High complexity justified - must handle all GitHub timeline event types +func (*Platform) parseGraphQLTimelineEvent(_ context.Context, item map[string]any, _, _ string) *prx.Event { + typename, ok := item["__typename"].(string) + if !ok { + return nil + } + + getTime := func(key string) *time.Time { + if str, ok := item[key].(string); ok { + if t, err := time.Parse(time.RFC3339, str); err == nil { + return &t + } + } + return nil + } + + getActor := func() string { + if actor, ok := item["actor"].(map[string]any); ok { + if login, ok := actor["login"].(string); ok { + return login + } + } + return "unknown" + } + + isActorBot := func() bool { + if actor, ok := item["actor"].(map[string]any); ok { + var actorObj graphQLActor + if login, ok := actor["login"].(string); ok { + actorObj.Login = login + } + if id, ok := actor["id"].(string); ok { + actorObj.ID = id + } + if typ, ok := actor["__typename"].(string); ok { + actorObj.Type = typ + } + return isBot(actorObj) + } + return false + } + + createdAt := getTime("createdAt") + if createdAt == nil { + return nil + } + + event := &prx.Event{ + Timestamp: *createdAt, + Actor: getActor(), + Bot: isActorBot(), + } + + switch typename { + case "AssignedEvent": + event.Kind = prx.EventKindAssigned + if assignee, ok := item["assignee"].(map[string]any); ok { + if login, ok := assignee["login"].(string); ok { + event.Target = login + } + } + + case "UnassignedEvent": + event.Kind = prx.EventKindUnassigned + if assignee, ok := item["assignee"].(map[string]any); ok { + if login, ok := assignee["login"].(string); ok { + event.Target = login + } + } + + case "LabeledEvent": + event.Kind = prx.EventKindLabeled + if label, ok := item["label"].(map[string]any); ok { + if name, ok := label["name"].(string); ok { + event.Target = name + } + } + + case "UnlabeledEvent": + event.Kind = prx.EventKindUnlabeled + if label, ok := item["label"].(map[string]any); ok { + if name, ok := label["name"].(string); ok { + event.Target = name + } + } + + case "MilestonedEvent": + event.Kind = prx.EventKindMilestoned + if title, ok := item["milestoneTitle"].(string); ok { + event.Target = title + } + + case "DemilestonedEvent": + event.Kind = prx.EventKindDemilestoned + if title, ok := item["milestoneTitle"].(string); ok { + event.Target = title + } + + case "ReviewRequestedEvent": + event.Kind = prx.EventKindReviewRequested + if reviewer, ok := item["requestedReviewer"].(map[string]any); ok { + if login, ok := reviewer["login"].(string); ok { + event.Target = login + } else if name, ok := reviewer["name"].(string); ok { + event.Target = name + } + } + + case "ReviewRequestRemovedEvent": + event.Kind = prx.EventKindReviewRequestRemoved + if reviewer, ok := item["requestedReviewer"].(map[string]any); ok { + if login, ok := reviewer["login"].(string); ok { + event.Target = login + } else if name, ok := reviewer["name"].(string); ok { + event.Target = name + } + } + + case "MentionedEvent": + event.Kind = prx.EventKindMentioned + event.Body = "User was mentioned" + + case "ReadyForReviewEvent": + event.Kind = prx.EventKindReadyForReview + + case "ConvertToDraftEvent": + event.Kind = prx.EventKindConvertToDraft + + case "ClosedEvent": + event.Kind = prx.EventKindClosed + + case "ReopenedEvent": + event.Kind = prx.EventKindReopened + + case "MergedEvent": + event.Kind = prx.EventKindMerged + + case "AutoMergeEnabledEvent": + event.Kind = prx.EventKindAutoMergeEnabled + + case "AutoMergeDisabledEvent": + event.Kind = 
prx.EventKindAutoMergeDisabled + + case "ReviewDismissedEvent": + event.Kind = prx.EventKindReviewDismissed + if msg, ok := item["dismissalMessage"].(string); ok { + event.Body = msg + } + + case "BaseRefChangedEvent": + event.Kind = prx.EventKindBaseRefChanged + + case "BaseRefForcePushedEvent": + event.Kind = prx.EventKindBaseRefForcePushed + + case "HeadRefForcePushedEvent": + event.Kind = prx.EventKindHeadRefForcePushed + + case "HeadRefDeletedEvent": + event.Kind = prx.EventKindHeadRefDeleted + + case "HeadRefRestoredEvent": + event.Kind = prx.EventKindHeadRefRestored + + case "RenamedTitleEvent": + event.Kind = prx.EventKindRenamedTitle + if prev, ok := item["previousTitle"].(string); ok { + if curr, ok := item["currentTitle"].(string); ok { + event.Body = fmt.Sprintf("Renamed from %q to %q", prev, curr) + } + } + + case "LockedEvent": + event.Kind = prx.EventKindLocked + + case "UnlockedEvent": + event.Kind = prx.EventKindUnlocked + + case "AddedToMergeQueueEvent": + event.Kind = prx.EventKindAddedToMergeQueue + + case "RemovedFromMergeQueueEvent": + event.Kind = prx.EventKindRemovedFromMergeQueue + + case "AutomaticBaseChangeSucceededEvent": + event.Kind = prx.EventKindAutomaticBaseChangeSucceeded + + case "AutomaticBaseChangeFailedEvent": + event.Kind = prx.EventKindAutomaticBaseChangeFailed + + case "ConnectedEvent": + event.Kind = prx.EventKindConnected + + case "DisconnectedEvent": + event.Kind = prx.EventKindDisconnected + + case "CrossReferencedEvent": + event.Kind = prx.EventKindCrossReferenced + + case "ReferencedEvent": + event.Kind = prx.EventKindReferenced + + case "SubscribedEvent": + event.Kind = prx.EventKindSubscribed + + case "UnsubscribedEvent": + event.Kind = prx.EventKindUnsubscribed + + case "DeployedEvent": + event.Kind = prx.EventKindDeployed + + case "DeploymentEnvironmentChangedEvent": + event.Kind = prx.EventKindDeploymentEnvironmentChanged + + case "PinnedEvent": + event.Kind = prx.EventKindPinned + + case "UnpinnedEvent": + event.Kind = prx.EventKindUnpinned + + case "TransferredEvent": + event.Kind = prx.EventKindTransferred + + case "UserBlockedEvent": + event.Kind = prx.EventKindUserBlocked + + default: + return nil + } + + return event +} + +// writeAccessFromAssociation calculates write access from association. +func (p *Platform) writeAccessFromAssociation(ctx context.Context, owner, repo, user, association string) int { + if user == "" { + return prx.WriteAccessNA + } + + switch association { + case "OWNER", "COLLABORATOR": + return prx.WriteAccessDefinitely + case "MEMBER": + return p.checkCollaboratorPermission(ctx, owner, repo, user) + case "CONTRIBUTOR", "NONE", "FIRST_TIME_CONTRIBUTOR", "FIRST_TIMER": + return prx.WriteAccessUnlikely + default: + return prx.WriteAccessNA + } +} + +// checkCollaboratorPermission checks if a user has write access. 
+func (p *Platform) checkCollaboratorPermission(ctx context.Context, owner, repo, user string) int { + collabs, err := p.collaboratorsCache.Fetch(collaboratorsCacheKey(owner, repo), func() (map[string]string, error) { + result, fetchErr := p.client.Collaborators(ctx, owner, repo) + if fetchErr != nil { + p.logger.WarnContext(ctx, "failed to fetch collaborators for write access check", + "owner", owner, + "repo", repo, + "user", user, + "error", fetchErr) + return nil, fetchErr + } + return result, nil + }) + if err != nil { + return prx.WriteAccessLikely + } + + switch collabs[user] { + case "admin", "maintain", "write": + return prx.WriteAccessDefinitely + case "read", "triage", "none": + return prx.WriteAccessNo + default: + return prx.WriteAccessUnlikely + } +} + +// extractRequiredChecksFromGraphQL gets required checks from GraphQL response. +func (*Platform) extractRequiredChecksFromGraphQL(data *graphQLPullRequestComplete) []string { + seen := make(map[string]bool) + + if data.BaseRef.RefUpdateRule != nil { + for _, c := range data.BaseRef.RefUpdateRule.RequiredStatusCheckContexts { + seen[c] = true + } + } + + if data.BaseRef.BranchProtectionRule != nil { + for _, c := range data.BaseRef.BranchProtectionRule.RequiredStatusCheckContexts { + seen[c] = true + } + } + + checks := make([]string, 0, len(seen)) + for c := range seen { + checks = append(checks, c) + } + return checks +} + +// calculateTestStateFromGraphQL determines test state from check runs. +func (*Platform) calculateTestStateFromGraphQL(data *graphQLPullRequestComplete) string { + if data.HeadRef.Target.StatusCheckRollup == nil { + return "" + } + + var hasFailure, hasRunning, hasQueued bool + + for i := range data.HeadRef.Target.StatusCheckRollup.Contexts.Nodes { + node := &data.HeadRef.Target.StatusCheckRollup.Contexts.Nodes[i] + if node.TypeName != "CheckRun" { + continue + } + + if !strings.Contains(strings.ToLower(node.Name), "test") && + !strings.Contains(strings.ToLower(node.Name), "check") && + !strings.Contains(strings.ToLower(node.Name), "ci") { + continue + } + + switch strings.ToLower(node.Status) { + case "queued": + hasQueued = true + case "in_progress": + hasRunning = true + default: + // Other statuses don't affect state calculation + } + + switch strings.ToLower(node.Conclusion) { + case "failure", "timed_out", "action_required": + hasFailure = true + default: + // Other conclusions don't indicate failure + } + } + + if hasFailure { + return "failing" + } + if hasRunning { + return "running" + } + if hasQueued { + return "queued" + } + return "passing" +} + +// truncateSHA returns the first 7 characters of a SHA, or the full string if shorter. +func truncateSHA(sha string) string { + if len(sha) > 7 { + return sha[:7] + } + return sha +} + +// buildReviewersMap constructs a map of reviewer login to their review state. 
+func buildReviewersMap(data *graphQLPullRequestComplete) map[string]prx.ReviewState { + reviewers := make(map[string]prx.ReviewState) + + for _, request := range data.ReviewRequests.Nodes { + reviewer := request.RequestedReviewer + if reviewer.Login != "" { + reviewers[reviewer.Login] = prx.ReviewStatePending + } else if reviewer.Name != "" { + reviewers[reviewer.Name] = prx.ReviewStatePending + } + } + + for i := range data.Reviews.Nodes { + review := &data.Reviews.Nodes[i] + if review.Author.Login == "" { + continue + } + + var state prx.ReviewState + switch strings.ToUpper(review.State) { + case "APPROVED": + state = prx.ReviewStateApproved + case "CHANGES_REQUESTED": + state = prx.ReviewStateChangesRequested + case "COMMENTED": + state = prx.ReviewStateCommented + default: + continue + } + + reviewers[review.Author.Login] = state + } + + return reviewers +} + +// fetchRulesetsREST fetches repository rulesets via REST API. +func (p *Platform) fetchRulesetsREST(ctx context.Context, owner, repo string) ([]string, error) { + cacheKey := rulesetsCacheKey(owner, repo) + + return p.rulesetsCache.Fetch(cacheKey, func() ([]string, error) { + path := fmt.Sprintf("/repos/%s/%s/rulesets", owner, repo) + var rulesets []Ruleset + + if _, err := p.client.Get(ctx, path, &rulesets); err != nil { + return nil, err + } + + var required []string + for _, rs := range rulesets { + if rs.Target != "branch" { + continue + } + for _, rule := range rs.Rules { + if rule.Type == "required_status_checks" && rule.Parameters.RequiredStatusChecks != nil { + for _, chk := range rule.Parameters.RequiredStatusChecks { + required = append(required, chk.Context) + } + } + } + } + + p.logger.InfoContext(ctx, "fetched required checks from rulesets", + "owner", owner, "repo", repo, "count", len(required), "checks", required) + + return required, nil + }) +} + +// fetchCheckRunsREST fetches check runs via REST API for a specific commit. 
+func (p *Platform) fetchCheckRunsREST(ctx context.Context, owner, repo, sha string, refTime time.Time) ([]prx.Event, error) { + if sha == "" { + return nil, nil + } + + cacheKey := checkRunsCacheKey(owner, repo, sha) + + if cached, ok := p.checkRunsCache.Get(cacheKey); ok { + if !cached.CachedAt.Before(refTime) { + p.logger.InfoContext(ctx, "cache hit: check runs", + "owner", owner, "repo", repo, "sha", truncateSHA(sha), "count", len(cached.Events)) + return cached.Events, nil + } + p.logger.InfoContext(ctx, "cache miss: check runs expired", + "owner", owner, "repo", repo, "sha", truncateSHA(sha), + "cached_at", cached.CachedAt, "reference_time", refTime) + } + + path := fmt.Sprintf("/repos/%s/%s/commits/%s/check-runs?per_page=100", owner, repo, sha) + var checkRuns CheckRuns + if _, err := p.client.Get(ctx, path, &checkRuns); err != nil { + return nil, fmt.Errorf("fetching check runs: %w", err) + } + + var events []prx.Event + for _, run := range checkRuns.CheckRuns { + if run == nil { + continue + } + + var timestamp time.Time + var outcome string + + switch { + case !run.CompletedAt.IsZero(): + timestamp = run.CompletedAt + outcome = strings.ToLower(run.Conclusion) + case !run.StartedAt.IsZero(): + timestamp = run.StartedAt + outcome = strings.ToLower(run.Status) + default: + continue + } + + event := prx.Event{ + Kind: prx.EventKindCheckRun, + Timestamp: timestamp, + Actor: "github", + Bot: true, + Body: run.Name, + Outcome: outcome, + } + + switch { + case run.Output.Title != "" && run.Output.Summary != "": + event.Description = fmt.Sprintf("%s: %s", run.Output.Title, run.Output.Summary) + case run.Output.Title != "": + event.Description = run.Output.Title + case run.Output.Summary != "": + event.Description = run.Output.Summary + default: + // No description available + } + + events = append(events, event) + } + + p.checkRunsCache.Set(cacheKey, cachedCheckRuns{ + Events: events, + CachedAt: time.Now(), + }) + + p.logger.InfoContext(ctx, "fetched check runs from API", + "owner", owner, "repo", repo, "sha", truncateSHA(sha), "count", len(events)) + + return events, nil +} + +// fetchAllCheckRunsREST fetches check runs for all commits in the PR. +func (p *Platform) fetchAllCheckRunsREST(ctx context.Context, owner, repo string, prData *prx.PullRequestData, refTime time.Time) []prx.Event { + shas := make(map[string]bool) + + if prData.PullRequest.HeadSHA != "" { + shas[prData.PullRequest.HeadSHA] = true + } + + for i := range prData.Events { + e := &prData.Events[i] + if e.Kind == prx.EventKindCommit && e.Body != "" { + shas[e.Body] = true + } + } + + var all []prx.Event + seen := make(map[string]bool) + + for sha := range shas { + events, err := p.fetchCheckRunsREST(ctx, owner, repo, sha, refTime) + if err != nil { + p.logger.WarnContext(ctx, "failed to fetch check runs for commit", "sha", sha, "error", err) + continue + } + + for i := range events { + ev := &events[i] + key := fmt.Sprintf("%s:%s", ev.Body, ev.Timestamp.Format(time.RFC3339Nano)) + if !seen[key] { + seen[key] = true + ev.Target = sha + all = append(all, *ev) + } + } + } + + return all +} + +// existingRequiredChecks extracts required checks that were already identified. 
+func (*Platform) existingRequiredChecks(prData *prx.PullRequestData) []string { + var required []string + + for i := range prData.Events { + e := &prData.Events[i] + if e.Required && (e.Kind == prx.EventKindCheckRun || e.Kind == prx.EventKindStatusCheck) { + required = append(required, e.Body) + } + } + + if prData.PullRequest.CheckSummary != nil { + for chk := range prData.PullRequest.CheckSummary.Pending { + if !slices.Contains(required, chk) { + required = append(required, chk) + } + } + } + + return required +} + +// recalculateCheckSummaryWithCheckRuns updates the check summary with REST-fetched check runs. +func (p *Platform) recalculateCheckSummaryWithCheckRuns(_ context.Context, prData *prx.PullRequestData, _ []prx.Event) { + var required []string + if prData.PullRequest.CheckSummary != nil { + for chk := range prData.PullRequest.CheckSummary.Pending { + required = append(required, chk) + } + } + + prData.PullRequest.CheckSummary = prx.CalculateCheckSummary(prData.Events, required) + prData.PullRequest.TestState = p.calculateTestStateFromCheckSummary(prData.PullRequest.CheckSummary) +} + +// calculateTestStateFromCheckSummary determines test state from a CheckSummary. +func (*Platform) calculateTestStateFromCheckSummary(summary *prx.CheckSummary) string { + if summary == nil { + return prx.TestStateNone + } + + if len(summary.Failing) > 0 { + return prx.TestStateFailing + } + + if len(summary.Pending) > 0 { + return prx.TestStatePending + } + + if len(summary.Success) > 0 { + return prx.TestStatePassing + } + + return prx.TestStateNone +} + +// collaboratorsCacheKey generates a cache key for collaborators data. +func collaboratorsCacheKey(owner, repo string) string { + return fmt.Sprintf("%s/%s", owner, repo) +} + +// rulesetsCacheKey generates a cache key for rulesets data. +func rulesetsCacheKey(owner, repo string) string { + return fmt.Sprintf("%s/%s", owner, repo) +} + +// checkRunsCacheKey generates a cache key for check runs data. 
+func checkRunsCacheKey(owner, repo, sha string) string { + return fmt.Sprintf("%s/%s/%s", owner, repo, sha) +} diff --git a/pkg/prx/github/platform_test.go b/pkg/prx/github/platform_test.go new file mode 100644 index 0000000..078a072 --- /dev/null +++ b/pkg/prx/github/platform_test.go @@ -0,0 +1,377 @@ +//nolint:errcheck // Test handlers don't need to check w.Write errors +package github + +import ( + "context" + "log/slog" + "net/http" + "net/http/httptest" + "os" + "testing" + "time" +) + +func TestWithLogger(t *testing.T) { + logger := slog.New(slog.NewTextHandler(os.Stderr, nil)) + platform := NewPlatform("test-token", WithLogger(logger)) + + if platform.logger != logger { + t.Error("Expected custom logger to be set") + } +} + +func TestWithHTTPClient(t *testing.T) { + customClient := &http.Client{ + Timeout: 60 * time.Second, + } + + platform := NewPlatform("test-token", WithHTTPClient(customClient)) + + if platform.client.HTTPClient == nil { + t.Error("Expected HTTP client to be set") + } + + // Verify transport was wrapped + if _, ok := platform.client.HTTPClient.Transport.(*Transport); !ok { + t.Error("Expected transport to be wrapped with retry Transport") + } +} + +func TestWithHTTPClient_ExistingTransport(t *testing.T) { + customTransport := &http.Transport{} + customClient := &http.Client{ + Transport: customTransport, + } + + platform := NewPlatform("test-token", WithHTTPClient(customClient)) + + // Verify existing transport was wrapped + if transport, ok := platform.client.HTTPClient.Transport.(*Transport); !ok { + t.Error("Expected transport to be wrapped") + } else if transport.Base != customTransport { + t.Error("Expected base transport to be the custom transport") + } +} + +func TestWithHTTPClient_AlreadyWrapped(t *testing.T) { + wrappedTransport := &Transport{Base: http.DefaultTransport} + customClient := &http.Client{ + Transport: wrappedTransport, + } + + platform := NewPlatform("test-token", WithHTTPClient(customClient)) + + // Verify transport is not double-wrapped + if transport, ok := platform.client.HTTPClient.Transport.(*Transport); !ok { + t.Error("Expected transport to remain as Transport") + } else if transport != wrappedTransport { + t.Error("Expected transport to not be double-wrapped") + } +} + +func TestCalculateTestStateFromGraphQL(t *testing.T) { + tests := []struct { + name string + data *graphQLPullRequestComplete + wantState string + }{ + { + name: "no status check rollup", + data: &graphQLPullRequestComplete{}, + wantState: "", + }, + { + name: "failing test", + data: &graphQLPullRequestComplete{ + HeadRef: makeHeadRef([]graphQLStatusCheckNode{ + { + TypeName: "CheckRun", + Name: "unit-tests", + Status: "completed", + Conclusion: "failure", + }, + }), + }, + wantState: "failing", + }, + { + name: "running test", + data: &graphQLPullRequestComplete{ + HeadRef: makeHeadRef([]graphQLStatusCheckNode{ + { + TypeName: "CheckRun", + Name: "test-check", + Status: "in_progress", + }, + }), + }, + wantState: "running", + }, + { + name: "queued test", + data: &graphQLPullRequestComplete{ + HeadRef: makeHeadRef([]graphQLStatusCheckNode{ + { + TypeName: "CheckRun", + Name: "CI-test", + Status: "queued", + }, + }), + }, + wantState: "queued", + }, + { + name: "passing tests", + data: &graphQLPullRequestComplete{ + HeadRef: makeHeadRef([]graphQLStatusCheckNode{ + { + TypeName: "CheckRun", + Name: "test-suite", + Status: "completed", + Conclusion: "success", + }, + }), + }, + wantState: "passing", + }, + { + name: "non-test check run ignored", + data: &graphQLPullRequestComplete{ +
HeadRef: makeHeadRef([]graphQLStatusCheckNode{ + { + TypeName: "CheckRun", + Name: "lint", + Status: "completed", + Conclusion: "failure", + }, + }), + }, + wantState: "passing", + }, + { + name: "non-CheckRun type ignored", + data: &graphQLPullRequestComplete{ + HeadRef: makeHeadRef([]graphQLStatusCheckNode{ + { + TypeName: "StatusContext", + Name: "test-status", + Status: "completed", + Conclusion: "failure", + }, + }), + }, + wantState: "passing", + }, + { + name: "timed out test", + data: &graphQLPullRequestComplete{ + HeadRef: makeHeadRef([]graphQLStatusCheckNode{ + { + TypeName: "CheckRun", + Name: "test-timeout", + Status: "completed", + Conclusion: "timed_out", + }, + }), + }, + wantState: "failing", + }, + { + name: "action required test", + data: &graphQLPullRequestComplete{ + HeadRef: makeHeadRef([]graphQLStatusCheckNode{ + { + TypeName: "CheckRun", + Name: "check-required", + Status: "completed", + Conclusion: "action_required", + }, + }), + }, + wantState: "failing", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + p := &Platform{} + state := p.calculateTestStateFromGraphQL(tt.data) + if state != tt.wantState { + t.Errorf("Expected state %q, got %q", tt.wantState, state) + } + }) + } +} + +// Helper function to create HeadRef with status check nodes +func makeHeadRef(nodes []graphQLStatusCheckNode) struct { + Target struct { + StatusCheckRollup *struct { + Contexts struct { + Nodes []graphQLStatusCheckNode `json:"nodes"` + } `json:"contexts"` + State string `json:"state"` + } `json:"statusCheckRollup"` + OID string `json:"oid"` + } `json:"target"` + Name string `json:"name"` +} { + return struct { + Target struct { + StatusCheckRollup *struct { + Contexts struct { + Nodes []graphQLStatusCheckNode `json:"nodes"` + } `json:"contexts"` + State string `json:"state"` + } `json:"statusCheckRollup"` + OID string `json:"oid"` + } `json:"target"` + Name string `json:"name"` + }{ + Target: struct { + StatusCheckRollup *struct { + Contexts struct { + Nodes []graphQLStatusCheckNode `json:"nodes"` + } `json:"contexts"` + State string `json:"state"` + } `json:"statusCheckRollup"` + OID string `json:"oid"` + }{ + StatusCheckRollup: &struct { + Contexts struct { + Nodes []graphQLStatusCheckNode `json:"nodes"` + } `json:"contexts"` + State string `json:"state"` + }{ + Contexts: struct { + Nodes []graphQLStatusCheckNode `json:"nodes"` + }{ + Nodes: nodes, + }, + }, + }, + } +} + +func TestExecuteGraphQL_ErrorHandling(t *testing.T) { + tests := []struct { + name string + serverHandler http.HandlerFunc + wantErr bool + }{ + { + name: "graphql error in response", + serverHandler: func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{"errors": [{"message": "Field 'badField' doesn't exist"}]}`)) + }, + wantErr: true, + }, + { + name: "successful response with pr data", + serverHandler: func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{ + "data": { + "repository": { + "pullRequest": { + "id": "PR_123", + "number": 1, + "title": "Test PR", + "body": "Test body", + "state": "OPEN", + "mergeable": "MERGEABLE", + "mergeStateStatus": "CLEAN", + "authorAssociation": "OWNER", + "isDraft": false, + "additions": 10, + "deletions": 5, + "changedFiles": 2, + "createdAt": "2024-01-01T00:00:00Z", + "updatedAt": "2024-01-02T00:00:00Z", + "author": {"login": "testuser"}, + "assignees": {"nodes": []}, + "labels": {"nodes": []}, + "reviewRequests": {"nodes": []}, + "baseRef": { + "name": "main", + 
"target": {"oid": "abc123"} + }, + "headRef": { + "name": "feature", + "target": {"oid": "def456"} + }, + "reviews": {"nodes": []}, + "timelineItems": {"nodes": [], "pageInfo": {"hasNextPage": false}}, + "latestReviews": {"nodes": []} + } + }, + "rateLimit": { + "cost": 1, + "remaining": 5000, + "limit": 5000, + "resetAt": "2024-01-01T01:00:00Z" + } + } + }`)) + }, + wantErr: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + server := httptest.NewServer(tt.serverHandler) + defer server.Close() + + platform := NewTestPlatform("test-token", server.URL) + + _, err := platform.executeGraphQL(context.Background(), "owner", "repo", 1) + + if tt.wantErr && err == nil { + t.Error("Expected error but got none") + } + if !tt.wantErr && err != nil { + t.Errorf("Unexpected error: %v", err) + } + }) + } +} + +func TestTruncateSHA(t *testing.T) { + tests := []struct { + name string + sha string + wantSHA string + }{ + { + name: "long sha", + sha: "1234567890abcdef", + wantSHA: "1234567", + }, + { + name: "short sha", + sha: "12345", + wantSHA: "12345", + }, + { + name: "exactly 7 chars", + sha: "1234567", + wantSHA: "1234567", + }, + { + name: "empty sha", + sha: "", + wantSHA: "", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := truncateSHA(tt.sha) + if result != tt.wantSHA { + t.Errorf("Expected %q, got %q", tt.wantSHA, result) + } + }) + } +} diff --git a/pkg/prx/github/testdata/gitmdm_pr15.json b/pkg/prx/github/testdata/gitmdm_pr15.json new file mode 100644 index 0000000..1e79cf6 --- /dev/null +++ b/pkg/prx/github/testdata/gitmdm_pr15.json @@ -0,0 +1 @@ +{"events":[{"timestamp":"2025-10-06T11:25:40Z","kind":"pr_opened","actor":"dependabot","body":"Bumps the actions group with 1 update: [github/codeql-action](https://github.com/github/codeql-action).\n\nUpdates `github/codeql-action` from 3.30.5 to 3.30.6\n\u003cdetails\u003e\n\u003csummary\u003eRelease notes\u003c/summary\u003e\n\u003cp\u003e\u003cem\u003eSourced from \u003ca href=\"https://github.com/github/","write_access":-1},{"timestamp":"2025-10-06T11:25:40Z","kind":"commit","actor":"dependabot[bot]","body":"Bump github/codeql-action from 3.30.5 to 3.30.6 in the actions group\n\nBumps the actions group with 1 update: [github/codeql-action](https://github.com/github/codeql-action).\n\n\nUpdates `github/codeql-action` from 3.30.5 to 3.30.6\n- [Release notes](https://g","bot":true},{"timestamp":"2025-10-06T11:25:41Z","kind":"labeled","actor":"dependabot","target":"github_actions"},{"timestamp":"2025-10-06T11:25:41Z","kind":"labeled","actor":"dependabot","target":"dependencies"},{"timestamp":"2025-10-06T11:25:42Z","kind":"labeled","actor":"dependabot","target":"github_actions"},{"timestamp":"2025-10-06T11:25:42Z","kind":"labeled","actor":"dependabot","target":"dependencies"},{"timestamp":"2025-10-06T11:25:52Z","kind":"check_run","actor":"","outcome":"success","body":"dependency-review","bot":true},{"timestamp":"2025-10-06T11:25:52Z","kind":"check_run","actor":"github","outcome":"success","body":"dependency-review","bot":true},{"timestamp":"2025-10-06T11:26:24Z","kind":"check_run","actor":"","outcome":"success","body":"Kusari Inspector","description":"Security Analysis Passed: No security issues found","bot":true},{"timestamp":"2025-10-06T11:26:24Z","kind":"comment","actor":"kusari-inspector","body":"![Kusari Inspector](https://cdn.prod.website-files.com/645bae6cd3ac4d56631d637f/68a88788b10ca50fa44181d9_inspector-banner-dark.svg)\n\n#### Kusari Analysis Results:\n![Proceed 
with these changes](https://cdn.prod.website-files.com/645bae6cd3ac4d56631d637f/68a","write_access":-1,"bot":true,"question":true},{"timestamp":"2025-10-06T11:26:24Z","kind":"check_run","actor":"github","outcome":"success","body":"Kusari Inspector","description":"Security Analysis Passed: No security issues found","bot":true},{"timestamp":"2025-10-06T11:26:54Z","kind":"check_run","actor":"","outcome":"success","body":"test","bot":true},{"timestamp":"2025-10-06T11:26:54Z","kind":"check_run","actor":"github","outcome":"success","body":"test","bot":true},{"timestamp":"2025-10-06T11:27:02Z","kind":"check_run","actor":"","outcome":"success","body":"CodeQL","description":"No new alerts in code changed by this pull request: [View all branch alerts](/codeGROOVE-dev/gitMDM/security/code-scanning?query=pr%3A15+tool%3ACodeQL+is%3Aopen).","bot":true},{"timestamp":"2025-10-06T11:27:02Z","kind":"check_run","actor":"github","outcome":"success","body":"CodeQL","description":"No new alerts in code changed by this pull request: [View all branch alerts](/codeGROOVE-dev/gitMDM/security/code-scanning?query=pr%3A15+tool%3ACodeQL+is%3Aopen).","bot":true},{"timestamp":"2025-10-06T11:27:09Z","kind":"check_run","actor":"github","outcome":"success","body":"analyze","bot":true},{"timestamp":"2025-10-06T11:27:09Z","kind":"check_run","actor":"","outcome":"success","body":"analyze","bot":true},{"timestamp":"2025-10-06T12:27:08Z","kind":"review_requested","actor":"ready-to-review-beta","target":"tstromberg"},{"timestamp":"2025-10-07T17:29:24Z","kind":"auto_merge_enabled","actor":"tstromberg"},{"timestamp":"2025-10-07T20:00:04Z","kind":"review","actor":"tstromberg","outcome":"approved","write_access":2}],"pull_request":{"created_at":"2025-10-06T11:25:40Z","updated_at":"2025-10-07T20:00:04Z","approval_summary":{"approvals_with_write_access":1,"approvals_with_unknown_access":0,"approvals_without_write_access":0,"changes_requested":0},"check_summary":{"success":{"CodeQL":"No new alerts in code changed by this pull request: [View all branch alerts](/codeGROOVE-dev/gitMDM/security/code-scanning?query=pr%3A15+tool%3ACodeQL+is%3Aopen).","Kusari Inspector":"Security Analysis Passed: No security issues found","analyze":"success","dependency-review":"success","test":"success"},"failing":{},"pending":{},"cancelled":{},"skipped":{},"stale":{},"neutral":{}},"mergeable":false,"labels":["dependencies","github_actions"],"mergeable_state":"blocked","mergeable_state_description":"PR is blocked by required status checks, reviews, or branch protection rules","author":"dependabot","body":"Bumps the actions group with 1 update: [github/codeql-action](https://github.com/github/codeql-action).\n\nUpdates `github/codeql-action` from 3.30.5 to 3.30.6\n\u003cdetails\u003e\n\u003csummary\u003eRelease notes\u003c/summary\u003e\n\u003cp\u003e\u003cem\u003eSourced from \u003ca href=\"https://github.com/github/","title":"Bump github/codeql-action from 3.30.5 to 3.30.6 in the actions group","state":"open","test_state":"passing","head_sha":"77743db426cc19982b2ec1c68a683367eba377b6","number":15,"changed_files":1,"deletions":3,"additions":3,"author_write_access":-1,"author_bot":false,"merged":false,"draft":false}} diff --git a/pkg/prx/timeline_events_test.go b/pkg/prx/github/timeline_events_test.go similarity index 94% rename from pkg/prx/timeline_events_test.go rename to pkg/prx/github/timeline_events_test.go index c7ce199..00d8bbc 100644 --- a/pkg/prx/timeline_events_test.go +++ b/pkg/prx/github/timeline_events_test.go @@ -1,4 +1,4 @@ -package prx 
+package github import ( "context" @@ -6,6 +6,8 @@ import ( "os" "testing" "time" + + "github.com/codeGROOVE-dev/prx/pkg/prx" ) // TestAutoMergeEventIntegration tests that we properly parse auto_merge_enabled @@ -17,7 +19,7 @@ func TestAutoMergeEventIntegration(t *testing.T) { t.Fatalf("Failed to read test data: %v", err) } - var prData PullRequestData + var prData prx.PullRequestData if err := json.Unmarshal(data, &prData); err != nil { t.Fatalf("Failed to unmarshal test data: %v", err) } @@ -28,7 +30,7 @@ func TestAutoMergeEventIntegration(t *testing.T) { } // Find the auto_merge_enabled event - var autoMergeEvent *Event + var autoMergeEvent *prx.Event for i := range prData.Events { if prData.Events[i].Kind == "auto_merge_enabled" { autoMergeEvent = &prData.Events[i] @@ -95,7 +97,7 @@ func TestAutoMergeEventIntegration(t *testing.T) { // TestParseGraphQLTimelineEventAutoMerge tests parsing of auto-merge events func TestParseGraphQLTimelineEventAutoMerge(t *testing.T) { - c := &Client{} + p := &Platform{} tests := []struct { name string @@ -130,7 +132,7 @@ func TestParseGraphQLTimelineEventAutoMerge(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - event := c.parseGraphQLTimelineEvent(context.TODO(), tt.item, "owner", "repo") + event := p.parseGraphQLTimelineEvent(context.TODO(), tt.item, "owner", "repo") if event == nil { t.Fatal("Expected event, got nil") } @@ -146,7 +148,7 @@ func TestParseGraphQLTimelineEventAutoMerge(t *testing.T) { // TestParseGraphQLTimelineEventNewTypes tests parsing of all newly added event types func TestParseGraphQLTimelineEventNewTypes(t *testing.T) { - c := &Client{} + p := &Platform{} tests := []struct { typename string @@ -190,7 +192,7 @@ func TestParseGraphQLTimelineEventNewTypes(t *testing.T) { }, } - event := c.parseGraphQLTimelineEvent(context.TODO(), item, "owner", "repo") + event := p.parseGraphQLTimelineEvent(context.TODO(), item, "owner", "repo") if event == nil { t.Fatalf("Expected event for %s, got nil", tt.typename) } @@ -203,7 +205,7 @@ func TestParseGraphQLTimelineEventNewTypes(t *testing.T) { // TestParseGraphQLTimelineEventRenamedTitle tests that renamed title events include title info func TestParseGraphQLTimelineEventRenamedTitle(t *testing.T) { - c := &Client{} + p := &Platform{} item := map[string]any{ "__typename": "RenamedTitleEvent", @@ -216,7 +218,7 @@ func TestParseGraphQLTimelineEventRenamedTitle(t *testing.T) { }, } - event := c.parseGraphQLTimelineEvent(context.TODO(), item, "owner", "repo") + event := p.parseGraphQLTimelineEvent(context.TODO(), item, "owner", "repo") if event == nil { t.Fatal("Expected event, got nil") } @@ -233,7 +235,7 @@ func TestParseGraphQLTimelineEventRenamedTitle(t *testing.T) { // TestParseGraphQLTimelineEventReviewDismissed tests that review dismissed events include message func TestParseGraphQLTimelineEventReviewDismissed(t *testing.T) { - c := &Client{} + p := &Platform{} item := map[string]any{ "__typename": "ReviewDismissedEvent", @@ -245,7 +247,7 @@ func TestParseGraphQLTimelineEventReviewDismissed(t *testing.T) { }, } - event := c.parseGraphQLTimelineEvent(context.TODO(), item, "owner", "repo") + event := p.parseGraphQLTimelineEvent(context.TODO(), item, "owner", "repo") if event == nil { t.Fatal("Expected event, got nil") } diff --git a/pkg/prx/gitlab/platform.go b/pkg/prx/gitlab/platform.go new file mode 100644 index 0000000..7d93f04 --- /dev/null +++ b/pkg/prx/gitlab/platform.go @@ -0,0 +1,936 @@ +// Package gitlab provides a GitLab platform implementation for 
fetching +// merge request data from GitLab instances. +package gitlab + +import ( + "context" + "encoding/json" + "fmt" + "io" + "log/slog" + "net/http" + "sort" + "strings" + "time" + + "github.com/codeGROOVE-dev/fido" + "github.com/codeGROOVE-dev/prx/pkg/prx" +) + +// Cache TTL constants. +const ( + mrDataCacheTTL = 20 * 24 * time.Hour // 20 days - validity checked against reference time +) + +// Cached data types with timestamps for reference time validation. +type cachedApprovals struct { + Data *approvals + CachedAt time.Time +} + +//nolint:govet // fieldalignment: cache structs prioritize readability over memory layout +type cachedPipelines struct { + Data []pipeline + CachedAt time.Time +} + +//nolint:govet // fieldalignment: cache structs prioritize readability over memory layout +type cachedNotes struct { + Data []note + CachedAt time.Time +} + +//nolint:govet // fieldalignment: cache structs prioritize readability over memory layout +type cachedDiscussions struct { + Data []discussion + CachedAt time.Time +} + +//nolint:govet // fieldalignment: cache structs prioritize readability over memory layout +type cachedCommits struct { + Data []commit + CachedAt time.Time +} + +// Platform implements the prx.Platform interface for GitLab. +// +//nolint:govet // fieldalignment: struct fields ordered for clarity +type Platform struct { + logger *slog.Logger + httpClient *http.Client + token string + baseURL string + approvalsCache *fido.Cache[string, cachedApprovals] + pipelinesCache *fido.Cache[string, cachedPipelines] + notesCache *fido.Cache[string, cachedNotes] + discussionsCache *fido.Cache[string, cachedDiscussions] + commitsCache *fido.Cache[string, cachedCommits] +} + +// Option configures a Platform. +type Option func(*Platform) + +// WithLogger sets a custom logger for the GitLab platform. +func WithLogger(logger *slog.Logger) Option { + return func(p *Platform) { + p.logger = logger + } +} + +// WithHTTPClient sets a custom HTTP client for the GitLab platform. +func WithHTTPClient(client *http.Client) Option { + return func(p *Platform) { + p.httpClient = client + } +} + +// WithBaseURL sets a custom base URL for self-hosted GitLab instances. +func WithBaseURL(baseURL string) Option { + return func(p *Platform) { + p.baseURL = strings.TrimSuffix(baseURL, "/") + } +} + +// NewPlatform creates a new GitLab platform client. +func NewPlatform(token string, opts ...Option) *Platform { + p := &Platform{ + httpClient: &http.Client{Timeout: 30 * time.Second}, + token: token, + baseURL: "https://gitlab.com", + logger: slog.Default(), + approvalsCache: fido.New[string, cachedApprovals](fido.TTL(mrDataCacheTTL)), + pipelinesCache: fido.New[string, cachedPipelines](fido.TTL(mrDataCacheTTL)), + notesCache: fido.New[string, cachedNotes](fido.TTL(mrDataCacheTTL)), + discussionsCache: fido.New[string, cachedDiscussions](fido.TTL(mrDataCacheTTL)), + commitsCache: fido.New[string, cachedCommits](fido.TTL(mrDataCacheTTL)), + } + + for _, opt := range opts { + opt(p) + } + + return p +} + +// Name returns the platform identifier. +func (*Platform) Name() string { + return prx.PlatformGitLab +} + +// FetchPR retrieves a merge request with all events and metadata. 
+func (p *Platform) FetchPR(ctx context.Context, owner, repo string, number int, refTime time.Time) (*prx.PullRequestData, error) { + projectPath := fmt.Sprintf("%s/%s", owner, repo) + p.logger.Info("fetching merge request via GitLab REST API", + "project", projectPath, "mr", number) + + // Fetch merge request details (not cached - contains updatedAt for reference). + mr, err := p.fetchMergeRequest(ctx, projectPath, number) + if err != nil { + return nil, fmt.Errorf("fetch merge request: %w", err) + } + + // Fetch approvals (cached with reference time validation). + approvals, err := p.fetchApprovals(ctx, projectPath, number, refTime) + if err != nil { + p.logger.Warn("failed to fetch approvals", "error", err) + } + + // Fetch pipelines (cached with reference time validation). + pipelines, err := p.fetchPipelines(ctx, projectPath, number, refTime) + if err != nil { + p.logger.Warn("failed to fetch pipelines", "error", err) + } + + // Fetch notes (cached with reference time validation). + notes, err := p.fetchNotes(ctx, projectPath, number, refTime) + if err != nil { + p.logger.Warn("failed to fetch notes", "error", err) + } + + // Fetch discussions (cached with reference time validation). + discussions, err := p.fetchDiscussions(ctx, projectPath, number, refTime) + if err != nil { + p.logger.Warn("failed to fetch discussions", "error", err) + } + + // Fetch commits (cached with reference time validation). + commits, err := p.fetchCommits(ctx, projectPath, number, refTime) + if err != nil { + p.logger.Warn("failed to fetch commits", "error", err) + } + + // Convert to our neutral format. + pr := convertMergeRequest(mr, approvals, commits) + events := convertToEvents(mr, notes, discussions, pipelines, commits) + + // Sort events by timestamp. + sort.Slice(events, func(i, j int) bool { + return events[i].Timestamp.Before(events[j].Timestamp) + }) + + // Finalize the pull request with calculated summaries. + // Pass the TestState we derived from the pipeline so it doesn't get overwritten. + prx.FinalizePullRequest(&pr, events, nil, pr.TestState) + + return &prx.PullRequestData{ + CachedAt: time.Now(), + PullRequest: pr, + Events: events, + }, nil +} + +// GitLab API response types. 
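
For orientation, a minimal sketch of driving the platform above directly, outside the prx client wiring. The import path is inferred from this file's location; the host, project path, MR number, and token are placeholders, and error handling is trimmed.

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/codeGROOVE-dev/prx/pkg/prx/gitlab"
)

func main() {
	// Placeholders: point this at a real instance, project, and merge request.
	p := gitlab.NewPlatform("glpat-example",
		gitlab.WithBaseURL("https://gitlab.example.com"))

	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
	defer cancel()

	// Using time.Now() as the reference time asks for data at least as fresh
	// as "now", so the per-endpoint caches above will not be reused.
	data, err := p.FetchPR(ctx, "group", "project", 42, time.Now())
	if err != nil {
		log.Fatalf("fetch MR: %v", err)
	}

	fmt.Printf("%s [%s] tests=%s events=%d\n",
		data.PullRequest.Title, data.PullRequest.State,
		data.PullRequest.TestState, len(data.Events))
}

A caller that can tolerate slightly stale data would instead pass the last time it believes the merge request changed, which lets the caches above satisfy repeat lookups without extra API calls.
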
+ +//nolint:govet // fieldalignment: JSON API structs prioritize readability over memory layout +type mergeRequest struct { + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` + Author user `json:"author"` + MergedAt *time.Time `json:"merged_at"` + ClosedAt *time.Time `json:"closed_at"` + MergedBy *user `json:"merged_by"` + DiffRefs *diffRefs `json:"diff_refs"` + HeadPipeline *pipeline `json:"head_pipeline"` + Assignees []user `json:"assignees"` + Reviewers []reviewerState `json:"reviewers"` + Labels []string `json:"labels"` + Title string `json:"title"` + Description string `json:"description"` + State string `json:"state"` + MergeStatus string `json:"merge_status"` + DetailedMergeStatus string `json:"detailed_merge_status"` + SHA string `json:"sha"` + ChangesCount string `json:"changes_count"` + SourceBranch string `json:"source_branch"` + TargetBranch string `json:"target_branch"` + WebURL string `json:"web_url"` + ID int `json:"id"` + IID int `json:"iid"` + UserNotesCount int `json:"user_notes_count"` + Draft bool `json:"draft"` + WorkInProgress bool `json:"work_in_progress"` + HasConflicts bool `json:"has_conflicts"` + MergeableDiscussions bool `json:"blocking_discussions_resolved"` +} + +type user struct { + Username string `json:"username"` + Name string `json:"name"` + State string `json:"state"` + AvatarURL string `json:"avatar_url"` + WebURL string `json:"web_url"` + ID int `json:"id"` +} + +//nolint:govet // fieldalignment: JSON API structs prioritize readability over memory layout +type reviewerState struct { + user + + State string `json:"state"` + CreatedAt *time.Time `json:"created_at"` +} + +type diffRefs struct { + BaseSHA string `json:"base_sha"` + HeadSHA string `json:"head_sha"` + StartSHA string `json:"start_sha"` +} + +type pipeline struct { + DetailedStatus *detailedStatus `json:"detailed_status"` + StartedAt *time.Time `json:"started_at"` + FinishedAt *time.Time `json:"finished_at"` + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` + Status string `json:"status"` + Source string `json:"source"` + Ref string `json:"ref"` + SHA string `json:"sha"` + WebURL string `json:"web_url"` + ID int `json:"id"` +} + +type detailedStatus struct { + Icon string `json:"icon"` + Text string `json:"text"` + Label string `json:"label"` + Group string `json:"group"` + Tooltip string `json:"tooltip"` + DetailsPath string `json:"details_path"` + HasDetails bool `json:"has_details"` +} + +type approvals struct { + ApprovedBy []approvalUser `json:"approved_by"` + SuggestedApprovers []user `json:"suggested_approvers"` + Approvers []user `json:"approvers"` + ApprovalsLeft int `json:"approvals_left"` + ApprovalsRequired int `json:"approvals_required"` + Approved bool `json:"approved"` +} + +type approvalUser struct { + User user `json:"user"` +} + +//nolint:govet // fieldalignment: JSON API structs prioritize readability over memory layout +type note struct { + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` + Author user `json:"author"` + ResolvedBy *user `json:"resolved_by"` + Body string `json:"body"` + NoteableType string `json:"noteable_type"` //nolint:misspell // GitLab API uses "noteable" + ID int `json:"id"` + NoteableID int `json:"noteable_id"` //nolint:misspell // GitLab API uses "noteable" + System bool `json:"system"` + Resolvable bool `json:"resolvable"` + Resolved bool `json:"resolved"` +} + +//nolint:govet // fieldalignment: JSON API structs prioritize readability over memory 
layout +type discussion struct { + Notes []note `json:"notes"` + ID string `json:"id"` + IndividualNote bool `json:"individual_note"` +} + +type commit struct { + ID string `json:"id"` + ShortID string `json:"short_id"` + Title string `json:"title"` + Message string `json:"message"` + AuthorName string `json:"author_name"` + AuthorEmail string `json:"author_email"` + AuthoredDate time.Time `json:"authored_date"` + CommitterName string `json:"committer_name"` + CommitterEmail string `json:"committer_email"` + CommittedDate time.Time `json:"committed_date"` + WebURL string `json:"web_url"` +} + +// API fetch methods. + +func (p *Platform) fetchMergeRequest(ctx context.Context, project string, mrIID int) (*mergeRequest, error) { + url := fmt.Sprintf("%s/api/v4/projects/%s/merge_requests/%d", + p.baseURL, urlEncode(project), mrIID) + + var mr mergeRequest + if err := p.doRequest(ctx, url, &mr); err != nil { + return nil, err + } + return &mr, nil +} + +func (p *Platform) fetchApprovals(ctx context.Context, project string, mrIID int, refTime time.Time) (*approvals, error) { + cacheKey := fmt.Sprintf("%s/%d/approvals", project, mrIID) + + if cached, ok := p.approvalsCache.Get(cacheKey); ok { + if !cached.CachedAt.Before(refTime) { + p.logger.DebugContext(ctx, "cache hit: approvals", "project", project, "mr", mrIID) + return cached.Data, nil + } + p.logger.DebugContext(ctx, "cache miss: approvals expired", + "project", project, "mr", mrIID, + "cached_at", cached.CachedAt, "reference_time", refTime) + } + + url := fmt.Sprintf("%s/api/v4/projects/%s/merge_requests/%d/approvals", + p.baseURL, urlEncode(project), mrIID) + + var a approvals + if err := p.doRequest(ctx, url, &a); err != nil { + return nil, err + } + + p.approvalsCache.Set(cacheKey, cachedApprovals{Data: &a, CachedAt: time.Now()}) + return &a, nil +} + +func (p *Platform) fetchPipelines(ctx context.Context, project string, mrIID int, refTime time.Time) ([]pipeline, error) { + cacheKey := fmt.Sprintf("%s/%d/pipelines", project, mrIID) + + if cached, ok := p.pipelinesCache.Get(cacheKey); ok { + if !cached.CachedAt.Before(refTime) { + p.logger.DebugContext(ctx, "cache hit: pipelines", "project", project, "mr", mrIID, "count", len(cached.Data)) + return cached.Data, nil + } + p.logger.DebugContext(ctx, "cache miss: pipelines expired", + "project", project, "mr", mrIID, + "cached_at", cached.CachedAt, "reference_time", refTime) + } + + url := fmt.Sprintf("%s/api/v4/projects/%s/merge_requests/%d/pipelines", + p.baseURL, urlEncode(project), mrIID) + + var pipelines []pipeline + if err := p.doRequest(ctx, url, &pipelines); err != nil { + return nil, err + } + + p.pipelinesCache.Set(cacheKey, cachedPipelines{Data: pipelines, CachedAt: time.Now()}) + return pipelines, nil +} + +func (p *Platform) fetchNotes(ctx context.Context, project string, mrIID int, refTime time.Time) ([]note, error) { + cacheKey := fmt.Sprintf("%s/%d/notes", project, mrIID) + + if cached, ok := p.notesCache.Get(cacheKey); ok { + if !cached.CachedAt.Before(refTime) { + p.logger.DebugContext(ctx, "cache hit: notes", "project", project, "mr", mrIID, "count", len(cached.Data)) + return cached.Data, nil + } + p.logger.DebugContext(ctx, "cache miss: notes expired", + "project", project, "mr", mrIID, + "cached_at", cached.CachedAt, "reference_time", refTime) + } + + url := fmt.Sprintf("%s/api/v4/projects/%s/merge_requests/%d/notes?sort=asc&per_page=100", + p.baseURL, urlEncode(project), mrIID) + + var notes []note + if err := p.doRequest(ctx, url, &notes); err != nil { + return nil,
err + } + + p.notesCache.Set(cacheKey, cachedNotes{Data: notes, CachedAt: time.Now()}) + return notes, nil +} + +func (p *Platform) fetchDiscussions(ctx context.Context, project string, mrIID int, refTime time.Time) ([]discussion, error) { + cacheKey := fmt.Sprintf("%s/%d/discussions", project, mrIID) + + if cached, ok := p.discussionsCache.Get(cacheKey); ok { + if !cached.CachedAt.Before(refTime) { + p.logger.DebugContext(ctx, "cache hit: discussions", "project", project, "mr", mrIID, "count", len(cached.Data)) + return cached.Data, nil + } + p.logger.DebugContext(ctx, "cache miss: discussions expired", + "project", project, "mr", mrIID, + "cached_at", cached.CachedAt, "reference_time", refTime) + } + + url := fmt.Sprintf("%s/api/v4/projects/%s/merge_requests/%d/discussions?per_page=100", + p.baseURL, urlEncode(project), mrIID) + + var discussions []discussion + if err := p.doRequest(ctx, url, &discussions); err != nil { + return nil, err + } + + p.discussionsCache.Set(cacheKey, cachedDiscussions{Data: discussions, CachedAt: time.Now()}) + return discussions, nil +} + +func (p *Platform) fetchCommits(ctx context.Context, project string, mrIID int, refTime time.Time) ([]commit, error) { + cacheKey := fmt.Sprintf("%s/%d/commits", project, mrIID) + + if cached, ok := p.commitsCache.Get(cacheKey); ok { + if !cached.CachedAt.Before(refTime) { + p.logger.DebugContext(ctx, "cache hit: commits", "project", project, "mr", mrIID, "count", len(cached.Data)) + return cached.Data, nil + } + p.logger.DebugContext(ctx, "cache miss: commits expired", + "project", project, "mr", mrIID, + "cached_at", cached.CachedAt, "reference_time", refTime) + } + + url := fmt.Sprintf("%s/api/v4/projects/%s/merge_requests/%d/commits", + p.baseURL, urlEncode(project), mrIID) + + var commits []commit + if err := p.doRequest(ctx, url, &commits); err != nil { + return nil, err + } + + p.commitsCache.Set(cacheKey, cachedCommits{Data: commits, CachedAt: time.Now()}) + return commits, nil +} + +func (p *Platform) doRequest(ctx context.Context, url string, result any) (err error) { + req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, http.NoBody) + if err != nil { + return fmt.Errorf("create request: %w", err) + } + + if p.token != "" { + // Support both PAT (Private-Token) and OAuth2 (Bearer) tokens + // OAuth2 tokens from glab are typically longer and don't start with "glpat-" + if strings.HasPrefix(p.token, "glpat-") { + req.Header.Set("Private-Token", p.token) + } else { + req.Header.Set("Authorization", "Bearer "+p.token) + } + } + req.Header.Set("Accept", "application/json") + + p.logger.Debug("GitLab API request", "url", url) + + resp, err := p.httpClient.Do(req) + if err != nil { + return fmt.Errorf("execute request: %w", err) + } + defer func() { + if cerr := resp.Body.Close(); cerr != nil && err == nil { + err = fmt.Errorf("close response body: %w", cerr) + } + }() + + if resp.StatusCode != http.StatusOK { + body, readErr := io.ReadAll(resp.Body) + if readErr != nil { + return fmt.Errorf("gitlab API error: %d %s (failed to read body: %w)", resp.StatusCode, resp.Status, readErr) + } + return fmt.Errorf("gitlab API error: %d %s: %s", resp.StatusCode, resp.Status, string(body)) + } + + if err := json.NewDecoder(resp.Body).Decode(result); err != nil { + return fmt.Errorf("decode response: %w", err) + } + + return nil +} + +// Conversion methods. 
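
The fetch helpers above all apply the same freshness rule before touching the network; restated as a standalone predicate for clarity (hypothetical, nothing like this is exported here):

// cachedFresh mirrors the inline checks in fetchApprovals, fetchPipelines,
// fetchNotes, fetchDiscussions, and fetchCommits: a cached entry may be served
// only if it was stored at or after the caller's reference time.
func cachedFresh(cachedAt, refTime time.Time) bool {
	return !cachedAt.Before(refTime) // cachedAt >= refTime
}

Entries older than the reference time are refetched even though the 20-day TTL has not expired, which is how a long TTL coexists with callers that need data current as of a specific moment.
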
+ +func convertMergeRequest(mr *mergeRequest, approvals *approvals, commits []commit) prx.PullRequest { + pr := prx.PullRequest{ + Number: mr.IID, + Title: mr.Title, + Body: mr.Description, + Author: mr.Author.Username, + State: convertState(mr.State), + Draft: mr.Draft || mr.WorkInProgress, + CreatedAt: mr.CreatedAt, + UpdatedAt: mr.UpdatedAt, + ClosedAt: mr.ClosedAt, + MergedAt: mr.MergedAt, + Labels: mr.Labels, + AuthorBot: isBot(&mr.Author), + } + + // Populate commits list (oldest to newest). + for i := range commits { + pr.Commits = append(pr.Commits, commits[i].ShortID) + } + + // Set merged status. + if mr.MergedAt != nil { + pr.Merged = true + if mr.MergedBy != nil { + pr.MergedBy = mr.MergedBy.Username + } + } + + // Set head SHA. + if mr.DiffRefs != nil { + pr.HeadSHA = mr.DiffRefs.HeadSHA + } else { + pr.HeadSHA = mr.SHA + } + + // Set assignees. + for _, a := range mr.Assignees { + pr.Assignees = append(pr.Assignees, a.Username) + } + + // Set reviewers with their states. + pr.Reviewers = make(map[string]prx.ReviewState) + for _, r := range mr.Reviewers { + pr.Reviewers[r.Username] = convertReviewerState(r.State) + } + + // Update reviewer states from approvals. + if approvals != nil { + for _, ab := range approvals.ApprovedBy { + pr.Reviewers[ab.User.Username] = prx.ReviewStateApproved + } + } + + // Set mergeable state. + pr.MergeableState = convertMergeStatus(mr) + mergeable := !mr.HasConflicts && mr.MergeStatus == "can_be_merged" + pr.Mergeable = &mergeable + + // Derive TestState from head pipeline. + if mr.HeadPipeline != nil { + pr.TestState = convertPipelineToTestState(mr.HeadPipeline.Status) + } + + return pr +} + +// isBot returns true if the user appears to be a bot. +func isBot(u *user) bool { + if u == nil { + return false + } + // GitLab bot users typically have "[bot]" suffix or specific usernames + return strings.HasSuffix(u.Username, "[bot]") || + strings.HasSuffix(u.Username, "-bot") || + u.Username == "ghost" // GitLab's placeholder for deleted users +} + +// convertPipelineToTestState converts a GitLab pipeline status to TestState. +func convertPipelineToTestState(status string) string { + switch status { + case "success": + return prx.TestStatePassing + case "failed": + return prx.TestStateFailing + case "running": + return prx.TestStateRunning + case "pending", "waiting_for_resource", "preparing": + return prx.TestStatePending + case "created", "scheduled": + return prx.TestStateQueued + default: + // canceled, skipped, manual, or unknown status + return prx.TestStateNone + } +} + +func convertState(state string) string { + switch state { + case "opened": + return "open" + case "closed", "merged": + return "closed" + default: + return state + } +} + +func convertReviewerState(state string) prx.ReviewState { + switch state { + case "reviewed": + return prx.ReviewStateCommented + default: + return prx.ReviewStatePending + } +} + +func convertMergeStatus(mr *mergeRequest) string { + // Map GitLab detailed merge status to our neutral format. + switch mr.DetailedMergeStatus { + case "mergeable": + return "clean" + case "ci_must_pass", "ci_still_running", "discussions_not_resolved", "not_approved": + return "blocked" + case "conflict", "need_rebase": + return "dirty" + case "checking": + return "unknown" + case "draft_status": + return "draft" + default: + // Fall back to simple merge status. 
+ switch mr.MergeStatus { + case "can_be_merged": + return "clean" + case "cannot_be_merged": + return "dirty" + default: + return "unknown" + } + } +} + +func convertToEvents( + mr *mergeRequest, + notes []note, + discussions []discussion, + pipelines []pipeline, + commits []commit, +) []prx.Event { + var events []prx.Event + + // Add MR opened event. + events = append(events, prx.Event{ + Timestamp: mr.CreatedAt, + Kind: prx.EventKindPROpened, + Actor: mr.Author.Username, + }) + + // Add commit events. + for i := range commits { + events = append(events, prx.Event{ + Timestamp: commits[i].AuthoredDate, + Kind: prx.EventKindCommit, + Actor: commits[i].AuthorName, + Body: commits[i].ShortID, + Description: commits[i].Title, + }) + } + + // Track which notes are part of discussions to avoid duplicates. + discussionNoteIDs := make(map[int]bool) + for i := range discussions { + for j := range discussions[i].Notes { + discussionNoteIDs[discussions[i].Notes[j].ID] = true + } + } + + // Add events from notes (comments and system events). + for i := range notes { + // Skip notes that are part of discussions (we'll handle them separately). + if discussionNoteIDs[notes[i].ID] { + continue + } + + event := convertNote(&notes[i]) + if event != nil { + events = append(events, *event) + } + } + + // Add events from discussions. + for i := range discussions { + for j := range discussions[i].Notes { + event := convertNote(&discussions[i].Notes[j]) + if event != nil { + // Mark resolved discussions. + if discussions[i].Notes[j].Resolved { + event.Outdated = true + } + events = append(events, *event) + } + } + } + + // Add pipeline events (CI status). + for i := range pipelines { + events = append(events, convertPipeline(&pipelines[i])...) + } + + // Add closed/merged events. + if mr.MergedAt != nil { + events = append(events, prx.Event{ + Timestamp: *mr.MergedAt, + Kind: prx.EventKindPRMerged, + Actor: safeUsername(mr.MergedBy), + }) + } else if mr.ClosedAt != nil && mr.State == "closed" { + events = append(events, prx.Event{ + Timestamp: *mr.ClosedAt, + Kind: prx.EventKindPRClosed, + }) + } + + return events +} + +func convertNote(n *note) *prx.Event { + if n.System { + return convertSystemNote(n) + } + + // Regular user comment. + return &prx.Event{ + Timestamp: n.CreatedAt, + Kind: prx.EventKindComment, + Actor: n.Author.Username, + Body: n.Body, + Question: prx.ContainsQuestion(n.Body), + Outdated: n.Resolved, + } +} + +func convertSystemNote(systemNote *note) *prx.Event { + body := strings.ToLower(systemNote.Body) + + // Map GitLab system notes to our event kinds.
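// For example, a body starting with "approved this merge request" becomes a
// review event with outcome "approved", "requested review from @user" becomes
// a review_requested event targeting that user, and anything unrecognized
// falls through to a generic comment event with a "system" description.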
+ switch { + case strings.HasPrefix(body, "approved this merge request"): + return &prx.Event{ + Timestamp: systemNote.CreatedAt, + Kind: prx.EventKindReview, + Actor: systemNote.Author.Username, + Outcome: "approved", + } + case strings.HasPrefix(body, "unapproved this merge request"): + return &prx.Event{ + Timestamp: systemNote.CreatedAt, + Kind: prx.EventKindReview, + Actor: systemNote.Author.Username, + Outcome: "dismissed", + } + case strings.HasPrefix(body, "requested review from"): + target := extractMentionFromNote(systemNote.Body) + return &prx.Event{ + Timestamp: systemNote.CreatedAt, + Kind: prx.EventKindReviewRequested, + Actor: systemNote.Author.Username, + Target: target, + } + case strings.HasPrefix(body, "assigned to"): + target := extractMentionFromNote(systemNote.Body) + return &prx.Event{ + Timestamp: systemNote.CreatedAt, + Kind: prx.EventKindAssigned, + Actor: systemNote.Author.Username, + Target: target, + } + case strings.HasPrefix(body, "unassigned"): + return &prx.Event{ + Timestamp: systemNote.CreatedAt, + Kind: prx.EventKindUnassigned, + Actor: systemNote.Author.Username, + } + case strings.HasPrefix(body, "added") && strings.Contains(body, "label"): + return &prx.Event{ + Timestamp: systemNote.CreatedAt, + Kind: prx.EventKindLabeled, + Actor: systemNote.Author.Username, + Description: systemNote.Body, + } + case strings.HasPrefix(body, "removed") && strings.Contains(body, "label"): + return &prx.Event{ + Timestamp: systemNote.CreatedAt, + Kind: prx.EventKindUnlabeled, + Actor: systemNote.Author.Username, + Description: systemNote.Body, + } + case strings.HasPrefix(body, "marked as a draft"): + return &prx.Event{ + Timestamp: systemNote.CreatedAt, + Kind: prx.EventKindConvertToDraft, + Actor: systemNote.Author.Username, + } + case strings.HasPrefix(body, "marked this merge request as ready"): + return &prx.Event{ + Timestamp: systemNote.CreatedAt, + Kind: prx.EventKindReadyForReview, + Actor: systemNote.Author.Username, + } + case strings.HasPrefix(body, "changed target branch"): + return &prx.Event{ + Timestamp: systemNote.CreatedAt, + Kind: prx.EventKindBaseRefChanged, + Actor: systemNote.Author.Username, + Description: systemNote.Body, + } + case strings.HasPrefix(body, "mentioned in"): + return &prx.Event{ + Timestamp: systemNote.CreatedAt, + Kind: prx.EventKindCrossReferenced, + Actor: systemNote.Author.Username, + Description: systemNote.Body, + } + case strings.HasPrefix(body, "closed"): + return &prx.Event{ + Timestamp: systemNote.CreatedAt, + Kind: prx.EventKindClosed, + Actor: systemNote.Author.Username, + } + case strings.HasPrefix(body, "reopened"): + return &prx.Event{ + Timestamp: systemNote.CreatedAt, + Kind: prx.EventKindReopened, + Actor: systemNote.Author.Username, + } + case strings.HasPrefix(body, "changed title"): + return &prx.Event{ + Timestamp: systemNote.CreatedAt, + Kind: prx.EventKindRenamedTitle, + Actor: systemNote.Author.Username, + Description: systemNote.Body, + } + default: + // Unknown system note - include as a generic comment. + return &prx.Event{ + Timestamp: systemNote.CreatedAt, + Kind: prx.EventKindComment, + Actor: systemNote.Author.Username, + Body: systemNote.Body, + Description: "system", + } + } +} + +func convertPipeline(p *pipeline) []prx.Event { + var events []prx.Event + + // Add pipeline started event. 
+ if p.StartedAt != nil { + status := "pending" + if p.Status == "running" { + status = "running" + } + events = append(events, prx.Event{ + Timestamp: *p.StartedAt, + Kind: prx.EventKindCheckRun, + Body: fmt.Sprintf("pipeline-%d", p.ID), + Outcome: status, + Bot: true, + Description: getDetailedStatusText(p.DetailedStatus), + }) + } + + // Add pipeline completed event. + if p.FinishedAt != nil { + outcome := convertPipelineStatus(p.Status) + events = append(events, prx.Event{ + Timestamp: *p.FinishedAt, + Kind: prx.EventKindCheckRun, + Body: fmt.Sprintf("pipeline-%d", p.ID), + Outcome: outcome, + Bot: true, + Description: getDetailedStatusText(p.DetailedStatus), + }) + } + + return events +} + +func convertPipelineStatus(status string) string { + switch status { + case "success": + return "success" + case "failed": + return "failure" + case "canceled", "cancelled": + return "cancelled" + case "skipped": + return "skipped" + case "running": + return "running" + case "pending", "created", "waiting_for_resource", "preparing": + return "pending" + case "manual": + return "action_required" + default: + return status + } +} + +// Helper functions. + +func urlEncode(s string) string { + // URL encode the project path (e.g., "group/project" -> "group%2Fproject"). + return strings.ReplaceAll(s, "/", "%2F") +} + +func extractMentionFromNote(body string) string { + // Extract @username from note body. + for p := range strings.FieldsSeq(body) { + if username, found := strings.CutPrefix(p, "@"); found { + return username + } + } + return "" +} + +func safeUsername(u *user) string { + if u == nil { + return "" + } + return u.Username +} + +func getDetailedStatusText(ds *detailedStatus) string { + if ds == nil { + return "" + } + return ds.Text +} diff --git a/pkg/prx/gitlab/platform_test.go b/pkg/prx/gitlab/platform_test.go new file mode 100644 index 0000000..d9793fe --- /dev/null +++ b/pkg/prx/gitlab/platform_test.go @@ -0,0 +1,1150 @@ +//nolint:errcheck // Test handlers don't need to check w.Write errors +package gitlab + +import ( + "context" + "net/http" + "net/http/httptest" + "strings" + "testing" + "time" + + "github.com/codeGROOVE-dev/prx/pkg/prx" +) + +func TestPlatform_Name(t *testing.T) { + p := NewPlatform("token") + if got := p.Name(); got != prx.PlatformGitLab { + t.Errorf("Name() = %q, want %q", got, prx.PlatformGitLab) + } +} + +func TestNewPlatform(t *testing.T) { + p := NewPlatform("test-token") + if p.token != "test-token" { + t.Errorf("token = %q, want %q", p.token, "test-token") + } + if p.baseURL != "https://gitlab.com" { + t.Errorf("baseURL = %q, want %q", p.baseURL, "https://gitlab.com") + } +} + +func TestPlatform_WithOptions(t *testing.T) { + t.Run("WithLogger", func(t *testing.T) { + p := NewPlatform("token", WithLogger(nil)) + if p == nil { + t.Error("NewPlatform returned nil") + } + }) + + t.Run("WithHTTPClient", func(t *testing.T) { + customClient := &http.Client{Timeout: 60 * time.Second} + p := NewPlatform("token", WithHTTPClient(customClient)) + if p.httpClient != customClient { + t.Error("Custom HTTP client not set") + } + }) + + t.Run("WithBaseURL", func(t *testing.T) { + p := NewPlatform("token", WithBaseURL("https://gitlab.example.com/")) + if p.baseURL != "https://gitlab.example.com" { + t.Errorf("baseURL = %q, want %q", p.baseURL, "https://gitlab.example.com") + } + }) +} + +func TestPlatform_FetchPR(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + 
+ path := r.URL.Path + switch { + case strings.HasSuffix(path, "/merge_requests/123") && !strings.Contains(path, "/approvals") && !strings.Contains(path, "/pipelines") && !strings.Contains(path, "/notes") && !strings.Contains(path, "/discussions") && !strings.Contains(path, "/commits"): + w.WriteHeader(http.StatusOK) + _, _ = w.Write([]byte(`{ + "id": 1, + "iid": 123, + "title": "Test MR", + "description": "Test description", + "state": "opened", + "draft": false, + "work_in_progress": false, + "has_conflicts": false, + "merge_status": "can_be_merged", + "detailed_merge_status": "mergeable", + "sha": "abc123def456", + "created_at": "2024-01-01T10:00:00Z", + "updated_at": "2024-01-02T12:00:00Z", + "author": { + "id": 1, + "username": "testauthor", + "name": "Test Author" + }, + "diff_refs": { + "head_sha": "abc123def456", + "base_sha": "base123", + "start_sha": "start123" + }, + "head_pipeline": { + "id": 100, + "status": "success", + "created_at": "2024-01-01T11:00:00Z", + "updated_at": "2024-01-01T12:00:00Z" + }, + "labels": ["bug", "priority::high"], + "assignees": [ + {"id": 2, "username": "assignee1"} + ], + "reviewers": [ + {"id": 3, "username": "reviewer1", "state": "unreviewed"} + ] + }`)) + + case strings.Contains(path, "/approvals"): + w.WriteHeader(http.StatusOK) + _, _ = w.Write([]byte(`{ + "approved": true, + "approvals_required": 1, + "approvals_left": 0, + "approved_by": [ + {"user": {"id": 4, "username": "approver1"}} + ] + }`)) + + case strings.Contains(path, "/pipelines"): + w.WriteHeader(http.StatusOK) + _, _ = w.Write([]byte(`[ + { + "id": 100, + "status": "success", + "ref": "feature-branch", + "sha": "abc123def456", + "created_at": "2024-01-01T11:00:00Z", + "updated_at": "2024-01-01T12:00:00Z", + "started_at": "2024-01-01T11:05:00Z", + "finished_at": "2024-01-01T11:30:00Z" + } + ]`)) + + case strings.Contains(path, "/notes"): + w.WriteHeader(http.StatusOK) + _, _ = w.Write([]byte(`[ + { + "id": 1, + "body": "LGTM!", + "author": {"id": 5, "username": "commenter1"}, + "created_at": "2024-01-01T14:00:00Z", + "updated_at": "2024-01-01T14:00:00Z", + "system": false, + "resolvable": false, + "resolved": false + }, + { + "id": 2, + "body": "approved this merge request", + "author": {"id": 4, "username": "approver1"}, + "created_at": "2024-01-01T15:00:00Z", + "updated_at": "2024-01-01T15:00:00Z", + "system": true, + "resolvable": false, + "resolved": false + } + ]`)) + + case strings.Contains(path, "/discussions"): + w.WriteHeader(http.StatusOK) + _, _ = w.Write([]byte(`[ + { + "id": "disc1", + "individual_note": false, + "notes": [ + { + "id": 10, + "body": "Can you explain this?", + "author": {"id": 6, "username": "reviewer2"}, + "created_at": "2024-01-01T16:00:00Z", + "updated_at": "2024-01-01T16:00:00Z", + "system": false, + "resolvable": true, + "resolved": false + } + ] + } + ]`)) + + case strings.Contains(path, "/commits"): + w.WriteHeader(http.StatusOK) + _, _ = w.Write([]byte(`[ + { + "id": "abc123def456789full", + "short_id": "abc123d", + "title": "Initial commit", + "message": "Initial commit\n\nWith details", + "author_name": "Test Author", + "author_email": "test@example.com", + "authored_date": "2024-01-01T09:00:00Z", + "committer_name": "Test Author", + "committer_email": "test@example.com", + "committed_date": "2024-01-01T09:00:00Z" + } + ]`)) + + default: + w.WriteHeader(http.StatusNotFound) + _, _ = w.Write([]byte(`{"message": "not found"}`)) + } + })) + defer server.Close() + + p := NewPlatform("test-token", WithBaseURL(server.URL)) + ctx := 
context.Background() + + data, err := p.FetchPR(ctx, "owner", "repo", 123, time.Now()) + if err != nil { + t.Fatalf("FetchPR() error = %v", err) + } + + pr := data.PullRequest + + // Verify basic PR fields + if pr.Number != 123 { + t.Errorf("Number = %d, want 123", pr.Number) + } + if pr.Title != "Test MR" { + t.Errorf("Title = %q, want %q", pr.Title, "Test MR") + } + if pr.Author != "testauthor" { + t.Errorf("Author = %q, want %q", pr.Author, "testauthor") + } + if pr.State != "open" { + t.Errorf("State = %q, want %q", pr.State, "open") + } + if pr.Draft { + t.Error("Draft = true, want false") + } + if pr.HeadSHA != "abc123def456" { + t.Errorf("HeadSHA = %q, want %q", pr.HeadSHA, "abc123def456") + } + if pr.TestState != prx.TestStatePassing { + t.Errorf("TestState = %q, want %q", pr.TestState, prx.TestStatePassing) + } + + // Verify labels + if len(pr.Labels) != 2 { + t.Errorf("len(Labels) = %d, want 2", len(pr.Labels)) + } + + // Verify assignees + if len(pr.Assignees) != 1 || pr.Assignees[0] != "assignee1" { + t.Errorf("Assignees = %v, want [assignee1]", pr.Assignees) + } + + // Verify reviewers + if pr.Reviewers["approver1"] != prx.ReviewStateApproved { + t.Errorf("Reviewers[approver1] = %v, want %v", pr.Reviewers["approver1"], prx.ReviewStateApproved) + } + + // Verify commits + if len(pr.Commits) != 1 || pr.Commits[0] != "abc123d" { + t.Errorf("Commits = %v, want [abc123d]", pr.Commits) + } + + // Verify events exist + if len(data.Events) < 3 { + t.Errorf("len(Events) = %d, want at least 3", len(data.Events)) + } + + // Check for expected event types + eventTypes := make(map[string]bool) + for _, e := range data.Events { + eventTypes[e.Kind] = true + } + expectedTypes := []string{prx.EventKindPROpened, prx.EventKindCommit, prx.EventKindComment} + for _, et := range expectedTypes { + if !eventTypes[et] { + t.Errorf("Missing event type %q in events", et) + } + } +} + +func TestPlatform_FetchPR_Merged(t *testing.T) { + mergedAt := "2024-01-03T15:00:00Z" + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + + path := r.URL.Path + switch { + case strings.HasSuffix(path, "/merge_requests/456") && !strings.Contains(path, "/approvals"): + w.WriteHeader(http.StatusOK) + _, _ = w.Write([]byte(`{ + "id": 2, + "iid": 456, + "title": "Merged MR", + "description": "", + "state": "merged", + "draft": false, + "has_conflicts": false, + "merge_status": "can_be_merged", + "sha": "merged123", + "created_at": "2024-01-01T10:00:00Z", + "updated_at": "2024-01-03T15:00:00Z", + "merged_at": "` + mergedAt + `", + "closed_at": "` + mergedAt + `", + "author": {"id": 1, "username": "author"}, + "merged_by": {"id": 2, "username": "merger"}, + "diff_refs": {"head_sha": "merged123"}, + "labels": [], + "assignees": [], + "reviewers": [] + }`)) + case strings.Contains(path, "/approvals"): + w.WriteHeader(http.StatusOK) + _, _ = w.Write([]byte(`{"approved": false, "approved_by": []}`)) + case strings.Contains(path, "/pipelines"): + w.WriteHeader(http.StatusOK) + _, _ = w.Write([]byte(`[]`)) + case strings.Contains(path, "/notes"): + w.WriteHeader(http.StatusOK) + _, _ = w.Write([]byte(`[]`)) + case strings.Contains(path, "/discussions"): + w.WriteHeader(http.StatusOK) + _, _ = w.Write([]byte(`[]`)) + case strings.Contains(path, "/commits"): + w.WriteHeader(http.StatusOK) + _, _ = w.Write([]byte(`[]`)) + default: + w.WriteHeader(http.StatusNotFound) + } + })) + defer server.Close() + + p := NewPlatform("test-token", 
WithBaseURL(server.URL)) + ctx := context.Background() + + data, err := p.FetchPR(ctx, "owner", "repo", 456, time.Now()) + if err != nil { + t.Fatalf("FetchPR() error = %v", err) + } + + pr := data.PullRequest + + if !pr.Merged { + t.Error("Merged = false, want true") + } + if pr.MergedBy != "merger" { + t.Errorf("MergedBy = %q, want %q", pr.MergedBy, "merger") + } + if pr.MergedAt == nil { + t.Error("MergedAt = nil, want non-nil") + } + + // Check for merged event + hasMergedEvent := false + for _, e := range data.Events { + if e.Kind == prx.EventKindPRMerged { + hasMergedEvent = true + if e.Actor != "merger" { + t.Errorf("Merged event actor = %q, want %q", e.Actor, "merger") + } + } + } + if !hasMergedEvent { + t.Error("Missing pr_merged event") + } +} + +func TestPlatform_FetchPR_Draft(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + + path := r.URL.Path + switch { + case strings.HasSuffix(path, "/merge_requests/789") && !strings.Contains(path, "/approvals"): + w.WriteHeader(http.StatusOK) + _, _ = w.Write([]byte(`{ + "id": 3, + "iid": 789, + "title": "Draft: WIP MR", + "description": "", + "state": "opened", + "draft": true, + "work_in_progress": true, + "has_conflicts": false, + "merge_status": "cannot_be_merged", + "detailed_merge_status": "draft_status", + "sha": "draft123", + "created_at": "2024-01-01T10:00:00Z", + "updated_at": "2024-01-01T10:00:00Z", + "author": {"id": 1, "username": "author"}, + "diff_refs": {"head_sha": "draft123"}, + "labels": [], + "assignees": [], + "reviewers": [] + }`)) + case strings.Contains(path, "/approvals"): + w.WriteHeader(http.StatusOK) + _, _ = w.Write([]byte(`{"approved": false, "approved_by": []}`)) + case strings.Contains(path, "/pipelines"): + w.WriteHeader(http.StatusOK) + _, _ = w.Write([]byte(`[]`)) + case strings.Contains(path, "/notes"): + w.WriteHeader(http.StatusOK) + _, _ = w.Write([]byte(`[]`)) + case strings.Contains(path, "/discussions"): + w.WriteHeader(http.StatusOK) + _, _ = w.Write([]byte(`[]`)) + case strings.Contains(path, "/commits"): + w.WriteHeader(http.StatusOK) + _, _ = w.Write([]byte(`[]`)) + default: + w.WriteHeader(http.StatusNotFound) + } + })) + defer server.Close() + + p := NewPlatform("test-token", WithBaseURL(server.URL)) + ctx := context.Background() + + data, err := p.FetchPR(ctx, "owner", "repo", 789, time.Now()) + if err != nil { + t.Fatalf("FetchPR() error = %v", err) + } + + pr := data.PullRequest + + if !pr.Draft { + t.Error("Draft = false, want true") + } + if pr.MergeableState != "draft" { + t.Errorf("MergeableState = %q, want %q", pr.MergeableState, "draft") + } +} + +func TestPlatform_FetchPR_FailingPipeline(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + + path := r.URL.Path + switch { + case strings.HasSuffix(path, "/merge_requests/101") && !strings.Contains(path, "/approvals"): + w.WriteHeader(http.StatusOK) + _, _ = w.Write([]byte(`{ + "id": 4, + "iid": 101, + "title": "Failing Pipeline MR", + "state": "opened", + "sha": "fail123", + "created_at": "2024-01-01T10:00:00Z", + "updated_at": "2024-01-01T10:00:00Z", + "author": {"id": 1, "username": "author"}, + "diff_refs": {"head_sha": "fail123"}, + "head_pipeline": { + "id": 200, + "status": "failed" + }, + "labels": [], + "assignees": [], + "reviewers": [] + }`)) + case strings.Contains(path, "/approvals"): + 
w.WriteHeader(http.StatusOK) + _, _ = w.Write([]byte(`{"approved": false, "approved_by": []}`)) + case strings.Contains(path, "/pipelines"): + w.WriteHeader(http.StatusOK) + _, _ = w.Write([]byte(`[]`)) + case strings.Contains(path, "/notes"): + w.WriteHeader(http.StatusOK) + _, _ = w.Write([]byte(`[]`)) + case strings.Contains(path, "/discussions"): + w.WriteHeader(http.StatusOK) + _, _ = w.Write([]byte(`[]`)) + case strings.Contains(path, "/commits"): + w.WriteHeader(http.StatusOK) + _, _ = w.Write([]byte(`[]`)) + default: + w.WriteHeader(http.StatusNotFound) + } + })) + defer server.Close() + + p := NewPlatform("test-token", WithBaseURL(server.URL)) + ctx := context.Background() + + data, err := p.FetchPR(ctx, "owner", "repo", 101, time.Now()) + if err != nil { + t.Fatalf("FetchPR() error = %v", err) + } + + if data.PullRequest.TestState != prx.TestStateFailing { + t.Errorf("TestState = %q, want %q", data.PullRequest.TestState, prx.TestStateFailing) + } +} + +func TestPlatform_FetchPR_APIError(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + w.WriteHeader(http.StatusNotFound) + _, _ = w.Write([]byte(`{"message": "merge request not found"}`)) + })) + defer server.Close() + + p := NewPlatform("test-token", WithBaseURL(server.URL)) + ctx := context.Background() + + _, err := p.FetchPR(ctx, "owner", "repo", 999, time.Now()) + if err == nil { + t.Fatal("FetchPR() expected error for 404, got nil") + } + if !strings.Contains(err.Error(), "404") { + t.Errorf("Error should contain 404, got: %v", err) + } +} + +func TestPlatform_TokenAuth(t *testing.T) { + t.Run("PAT token uses Private-Token header", func(t *testing.T) { + var receivedHeader string + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + receivedHeader = r.Header.Get("Private-Token") + w.WriteHeader(http.StatusOK) + _, _ = w.Write([]byte(`{}`)) + })) + defer server.Close() + + p := NewPlatform("glpat-test123", WithBaseURL(server.URL)) + _ = p.doRequest(context.Background(), server.URL+"/test", &struct{}{}) + + if receivedHeader != "glpat-test123" { + t.Errorf("Private-Token header = %q, want %q", receivedHeader, "glpat-test123") + } + }) + + t.Run("OAuth2 token uses Bearer header", func(t *testing.T) { + var receivedHeader string + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + receivedHeader = r.Header.Get("Authorization") + w.WriteHeader(http.StatusOK) + _, _ = w.Write([]byte(`{}`)) + })) + defer server.Close() + + p := NewPlatform("oauth2-token-here", WithBaseURL(server.URL)) + _ = p.doRequest(context.Background(), server.URL+"/test", &struct{}{}) + + if receivedHeader != "Bearer oauth2-token-here" { + t.Errorf("Authorization header = %q, want %q", receivedHeader, "Bearer oauth2-token-here") + } + }) +} + +func TestConvertState(t *testing.T) { + tests := []struct { + input string + want string + }{ + {"opened", "open"}, + {"closed", "closed"}, + {"merged", "closed"}, + {"unknown", "unknown"}, + } + + for _, tt := range tests { + t.Run(tt.input, func(t *testing.T) { + got := convertState(tt.input) + if got != tt.want { + t.Errorf("convertState(%q) = %q, want %q", tt.input, got, tt.want) + } + }) + } +} + +func TestConvertReviewerState(t *testing.T) { + tests := []struct { + input string + want prx.ReviewState + }{ + {"reviewed", prx.ReviewStateCommented}, + {"unreviewed", prx.ReviewStatePending}, + {"", prx.ReviewStatePending}, + } + + for _, tt := range tests { + 
t.Run(tt.input, func(t *testing.T) { + got := convertReviewerState(tt.input) + if got != tt.want { + t.Errorf("convertReviewerState(%q) = %v, want %v", tt.input, got, tt.want) + } + }) + } +} + +func TestConvertPipelineToTestState(t *testing.T) { + tests := []struct { + input string + want string + }{ + {"success", prx.TestStatePassing}, + {"failed", prx.TestStateFailing}, + {"running", prx.TestStateRunning}, + {"pending", prx.TestStatePending}, + {"waiting_for_resource", prx.TestStatePending}, + {"preparing", prx.TestStatePending}, + {"created", prx.TestStateQueued}, + {"scheduled", prx.TestStateQueued}, + {"canceled", prx.TestStateNone}, + {"skipped", prx.TestStateNone}, + {"manual", prx.TestStateNone}, + {"unknown", prx.TestStateNone}, + } + + for _, tt := range tests { + t.Run(tt.input, func(t *testing.T) { + got := convertPipelineToTestState(tt.input) + if got != tt.want { + t.Errorf("convertPipelineToTestState(%q) = %q, want %q", tt.input, got, tt.want) + } + }) + } +} + +func TestConvertPipelineStatus(t *testing.T) { + tests := []struct { + input string + want string + }{ + {"success", "success"}, + {"failed", "failure"}, + {"canceled", "cancelled"}, + {"cancelled", "cancelled"}, + {"skipped", "skipped"}, + {"running", "running"}, + {"pending", "pending"}, + {"created", "pending"}, + {"waiting_for_resource", "pending"}, + {"preparing", "pending"}, + {"manual", "action_required"}, + {"unknown", "unknown"}, + } + + for _, tt := range tests { + t.Run(tt.input, func(t *testing.T) { + got := convertPipelineStatus(tt.input) + if got != tt.want { + t.Errorf("convertPipelineStatus(%q) = %q, want %q", tt.input, got, tt.want) + } + }) + } +} + +func TestConvertMergeStatus(t *testing.T) { + tests := []struct { + name string + mr *mergeRequest + wantMergeable string + }{ + { + name: "mergeable", + mr: &mergeRequest{DetailedMergeStatus: "mergeable"}, + wantMergeable: "clean", + }, + { + name: "ci_must_pass", + mr: &mergeRequest{DetailedMergeStatus: "ci_must_pass"}, + wantMergeable: "blocked", + }, + { + name: "ci_still_running", + mr: &mergeRequest{DetailedMergeStatus: "ci_still_running"}, + wantMergeable: "blocked", + }, + { + name: "discussions_not_resolved", + mr: &mergeRequest{DetailedMergeStatus: "discussions_not_resolved"}, + wantMergeable: "blocked", + }, + { + name: "not_approved", + mr: &mergeRequest{DetailedMergeStatus: "not_approved"}, + wantMergeable: "blocked", + }, + { + name: "conflict", + mr: &mergeRequest{DetailedMergeStatus: "conflict"}, + wantMergeable: "dirty", + }, + { + name: "need_rebase", + mr: &mergeRequest{DetailedMergeStatus: "need_rebase"}, + wantMergeable: "dirty", + }, + { + name: "checking", + mr: &mergeRequest{DetailedMergeStatus: "checking"}, + wantMergeable: "unknown", + }, + { + name: "draft_status", + mr: &mergeRequest{DetailedMergeStatus: "draft_status"}, + wantMergeable: "draft", + }, + { + name: "fallback can_be_merged", + mr: &mergeRequest{MergeStatus: "can_be_merged"}, + wantMergeable: "clean", + }, + { + name: "fallback cannot_be_merged", + mr: &mergeRequest{MergeStatus: "cannot_be_merged"}, + wantMergeable: "dirty", + }, + { + name: "fallback unknown", + mr: &mergeRequest{MergeStatus: "unchecked"}, + wantMergeable: "unknown", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := convertMergeStatus(tt.mr) + if got != tt.wantMergeable { + t.Errorf("convertMergeStatus() = %q, want %q", got, tt.wantMergeable) + } + }) + } +} + +func TestIsBot(t *testing.T) { + tests := []struct { + name string + user *user + wantBot bool + 
}{ + {"nil user", nil, false}, + {"regular user", &user{Username: "developer"}, false}, + {"bot suffix", &user{Username: "ci[bot]"}, true}, + {"bot suffix 2", &user{Username: "dependabot-bot"}, true}, + {"ghost user", &user{Username: "ghost"}, true}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := isBot(tt.user) + if got != tt.wantBot { + t.Errorf("isBot() = %v, want %v", got, tt.wantBot) + } + }) + } +} + +func TestConvertSystemNote(t *testing.T) { + now := time.Now() + author := user{Username: "testuser"} + + tests := []struct { + name string + n *note + wantKind string + wantNil bool + }{ + { + name: "approved", + n: ¬e{Body: "approved this merge request", Author: author, CreatedAt: now, System: true}, + wantKind: prx.EventKindReview, + }, + { + name: "unapproved", + n: ¬e{Body: "unapproved this merge request", Author: author, CreatedAt: now, System: true}, + wantKind: prx.EventKindReview, + }, + { + name: "requested review", + n: ¬e{Body: "requested review from @reviewer", Author: author, CreatedAt: now, System: true}, + wantKind: prx.EventKindReviewRequested, + }, + { + name: "assigned", + n: ¬e{Body: "assigned to @assignee", Author: author, CreatedAt: now, System: true}, + wantKind: prx.EventKindAssigned, + }, + { + name: "unassigned", + n: ¬e{Body: "unassigned @user", Author: author, CreatedAt: now, System: true}, + wantKind: prx.EventKindUnassigned, + }, + { + name: "added label", + n: ¬e{Body: "added ~bug label", Author: author, CreatedAt: now, System: true}, + wantKind: prx.EventKindLabeled, + }, + { + name: "removed label", + n: ¬e{Body: "removed ~bug label", Author: author, CreatedAt: now, System: true}, + wantKind: prx.EventKindUnlabeled, + }, + { + name: "marked as draft", + n: ¬e{Body: "marked as a draft", Author: author, CreatedAt: now, System: true}, + wantKind: prx.EventKindConvertToDraft, + }, + { + name: "marked ready", + n: ¬e{Body: "marked this merge request as ready", Author: author, CreatedAt: now, System: true}, + wantKind: prx.EventKindReadyForReview, + }, + { + name: "changed target branch", + n: ¬e{Body: "changed target branch from main to develop", Author: author, CreatedAt: now, System: true}, + wantKind: prx.EventKindBaseRefChanged, + }, + { + name: "mentioned in", + n: ¬e{Body: "mentioned in issue #123", Author: author, CreatedAt: now, System: true}, + wantKind: prx.EventKindCrossReferenced, + }, + { + name: "closed", + n: ¬e{Body: "closed", Author: author, CreatedAt: now, System: true}, + wantKind: prx.EventKindClosed, + }, + { + name: "reopened", + n: ¬e{Body: "reopened", Author: author, CreatedAt: now, System: true}, + wantKind: prx.EventKindReopened, + }, + { + name: "changed title", + n: ¬e{Body: "changed title from old to new", Author: author, CreatedAt: now, System: true}, + wantKind: prx.EventKindRenamedTitle, + }, + { + name: "unknown system note", + n: ¬e{Body: "some unknown system action", Author: author, CreatedAt: now, System: true}, + wantKind: prx.EventKindComment, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := convertSystemNote(tt.n) + if tt.wantNil { + if got != nil { + t.Errorf("convertSystemNote() = %+v, want nil", got) + } + return + } + if got == nil { + t.Fatal("convertSystemNote() = nil, want non-nil") + } + if got.Kind != tt.wantKind { + t.Errorf("Kind = %q, want %q", got.Kind, tt.wantKind) + } + }) + } +} + +func TestConvertNote(t *testing.T) { + now := time.Now() + author := user{Username: "commenter"} + + t.Run("regular comment", func(t *testing.T) { + n := 
&note{ + Body: "This looks good!", + Author: author, + CreatedAt: now, + System: false, + } + got := convertNote(n) + if got.Kind != prx.EventKindComment { + t.Errorf("Kind = %q, want %q", got.Kind, prx.EventKindComment) + } + if got.Actor != "commenter" { + t.Errorf("Actor = %q, want %q", got.Actor, "commenter") + } + }) + + t.Run("question comment", func(t *testing.T) { + n := &note{ + Body: "Can you explain this?", + Author: author, + CreatedAt: now, + System: false, + } + got := convertNote(n) + if !got.Question { + t.Error("Question = false, want true") + } + }) + + t.Run("resolved comment", func(t *testing.T) { + n := &note{ + Body: "Fixed", + Author: author, + CreatedAt: now, + System: false, + Resolved: true, + } + got := convertNote(n) + if !got.Outdated { + t.Error("Outdated = false, want true") + } + }) + + t.Run("system note delegates", func(t *testing.T) { + n := &note{ + Body: "approved this merge request", + Author: author, + CreatedAt: now, + System: true, + } + got := convertNote(n) + if got.Kind != prx.EventKindReview { + t.Errorf("Kind = %q, want %q", got.Kind, prx.EventKindReview) + } + }) +} + +func TestConvertPipeline(t *testing.T) { + now := time.Now() + started := now.Add(-30 * time.Minute) + finished := now.Add(-5 * time.Minute) + + t.Run("completed pipeline", func(t *testing.T) { + p := &pipeline{ + ID: 123, + Status: "success", + StartedAt: &started, + FinishedAt: &finished, + } + events := convertPipeline(p) + if len(events) != 2 { + t.Fatalf("len(events) = %d, want 2", len(events)) + } + // Check started event + if events[0].Outcome != "running" && events[0].Outcome != "pending" { + t.Errorf("Started event outcome = %q, want running or pending", events[0].Outcome) + } + // Check finished event + if events[1].Outcome != "success" { + t.Errorf("Finished event outcome = %q, want success", events[1].Outcome) + } + }) + + t.Run("pipeline not started", func(t *testing.T) { + p := &pipeline{ + ID: 124, + Status: "pending", + } + events := convertPipeline(p) + if len(events) != 0 { + t.Errorf("len(events) = %d, want 0 for pending pipeline without timestamps", len(events)) + } + }) + + t.Run("running pipeline", func(t *testing.T) { + p := &pipeline{ + ID: 125, + Status: "running", + StartedAt: &started, + } + events := convertPipeline(p) + if len(events) != 1 { + t.Fatalf("len(events) = %d, want 1", len(events)) + } + if events[0].Outcome != "running" { + t.Errorf("Outcome = %q, want running", events[0].Outcome) + } + }) +} + +func TestExtractMentionFromNote(t *testing.T) { + tests := []struct { + body string + want string + }{ + {"requested review from @reviewer", "reviewer"}, + {"assigned to @assignee", "assignee"}, + {"no mention here", ""}, + {"@first @second", "first"}, + {"", ""}, + } + + for _, tt := range tests { + t.Run(tt.body, func(t *testing.T) { + got := extractMentionFromNote(tt.body) + if got != tt.want { + t.Errorf("extractMentionFromNote(%q) = %q, want %q", tt.body, got, tt.want) + } + }) + } +} + +func TestSafeUsername(t *testing.T) { + tests := []struct { + name string + u *user + want string + }{ + {"nil user", nil, ""}, + {"valid user", &user{Username: "testuser"}, "testuser"}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := safeUsername(tt.u) + if got != tt.want { + t.Errorf("safeUsername() = %q, want %q", got, tt.want) + } + }) + } +} + +func TestGetDetailedStatusText(t *testing.T) { + tests := []struct { + name string + status *detailedStatus + want string + }{ + {"nil status", nil, ""}, + {"with text", &detailedStatus{Text: 
"Pipeline passed"}, "Pipeline passed"}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := getDetailedStatusText(tt.status) + if got != tt.want { + t.Errorf("getDetailedStatusText() = %q, want %q", got, tt.want) + } + }) + } +} + +func TestURLEncode(t *testing.T) { + tests := []struct { + input string + want string + }{ + {"owner/repo", "owner%2Frepo"}, + {"group/subgroup/project", "group%2Fsubgroup%2Fproject"}, + {"simple", "simple"}, + } + + for _, tt := range tests { + t.Run(tt.input, func(t *testing.T) { + got := urlEncode(tt.input) + if got != tt.want { + t.Errorf("urlEncode(%q) = %q, want %q", tt.input, got, tt.want) + } + }) + } +} + +func TestConvertMergeRequest(t *testing.T) { + now := time.Now() + mr := &mergeRequest{ + IID: 42, + Title: "Test MR", + Description: "Description", + State: "opened", + Draft: false, + CreatedAt: now, + UpdatedAt: now, + Author: user{Username: "author"}, + DiffRefs: &diffRefs{HeadSHA: "abc123"}, + Labels: []string{"enhancement"}, + Assignees: []user{{Username: "dev1"}}, + Reviewers: []reviewerState{{user: user{Username: "reviewer1"}, State: "unreviewed"}}, + HeadPipeline: &pipeline{Status: "success"}, + MergeStatus: "can_be_merged", + } + + a := &approvals{ + ApprovedBy: []approvalUser{{User: user{Username: "approver1"}}}, + } + + commits := []commit{ + {ShortID: "abc1234"}, + {ShortID: "def5678"}, + } + + result := convertMergeRequest(mr, a, commits) + + if result.Number != 42 { + t.Errorf("Number = %d, want 42", result.Number) + } + if result.Title != "Test MR" { + t.Errorf("Title = %q, want %q", result.Title, "Test MR") + } + if result.Author != "author" { + t.Errorf("Author = %q, want %q", result.Author, "author") + } + if result.State != "open" { + t.Errorf("State = %q, want open", result.State) + } + if len(result.Commits) != 2 { + t.Errorf("len(Commits) = %d, want 2", len(result.Commits)) + } + if result.Reviewers["approver1"] != prx.ReviewStateApproved { + t.Errorf("Reviewers[approver1] = %v, want approved", result.Reviewers["approver1"]) + } + if result.TestState != prx.TestStatePassing { + t.Errorf("TestState = %q, want %q", result.TestState, prx.TestStatePassing) + } +} + +func TestConvertMergeRequest_NoHeadSHA(t *testing.T) { + now := time.Now() + mr := &mergeRequest{ + IID: 1, + State: "opened", + CreatedAt: now, + UpdatedAt: now, + Author: user{Username: "author"}, + SHA: "fallback-sha", + } + + result := convertMergeRequest(mr, nil, nil) + + if result.HeadSHA != "fallback-sha" { + t.Errorf("HeadSHA = %q, want fallback-sha", result.HeadSHA) + } +} + +func TestConvertMergeRequest_Merged(t *testing.T) { + now := time.Now() + mergedAt := now.Add(-time.Hour) + mr := &mergeRequest{ + IID: 1, + State: "merged", + CreatedAt: now.Add(-24 * time.Hour), + UpdatedAt: now, + MergedAt: &mergedAt, + MergedBy: &user{Username: "merger"}, + Author: user{Username: "author"}, + } + + result := convertMergeRequest(mr, nil, nil) + + if !result.Merged { + t.Error("Merged = false, want true") + } + if result.MergedBy != "merger" { + t.Errorf("MergedBy = %q, want merger", result.MergedBy) + } +} + +func TestConvertMergeRequest_BotAuthor(t *testing.T) { + now := time.Now() + mr := &mergeRequest{ + IID: 1, + State: "opened", + CreatedAt: now, + UpdatedAt: now, + Author: user{Username: "dependabot-bot"}, + } + + result := convertMergeRequest(mr, nil, nil) + + if !result.AuthorBot { + t.Error("AuthorBot = false, want true") + } +} diff --git a/pkg/prx/graphql_complete.go b/pkg/prx/graphql_complete.go deleted file mode 100644 index 
6b9aea8..0000000 --- a/pkg/prx/graphql_complete.go +++ /dev/null @@ -1,721 +0,0 @@ -package prx - -import ( - "context" - "fmt" - "sort" - "strings" - "time" -) - -// fetchPullRequestCompleteViaGraphQL fetches all PR data in a single GraphQL query. -func (c *Client) fetchPullRequestCompleteViaGraphQL(ctx context.Context, owner, repo string, prNumber int) (*PullRequestData, error) { - data, err := c.executeGraphQL(ctx, owner, repo, prNumber) - if err != nil { - return nil, err - } - - pr := c.convertGraphQLToPullRequest(ctx, data, owner, repo) - events := c.convertGraphQLToEventsComplete(ctx, data, owner, repo) - requiredChecks := c.extractRequiredChecksFromGraphQL(data) - - events = filterEvents(events) - sort.Slice(events, func(i, j int) bool { - return events[i].Timestamp.Before(events[j].Timestamp) - }) - upgradeWriteAccess(events) - - testState := c.calculateTestStateFromGraphQL(data) - finalizePullRequest(&pr, events, requiredChecks, testState) - - return &PullRequestData{ - PullRequest: pr, - Events: events, - }, nil -} - -// executeGraphQL executes the GraphQL query and handles errors. -func (c *Client) executeGraphQL(ctx context.Context, owner, repo string, prNumber int) (*graphQLPullRequestComplete, error) { - variables := map[string]any{ - "owner": owner, - "repo": repo, - "number": prNumber, - } - - var result graphQLCompleteResponse - if err := c.github.GraphQL(ctx, completeGraphQLQuery, variables, &result); err != nil { - return nil, err - } - - if len(result.Errors) > 0 { - var errMsgs []string - var hasPermissionError bool - for _, e := range result.Errors { - errMsgs = append(errMsgs, e.Message) - msg := strings.ToLower(e.Message) - if strings.Contains(msg, "not accessible by integration") || - strings.Contains(msg, "resource not accessible") || - strings.Contains(msg, "forbidden") || - strings.Contains(msg, "insufficient permissions") || - strings.Contains(msg, "requires authentication") { - hasPermissionError = true - } - } - - errStr := strings.Join(errMsgs, "; ") - if result.Data.Repository.PullRequest.Number == 0 { - if hasPermissionError { - return nil, fmt.Errorf( - "fetching PR %s/%s#%d via GraphQL failed due to insufficient permissions: %s "+ - "(note: some fields like branchProtectionRule or refUpdateRule may require push access "+ - "even on public repositories; check token scopes or try using a token with 'repo' or 'public_repo' scope)", - owner, repo, prNumber, errStr) - } - return nil, fmt.Errorf("fetching PR %s/%s#%d via GraphQL: %s", owner, repo, prNumber, errStr) - } - - if hasPermissionError { - c.logger.WarnContext(ctx, "GraphQL query returned permission errors but PR data was retrieved - some fields may be missing", - "owner", owner, - "repo", repo, - "pr", prNumber, - "errors", errStr, - "note", "fields like branchProtectionRule or refUpdateRule require push access") - } else { - c.logger.WarnContext(ctx, "GraphQL query returned errors but PR data was retrieved", - "owner", owner, - "repo", repo, - "pr", prNumber, - "errors", errStr) - } - } - - return &result.Data.Repository.PullRequest, nil -} - -// convertGraphQLToPullRequest converts GraphQL data to PullRequest. 
-func (c *Client) convertGraphQLToPullRequest(ctx context.Context, data *graphQLPullRequestComplete, owner, repo string) PullRequest { - pr := PullRequest{ - Number: data.Number, - Title: data.Title, - Body: truncate(data.Body), - Author: data.Author.Login, - State: strings.ToLower(data.State), - CreatedAt: data.CreatedAt, - UpdatedAt: data.UpdatedAt, - Draft: data.IsDraft, - Additions: data.Additions, - Deletions: data.Deletions, - ChangedFiles: data.ChangedFiles, - HeadSHA: data.HeadRef.Target.OID, - } - - if data.ClosedAt != nil { - pr.ClosedAt = data.ClosedAt - } - if data.MergedAt != nil { - pr.MergedAt = data.MergedAt - pr.Merged = true - } - if data.MergedBy != nil { - pr.MergedBy = data.MergedBy.Login - } - - switch data.MergeStateStatus { - case "CLEAN": - pr.MergeableState = "clean" - case "UNSTABLE": - pr.MergeableState = "unstable" - case "BLOCKED": - pr.MergeableState = "blocked" - case "BEHIND": - pr.MergeableState = "behind" - case "DIRTY": - pr.MergeableState = "dirty" - default: - pr.MergeableState = strings.ToLower(data.MergeStateStatus) - } - - if data.Author.Login != "" { - pr.AuthorWriteAccess = c.writeAccessFromAssociation(ctx, owner, repo, data.Author.Login, data.AuthorAssociation) - pr.AuthorBot = isBot(data.Author) - } - - pr.Assignees = make([]string, 0) - for _, assignee := range data.Assignees.Nodes { - pr.Assignees = append(pr.Assignees, assignee.Login) - } - - for _, label := range data.Labels.Nodes { - pr.Labels = append(pr.Labels, label.Name) - } - - for _, node := range data.Commits.Nodes { - pr.Commits = append(pr.Commits, node.Commit.OID) - } - - pr.Reviewers = buildReviewersMap(data) - - return pr -} - -// buildReviewersMap constructs a map of reviewer login to their review state. -func buildReviewersMap(data *graphQLPullRequestComplete) map[string]ReviewState { - reviewers := make(map[string]ReviewState) - - for _, request := range data.ReviewRequests.Nodes { - reviewer := request.RequestedReviewer - if reviewer.Login != "" { - reviewers[reviewer.Login] = ReviewStatePending - } else if reviewer.Name != "" { - reviewers[reviewer.Name] = ReviewStatePending - } - } - - for i := range data.Reviews.Nodes { - review := &data.Reviews.Nodes[i] - if review.Author.Login == "" { - continue - } - - var state ReviewState - switch strings.ToUpper(review.State) { - case "APPROVED": - state = ReviewStateApproved - case "CHANGES_REQUESTED": - state = ReviewStateChangesRequested - case "COMMENTED": - state = ReviewStateCommented - default: - continue - } - - reviewers[review.Author.Login] = state - } - - return reviewers -} - -// convertGraphQLToEventsComplete converts GraphQL data to Events. 
-func (c *Client) convertGraphQLToEventsComplete(ctx context.Context, data *graphQLPullRequestComplete, owner, repo string) []Event { - var events []Event - - events = append(events, Event{ - Kind: EventKindPROpened, - Timestamp: data.CreatedAt, - Actor: data.Author.Login, - Body: truncate(data.Body), - Bot: isBot(data.Author), - WriteAccess: c.writeAccessFromAssociation(ctx, owner, repo, data.Author.Login, data.AuthorAssociation), - }) - - for _, node := range data.Commits.Nodes { - event := Event{ - Kind: EventKindCommit, - Timestamp: node.Commit.CommittedDate, - Body: node.Commit.OID, - Description: truncate(node.Commit.Message), - } - if node.Commit.Author.User != nil { - event.Actor = node.Commit.Author.User.Login - event.Bot = isBot(*node.Commit.Author.User) - } else { - event.Actor = node.Commit.Author.Name - } - events = append(events, event) - } - - for i := range data.Reviews.Nodes { - review := &data.Reviews.Nodes[i] - if review.State == "" { - continue - } - timestamp := review.CreatedAt - if review.SubmittedAt != nil { - timestamp = *review.SubmittedAt - } - event := Event{ - Kind: EventKindReview, - Timestamp: timestamp, - Actor: review.Author.Login, - Body: truncate(review.Body), - Outcome: strings.ToLower(review.State), - Question: containsQuestion(review.Body), - Bot: isBot(review.Author), - WriteAccess: c.writeAccessFromAssociation(ctx, owner, repo, review.Author.Login, review.AuthorAssociation), - } - events = append(events, event) - } - - for i := range data.ReviewThreads.Nodes { - thread := &data.ReviewThreads.Nodes[i] - for j := range thread.Comments.Nodes { - comment := &thread.Comments.Nodes[j] - event := Event{ - Kind: EventKindReviewComment, - Timestamp: comment.CreatedAt, - Actor: comment.Author.Login, - Body: truncate(comment.Body), - Question: containsQuestion(comment.Body), - Bot: isBot(comment.Author), - WriteAccess: c.writeAccessFromAssociation(ctx, owner, repo, comment.Author.Login, comment.AuthorAssociation), - Outdated: comment.Outdated, - } - events = append(events, event) - } - } - - for _, comment := range data.Comments.Nodes { - event := Event{ - Kind: EventKindComment, - Timestamp: comment.CreatedAt, - Actor: comment.Author.Login, - Body: truncate(comment.Body), - Question: containsQuestion(comment.Body), - Bot: isBot(comment.Author), - WriteAccess: c.writeAccessFromAssociation(ctx, owner, repo, comment.Author.Login, comment.AuthorAssociation), - } - events = append(events, event) - } - - if data.HeadRef.Target.StatusCheckRollup != nil { - for i := range data.HeadRef.Target.StatusCheckRollup.Contexts.Nodes { - node := &data.HeadRef.Target.StatusCheckRollup.Contexts.Nodes[i] - switch node.TypeName { - case "CheckRun": - var description string - switch { - case node.Title != "" && node.Summary != "": - description = fmt.Sprintf("%s: %s", node.Title, node.Summary) - case node.Title != "": - description = node.Title - case node.Summary != "": - description = node.Summary - default: - // No description available - } - - if node.StartedAt != nil { - events = append(events, Event{ - Kind: EventKindCheckRun, - Timestamp: *node.StartedAt, - Body: node.Name, - Outcome: strings.ToLower(node.Status), - Bot: true, - Description: description, - }) - } - - if node.CompletedAt != nil { - events = append(events, Event{ - Kind: EventKindCheckRun, - Timestamp: *node.CompletedAt, - Body: node.Name, - Outcome: strings.ToLower(node.Conclusion), - Bot: true, - Description: description, - }) - } - - case "StatusContext": - if node.CreatedAt == nil { - continue - } - event 
:= Event{ - Kind: EventKindStatusCheck, - Timestamp: *node.CreatedAt, - Outcome: strings.ToLower(node.State), - Body: node.Context, - Description: node.Description, - } - if node.Creator != nil { - event.Actor = node.Creator.Login - event.Bot = isBot(*node.Creator) - } - events = append(events, event) - - default: - // Unknown check type, skip - } - } - } - - for _, item := range data.TimelineItems.Nodes { - event := c.parseGraphQLTimelineEvent(ctx, item, owner, repo) - if event != nil { - events = append(events, *event) - } - } - - if data.ClosedAt != nil && !data.IsDraft { - event := Event{ - Kind: EventKindPRClosed, - Timestamp: *data.ClosedAt, - } - if data.MergedBy != nil { - event.Actor = data.MergedBy.Login - event.Kind = EventKindPRMerged - event.Bot = isBot(*data.MergedBy) - } - events = append(events, event) - } - - return events -} - -// parseGraphQLTimelineEvent parses a single timeline event. -// -//nolint:gocognit,maintidx,revive // High complexity justified - must handle all GitHub timeline event types -func (*Client) parseGraphQLTimelineEvent(_ context.Context, item map[string]any, _, _ string) *Event { - typename, ok := item["__typename"].(string) - if !ok { - return nil - } - - getTime := func(key string) *time.Time { - if str, ok := item[key].(string); ok { - if t, err := time.Parse(time.RFC3339, str); err == nil { - return &t - } - } - return nil - } - - getActor := func() string { - if actor, ok := item["actor"].(map[string]any); ok { - if login, ok := actor["login"].(string); ok { - return login - } - } - return "unknown" - } - - isActorBot := func() bool { - if actor, ok := item["actor"].(map[string]any); ok { - var actorObj graphQLActor - if login, ok := actor["login"].(string); ok { - actorObj.Login = login - } - if id, ok := actor["id"].(string); ok { - actorObj.ID = id - } - if typ, ok := actor["__typename"].(string); ok { - actorObj.Type = typ - } - return isBot(actorObj) - } - return false - } - - createdAt := getTime("createdAt") - if createdAt == nil { - return nil - } - - event := &Event{ - Timestamp: *createdAt, - Actor: getActor(), - Bot: isActorBot(), - } - - switch typename { - case "AssignedEvent": - event.Kind = EventKindAssigned - if assignee, ok := item["assignee"].(map[string]any); ok { - if login, ok := assignee["login"].(string); ok { - event.Target = login - } - } - - case "UnassignedEvent": - event.Kind = EventKindUnassigned - if assignee, ok := item["assignee"].(map[string]any); ok { - if login, ok := assignee["login"].(string); ok { - event.Target = login - } - } - - case "LabeledEvent": - event.Kind = EventKindLabeled - if label, ok := item["label"].(map[string]any); ok { - if name, ok := label["name"].(string); ok { - event.Target = name - } - } - - case "UnlabeledEvent": - event.Kind = EventKindUnlabeled - if label, ok := item["label"].(map[string]any); ok { - if name, ok := label["name"].(string); ok { - event.Target = name - } - } - - case "MilestonedEvent": - event.Kind = EventKindMilestoned - if title, ok := item["milestoneTitle"].(string); ok { - event.Target = title - } - - case "DemilestonedEvent": - event.Kind = EventKindDemilestoned - if title, ok := item["milestoneTitle"].(string); ok { - event.Target = title - } - - case "ReviewRequestedEvent": - event.Kind = EventKindReviewRequested - if reviewer, ok := item["requestedReviewer"].(map[string]any); ok { - if login, ok := reviewer["login"].(string); ok { - event.Target = login - } else if name, ok := reviewer["name"].(string); ok { - event.Target = name - } - } - - case 
"ReviewRequestRemovedEvent": - event.Kind = EventKindReviewRequestRemoved - if reviewer, ok := item["requestedReviewer"].(map[string]any); ok { - if login, ok := reviewer["login"].(string); ok { - event.Target = login - } else if name, ok := reviewer["name"].(string); ok { - event.Target = name - } - } - - case "MentionedEvent": - event.Kind = EventKindMentioned - event.Body = "User was mentioned" - - case "ReadyForReviewEvent": - event.Kind = EventKindReadyForReview - - case "ConvertToDraftEvent": - event.Kind = EventKindConvertToDraft - - case "ClosedEvent": - event.Kind = EventKindClosed - - case "ReopenedEvent": - event.Kind = EventKindReopened - - case "MergedEvent": - event.Kind = EventKindMerged - - case "AutoMergeEnabledEvent": - event.Kind = EventKindAutoMergeEnabled - - case "AutoMergeDisabledEvent": - event.Kind = EventKindAutoMergeDisabled - - case "ReviewDismissedEvent": - event.Kind = EventKindReviewDismissed - if msg, ok := item["dismissalMessage"].(string); ok { - event.Body = msg - } - - case "BaseRefChangedEvent": - event.Kind = EventKindBaseRefChanged - - case "BaseRefForcePushedEvent": - event.Kind = EventKindBaseRefForcePushed - - case "HeadRefForcePushedEvent": - event.Kind = EventKindHeadRefForcePushed - - case "HeadRefDeletedEvent": - event.Kind = EventKindHeadRefDeleted - - case "HeadRefRestoredEvent": - event.Kind = EventKindHeadRefRestored - - case "RenamedTitleEvent": - event.Kind = EventKindRenamedTitle - if prev, ok := item["previousTitle"].(string); ok { - if curr, ok := item["currentTitle"].(string); ok { - event.Body = fmt.Sprintf("Renamed from %q to %q", prev, curr) - } - } - - case "LockedEvent": - event.Kind = EventKindLocked - - case "UnlockedEvent": - event.Kind = EventKindUnlocked - - case "AddedToMergeQueueEvent": - event.Kind = EventKindAddedToMergeQueue - - case "RemovedFromMergeQueueEvent": - event.Kind = EventKindRemovedFromMergeQueue - - case "AutomaticBaseChangeSucceededEvent": - event.Kind = EventKindAutomaticBaseChangeSucceeded - - case "AutomaticBaseChangeFailedEvent": - event.Kind = EventKindAutomaticBaseChangeFailed - - case "ConnectedEvent": - event.Kind = EventKindConnected - - case "DisconnectedEvent": - event.Kind = EventKindDisconnected - - case "CrossReferencedEvent": - event.Kind = EventKindCrossReferenced - - case "ReferencedEvent": - event.Kind = EventKindReferenced - - case "SubscribedEvent": - event.Kind = EventKindSubscribed - - case "UnsubscribedEvent": - event.Kind = EventKindUnsubscribed - - case "DeployedEvent": - event.Kind = EventKindDeployed - - case "DeploymentEnvironmentChangedEvent": - event.Kind = EventKindDeploymentEnvironmentChanged - - case "PinnedEvent": - event.Kind = EventKindPinned - - case "UnpinnedEvent": - event.Kind = EventKindUnpinned - - case "TransferredEvent": - event.Kind = EventKindTransferred - - case "UserBlockedEvent": - event.Kind = EventKindUserBlocked - - default: - return nil - } - - return event -} - -// writeAccessFromAssociation calculates write access from association. 
-func (c *Client) writeAccessFromAssociation(ctx context.Context, owner, repo, user, association string) int { - if user == "" { - return WriteAccessNA - } - - switch association { - case "OWNER", "COLLABORATOR": - return WriteAccessDefinitely - case "MEMBER": - return c.checkCollaboratorPermission(ctx, owner, repo, user) - case "CONTRIBUTOR", "NONE", "FIRST_TIME_CONTRIBUTOR", "FIRST_TIMER": - return WriteAccessUnlikely - default: - return WriteAccessNA - } -} - -// checkCollaboratorPermission checks if a user has write access. -func (c *Client) checkCollaboratorPermission(ctx context.Context, owner, repo, user string) int { - collabs, err := c.collaboratorsCache.Fetch(collaboratorsCacheKey(owner, repo), func() (map[string]string, error) { - result, fetchErr := c.github.Collaborators(ctx, owner, repo) - if fetchErr != nil { - c.logger.WarnContext(ctx, "failed to fetch collaborators for write access check", - "owner", owner, - "repo", repo, - "user", user, - "error", fetchErr) - - // On any error (including 403 Forbidden), return the error - // so that checkCollaboratorPermission returns WriteAccessLikely - return nil, fetchErr - } - - return result, nil - }) - if err != nil { - return WriteAccessLikely - } - - switch collabs[user] { - case "admin", "maintain", "write": - return WriteAccessDefinitely - case "read", "triage", "none": - return WriteAccessNo - default: - return WriteAccessUnlikely - } -} - -// extractRequiredChecksFromGraphQL gets required checks from GraphQL response. -func (*Client) extractRequiredChecksFromGraphQL(data *graphQLPullRequestComplete) []string { - seen := make(map[string]bool) - - if data.BaseRef.RefUpdateRule != nil { - for _, c := range data.BaseRef.RefUpdateRule.RequiredStatusCheckContexts { - seen[c] = true - } - } - - if data.BaseRef.BranchProtectionRule != nil { - for _, c := range data.BaseRef.BranchProtectionRule.RequiredStatusCheckContexts { - seen[c] = true - } - } - - checks := make([]string, 0, len(seen)) - for c := range seen { - checks = append(checks, c) - } - return checks -} - -// calculateTestStateFromGraphQL determines test state from check runs. 
-func (*Client) calculateTestStateFromGraphQL(data *graphQLPullRequestComplete) string { - if data.HeadRef.Target.StatusCheckRollup == nil { - return "" - } - - var hasFailure, hasRunning, hasQueued bool - - for i := range data.HeadRef.Target.StatusCheckRollup.Contexts.Nodes { - node := &data.HeadRef.Target.StatusCheckRollup.Contexts.Nodes[i] - if node.TypeName != "CheckRun" { - continue - } - - if !strings.Contains(strings.ToLower(node.Name), "test") && - !strings.Contains(strings.ToLower(node.Name), "check") && - !strings.Contains(strings.ToLower(node.Name), "ci") { - continue - } - - switch strings.ToLower(node.Status) { - case "queued": - hasQueued = true - case "in_progress": - hasRunning = true - default: - // Other statuses don't affect state - } - - switch strings.ToLower(node.Conclusion) { - case "failure", "timed_out", "action_required": - hasFailure = true - default: - // Other conclusions don't affect state - } - } - - if hasFailure { - return "failing" - } - if hasRunning { - return "running" - } - if hasQueued { - return "queued" - } - return "passing" -} diff --git a/pkg/prx/participant_access_test.go b/pkg/prx/participant_access_test.go index 49a1f52..b991220 100644 --- a/pkg/prx/participant_access_test.go +++ b/pkg/prx/participant_access_test.go @@ -44,7 +44,7 @@ func TestCalculateParticipantAccess(t *testing.T) { }, } - participants := calculateParticipantAccess(events, pr) + participants := CalculateParticipantAccess(events, pr) // Verify author is included if access, ok := participants["author1"]; !ok || access != WriteAccessUnlikely { @@ -104,7 +104,7 @@ func TestCalculateParticipantAccessUpgrade(t *testing.T) { }, } - participants := calculateParticipantAccess(events, pr) + participants := CalculateParticipantAccess(events, pr) // Verify user1 got upgraded to WriteAccessDefinitely if access, ok := participants["user1"]; !ok || access != WriteAccessDefinitely { @@ -121,7 +121,7 @@ func TestCalculateParticipantAccessEmpty(t *testing.T) { events := []Event{} - participants := calculateParticipantAccess(events, pr) + participants := CalculateParticipantAccess(events, pr) // Should only have the author if len(participants) != 1 { diff --git a/pkg/prx/platform.go b/pkg/prx/platform.go new file mode 100644 index 0000000..ccb25e1 --- /dev/null +++ b/pkg/prx/platform.go @@ -0,0 +1,17 @@ +package prx + +import ( + "context" + "time" +) + +// Platform fetches pull request data from a code hosting service. +// Each platform (GitHub, GitLab, Codeberg) implements its own fetching strategy. +type Platform interface { + // FetchPR retrieves a pull request with all events and metadata. + // The refTime parameter is used for cache validation decisions. + FetchPR(ctx context.Context, owner, repo string, number int, refTime time.Time) (*PullRequestData, error) + + // Name returns the platform identifier (e.g., "github", "gitlab", "codeberg"). 
+ Name() string +} diff --git a/pkg/prx/pullrequest.go b/pkg/prx/pullrequest.go index c2a8f3a..172fb5b 100644 --- a/pkg/prx/pullrequest.go +++ b/pkg/prx/pullrequest.go @@ -37,9 +37,9 @@ type PullRequest struct { MergedAt *time.Time `json:"merged_at,omitempty"` ApprovalSummary *ApprovalSummary `json:"approval_summary,omitempty"` CheckSummary *CheckSummary `json:"check_summary,omitempty"` - Mergeable *bool `json:"mergeable"` + Mergeable *bool `json:"mergeable,omitempty"` // 24-byte slice/map fields - Assignees []string `json:"assignees"` + Assignees []string `json:"assignees,omitempty"` Labels []string `json:"labels,omitempty"` Commits []string `json:"commits,omitempty"` // List of commit SHAs in chronological order (oldest to newest) Reviewers map[string]ReviewState `json:"reviewers,omitempty"` @@ -99,14 +99,14 @@ type PullRequestData struct { PullRequest PullRequest `json:"pull_request"` } -// finalizePullRequest applies final calculations and consistency fixes. -func finalizePullRequest(pullRequest *PullRequest, events []Event, requiredChecks []string, testStateFromAPI string) { +// FinalizePullRequest applies final calculations and consistency fixes. +func FinalizePullRequest(pullRequest *PullRequest, events []Event, requiredChecks []string, testStateFromAPI string) { pullRequest.TestState = testStateFromAPI - pullRequest.CheckSummary = calculateCheckSummary(events, requiredChecks) - pullRequest.ApprovalSummary = calculateApprovalSummary(events) - pullRequest.ParticipantAccess = calculateParticipantAccess(events, pullRequest) + pullRequest.CheckSummary = CalculateCheckSummary(events, requiredChecks) + pullRequest.ApprovalSummary = CalculateApprovalSummary(events) + pullRequest.ParticipantAccess = CalculateParticipantAccess(events, pullRequest) - fixTestState(pullRequest) + FixTestState(pullRequest) // Ensure mergeable is consistent with mergeable_state if pullRequest.MergeableState == "blocked" || pullRequest.MergeableState == "dirty" || pullRequest.MergeableState == "unstable" { @@ -117,8 +117,11 @@ func finalizePullRequest(pullRequest *PullRequest, events []Event, requiredCheck setMergeableDescription(pullRequest) } -// fixTestState ensures test_state is consistent with check_summary. -func fixTestState(pullRequest *PullRequest) { +// FixTestState ensures test_state is consistent with check_summary. +// If CheckSummary has data, it takes precedence. Otherwise, preserve +// the existing TestState (which may have been set from platform-specific +// data like GitLab pipelines). +func FixTestState(pullRequest *PullRequest) { switch { case len(pullRequest.CheckSummary.Failing) > 0 || len(pullRequest.CheckSummary.Cancelled) > 0: pullRequest.TestState = TestStateFailing @@ -127,7 +130,12 @@ func fixTestState(pullRequest *PullRequest) { case len(pullRequest.CheckSummary.Success) > 0: pullRequest.TestState = TestStatePassing default: - pullRequest.TestState = TestStateNone + // Preserve existing TestState if CheckSummary is empty. + // This allows platform-specific test state (e.g., GitLab pipelines) + // to be retained when there are no check_run events. 
+ if pullRequest.TestState == "" { + pullRequest.TestState = TestStateNone + } } } diff --git a/pkg/prx/pullrequest_extended_test.go b/pkg/prx/pullrequest_extended_test.go index c168fce..1ebeda8 100644 --- a/pkg/prx/pullrequest_extended_test.go +++ b/pkg/prx/pullrequest_extended_test.go @@ -118,7 +118,7 @@ func TestFinalizePullRequest(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - finalizePullRequest(&tt.pr, tt.events, tt.requiredChecks, tt.testStateFromAPI) + FinalizePullRequest(&tt.pr, tt.events, tt.requiredChecks, tt.testStateFromAPI) if tt.pr.TestState != tt.wantTestState { t.Errorf("TestState = %v, want %v", tt.pr.TestState, tt.wantTestState) @@ -194,7 +194,7 @@ func TestFixTestState(t *testing.T) { pr := &PullRequest{ CheckSummary: tt.checkSummary, } - fixTestState(pr) + FixTestState(pr) if pr.TestState != tt.wantTestState { t.Errorf("TestState = %v, want %v", pr.TestState, tt.wantTestState) } diff --git a/pkg/prx/question_integration_test.go b/pkg/prx/question_integration_test.go index 0013831..67fed75 100644 --- a/pkg/prx/question_integration_test.go +++ b/pkg/prx/question_integration_test.go @@ -44,7 +44,7 @@ func TestQuestionFieldIntegration(t *testing.T) { Kind: "review", Body: tt.body, Outcome: tt.outcome, - Question: containsQuestion(tt.body), + Question: ContainsQuestion(tt.body), } if event.Question != tt.expected { diff --git a/pkg/prx/question_test.go b/pkg/prx/question_test.go index c7aaa32..5036667 100644 --- a/pkg/prx/question_test.go +++ b/pkg/prx/question_test.go @@ -367,9 +367,9 @@ func TestContainsQuestion(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - result := containsQuestion(tt.input) + result := ContainsQuestion(tt.input) if result != tt.expected { - t.Errorf("containsQuestion(%q) = %v, want %v", tt.input, result, tt.expected) + t.Errorf("ContainsQuestion(%q) = %v, want %v", tt.input, result, tt.expected) } }) } @@ -387,7 +387,7 @@ func BenchmarkContainsQuestion(b *testing.B) { for b.Loop() { for _, tc := range testCases { - _ = containsQuestion(tc) + _ = ContainsQuestion(tc) } } } diff --git a/pkg/prx/status_description_test.go b/pkg/prx/status_description_test.go index 0f89e4d..d8a455b 100644 --- a/pkg/prx/status_description_test.go +++ b/pkg/prx/status_description_test.go @@ -3,27 +3,38 @@ package prx import ( "testing" "time" - - "github.com/codeGROOVE-dev/prx/pkg/prx/github" ) +// testCheckRun is a test-only type that mirrors the structure of GitHub check runs. 
+type testCheckRun struct { + Name string + Status string + Conclusion string + StartedAt time.Time + CompletedAt time.Time + Output struct { + Title string + Summary string + } +} + func TestCheckRunStatusDescriptions(t *testing.T) { tests := []struct { name string - checkRun github.CheckRun + checkRun testCheckRun expectedDesc string expectedOutcome string }{ { name: "check with title and summary", - checkRun: github.CheckRun{ + checkRun: testCheckRun{ Name: "*control", Status: "completed", Conclusion: "failure", CompletedAt: time.Now(), Output: struct { - Title string `json:"title"` - Summary string `json:"summary"` + Title string + Summary string }{ Title: "Plan requires authorisation.", Summary: "Plans submitted by users that are not a member of the organisation require explicit authorisation.", @@ -34,14 +45,14 @@ func TestCheckRunStatusDescriptions(t *testing.T) { }, { name: "check with only title", - checkRun: github.CheckRun{ + checkRun: testCheckRun{ Name: "test-check", Status: "completed", Conclusion: "success", CompletedAt: time.Now(), Output: struct { - Title string `json:"title"` - Summary string `json:"summary"` + Title string + Summary string }{ Title: "All tests passed", }, @@ -51,14 +62,14 @@ func TestCheckRunStatusDescriptions(t *testing.T) { }, { name: "check with only summary", - checkRun: github.CheckRun{ + checkRun: testCheckRun{ Name: "lint-check", Status: "completed", Conclusion: "failure", CompletedAt: time.Now(), Output: struct { - Title string `json:"title"` - Summary string `json:"summary"` + Title string + Summary string }{ Summary: "Found 5 linting errors", }, @@ -68,7 +79,7 @@ func TestCheckRunStatusDescriptions(t *testing.T) { }, { name: "check with no output", - checkRun: github.CheckRun{ + checkRun: testCheckRun{ Name: "basic-check", Status: "completed", Conclusion: "neutral", @@ -79,7 +90,7 @@ func TestCheckRunStatusDescriptions(t *testing.T) { }, { name: "pending check (not completed)", - checkRun: github.CheckRun{ + checkRun: testCheckRun{ Name: "pending-check", Status: "in_progress", StartedAt: time.Now(), @@ -170,7 +181,7 @@ func TestCalculateCheckSummaryWithDescriptions(t *testing.T) { "*control", } - summary := calculateCheckSummary(events, requiredChecks) + summary := CalculateCheckSummary(events, requiredChecks) // Verify counts if len(summary.Success) != 2 { @@ -202,14 +213,14 @@ func TestDropshotPR1359Regression(t *testing.T) { // This test ensures we don't regress on the specific case of Dropshot PR #1359 // where the *control check should show "Plan requires authorisation." 
description - checkRun := github.CheckRun{ + checkRun := testCheckRun{ Name: "*control", Status: "completed", Conclusion: "failure", CompletedAt: time.Date(2025, 6, 25, 15, 44, 14, 0, time.UTC), Output: struct { - Title string `json:"title"` - Summary string `json:"summary"` + Title string + Summary string }{ Title: "Plan requires authorisation.", Summary: "Plans submitted by users that are not a member of the organisation require explicit authorisation.", @@ -245,7 +256,7 @@ func TestDropshotPR1359Regression(t *testing.T) { // Also test that it appears correctly in the check summary events := []Event{event} - summary := calculateCheckSummary(events, []string{}) + summary := CalculateCheckSummary(events, []string{}) if desc, exists := summary.Failing["*control"]; !exists { t.Error("Regression detected: *control not in failing statuses") diff --git a/pkg/prx/upgrade_write_access_test.go b/pkg/prx/upgrade_write_access_test.go index f09dc0f..10e97b2 100644 --- a/pkg/prx/upgrade_write_access_test.go +++ b/pkg/prx/upgrade_write_access_test.go @@ -153,7 +153,7 @@ func TestUpgradeWriteAccess(t *testing.T) { copy(events, tt.events) // Apply the upgrade function - upgradeWriteAccess(events) + UpgradeWriteAccess(events) // Check results - look for events with WriteAccess field for _, event := range events { diff --git a/pkg/prx/url.go b/pkg/prx/url.go new file mode 100644 index 0000000..0e0d919 --- /dev/null +++ b/pkg/prx/url.go @@ -0,0 +1,265 @@ +package prx + +import ( + "errors" + "fmt" + "regexp" + "strconv" + "strings" +) + +// ParsedURL represents a parsed code hosting URL. +type ParsedURL struct { + Platform string // "github", "gitlab", "codeberg" + Host string // e.g., "github.com", "gitlab.com" + Owner string + Repo string + Number int // PR or MR number +} + +// Common platform hosts. +const ( + PlatformGitHub = "github" + PlatformGitLab = "gitlab" + PlatformCodeberg = "codeberg" +) + +var ( + // GitHub PR URL patterns. + githubPRPattern = regexp.MustCompile(`^(?:https?://)?github\.com/([^/]+)/([^/]+)/pull/(\d+)`) + + // GitLab MR URL patterns. + gitlabMRPattern = regexp.MustCompile(`^(?:https?://)?([^/]+)/([^/]+)/([^/]+)/-/merge_requests/(\d+)`) + + // Codeberg (Gitea) PR URL patterns. + codebergPRPattern = regexp.MustCompile(`^(?:https?://)?codeberg\.org/([^/]+)/([^/]+)/pulls/(\d+)`) +) + +var ( + errEmptyURL = errors.New("empty URL") + errInvalidGitHubURL = errors.New("invalid GitHub PR URL format, expected: github.com/owner/repo/pull/123") + errInvalidGitLabURL = errors.New("invalid GitLab MR URL format, expected: gitlab.com/owner/repo/-/merge_requests/123") + errInvalidCodeberg = errors.New("invalid Codeberg PR URL format, expected: codeberg.org/owner/repo/pulls/123") +) + +// ParseURL parses a pull request or merge request URL and returns its components. +// It detects the platform based on the URL structure and host. +func ParseURL(input string) (*ParsedURL, error) { + input = strings.TrimSpace(input) + if input == "" { + return nil, errEmptyURL + } + + // Try GitHub pattern first (most common). + if match := githubPRPattern.FindStringSubmatch(input); match != nil { + number, err := strconv.Atoi(match[3]) + if err != nil { + return nil, errInvalidGitHubURL + } + return &ParsedURL{ + Platform: PlatformGitHub, + Host: "github.com", + Owner: match[1], + Repo: match[2], + Number: number, + }, nil + } + + // Try Codeberg pattern (before GitLab since it's more specific). 
+ if match := codebergPRPattern.FindStringSubmatch(input); match != nil { + number, err := strconv.Atoi(match[3]) + if err != nil { + return nil, errInvalidCodeberg + } + return &ParsedURL{ + Platform: PlatformCodeberg, + Host: "codeberg.org", + Owner: match[1], + Repo: match[2], + Number: number, + }, nil + } + + // Try GitLab pattern (includes self-hosted instances). + if match := gitlabMRPattern.FindStringSubmatch(input); match != nil { + number, err := strconv.Atoi(match[4]) + if err != nil { + return nil, errInvalidGitLabURL + } + return &ParsedURL{ + Platform: PlatformGitLab, + Host: match[1], + Owner: match[2], + Repo: match[3], + Number: number, + }, nil + } + + // Try to provide a helpful error message. + if strings.Contains(input, "github.com") { + return nil, errInvalidGitHubURL + } + if strings.Contains(input, "gitlab") || strings.Contains(input, "merge_requests") { + return nil, errInvalidGitLabURL + } + if strings.Contains(input, "codeberg.org") { + return nil, errInvalidCodeberg + } + + return nil, fmt.Errorf("unrecognized URL format: %s", input) +} + +// DetectPlatform attempts to detect the platform from a host name. +// Returns the platform identifier or empty string if unknown. +func DetectPlatform(host string) string { + host = strings.ToLower(host) + + switch { + case host == "github.com" || strings.HasSuffix(host, ".github.com"): + return PlatformGitHub + case host == "gitlab.com" || strings.Contains(host, "gitlab"): + return PlatformGitLab + case host == "codeberg.org": + return PlatformCodeberg + default: + return "" + } +} + +// BuildGitHubURL constructs a GitHub PR URL from components. +func BuildGitHubURL(owner, repo string, number int) string { + return fmt.Sprintf("https://github.com/%s/%s/pull/%d", owner, repo, number) +} + +// BuildGitLabURL constructs a GitLab MR URL from components. +func BuildGitLabURL(host, owner, repo string, number int) string { + if host == "" { + host = "gitlab.com" + } + return fmt.Sprintf("https://%s/%s/%s/-/merge_requests/%d", host, owner, repo, number) +} + +// BuildCodebergURL constructs a Codeberg PR URL from components. +func BuildCodebergURL(owner, repo string, number int) string { + return fmt.Sprintf("https://codeberg.org/%s/%s/pulls/%d", owner, repo, number) +} + +// BuildGiteaURL constructs a Gitea PR URL from components. +// For Codeberg, use BuildCodebergURL instead. +func BuildGiteaURL(host, owner, repo string, number int) string { + if host == "" { + host = "codeberg.org" + } + return fmt.Sprintf("https://%s/%s/%s/pulls/%d", host, owner, repo, number) +} + +// NormalizeURL takes any supported URL format and returns a normalized URL string. +func NormalizeURL(input string) (string, error) { + parsed, err := ParseURL(input) + if err != nil { + return "", err + } + + switch parsed.Platform { + case PlatformGitHub: + return BuildGitHubURL(parsed.Owner, parsed.Repo, parsed.Number), nil + case PlatformGitLab: + return BuildGitLabURL(parsed.Host, parsed.Owner, parsed.Repo, parsed.Number), nil + case PlatformCodeberg: + return BuildCodebergURL(parsed.Owner, parsed.Repo, parsed.Number), nil + default: + return "", fmt.Errorf("unknown platform: %s", parsed.Platform) + } +} + +// IsValidPRURL returns true if the input appears to be a valid PR/MR URL. +func IsValidPRURL(input string) bool { + _, err := ParseURL(input) + return err == nil +} + +// ExtractShortRef returns a short reference string like "owner/repo#123". 
+func ExtractShortRef(input string) (string, error) { + parsed, err := ParseURL(input) + if err != nil { + return "", err + } + return fmt.Sprintf("%s/%s#%d", parsed.Owner, parsed.Repo, parsed.Number), nil +} + +// ShortRef represents a parsed short reference like "owner/repo#123". +type ShortRef struct { + Owner string + Repo string + Number int +} + +// ParseShortRef parses a short reference like "owner/repo#123" or "owner/repo/123". +// It does not include platform information - that must be provided separately. +func ParseShortRef(ref string) (*ShortRef, error) { + ref = strings.TrimSpace(ref) + + // Try "owner/repo#123" format. + if idx := strings.Index(ref, "#"); idx != -1 { + parts := strings.Split(ref[:idx], "/") + if len(parts) != 2 { + return nil, fmt.Errorf("invalid short reference format: %s", ref) + } + num, err := strconv.Atoi(ref[idx+1:]) + if err != nil { + return nil, fmt.Errorf("invalid PR number in reference: %s", ref) + } + return &ShortRef{Owner: parts[0], Repo: parts[1], Number: num}, nil + } + + // Try "owner/repo/123" format. + parts := strings.Split(ref, "/") + if len(parts) == 3 { + num, err := strconv.Atoi(parts[2]) + if err != nil { + return nil, fmt.Errorf("invalid PR number in reference: %s", ref) + } + return &ShortRef{Owner: parts[0], Repo: parts[1], Number: num}, nil + } + + return nil, fmt.Errorf("invalid short reference format: %s", ref) +} + +// ParsedPR represents the result of parsing a PR reference in any format. +type ParsedPR struct { + Owner string + Repo string + Platform string // Empty if parsed from short ref + Number int +} + +// ParseOwnerRepoPR is a convenience function that accepts multiple input formats: +// full URL, short ref with hash, or short ref with slash. +func ParseOwnerRepoPR(input string) (*ParsedPR, error) { + input = strings.TrimSpace(input) + + // Check if it looks like a URL. + if strings.Contains(input, "://") || strings.Contains(input, ".com") || strings.Contains(input, ".org") { + parsed, err := ParseURL(input) + if err != nil { + return nil, err + } + return &ParsedPR{ + Owner: parsed.Owner, + Repo: parsed.Repo, + Number: parsed.Number, + Platform: parsed.Platform, + }, nil + } + + // Try short ref format. 
+ shortRef, err := ParseShortRef(input) + if err != nil { + return nil, err + } + return &ParsedPR{ + Owner: shortRef.Owner, + Repo: shortRef.Repo, + Number: shortRef.Number, + }, nil +} diff --git a/pkg/prx/url_test.go b/pkg/prx/url_test.go new file mode 100644 index 0000000..18c9583 --- /dev/null +++ b/pkg/prx/url_test.go @@ -0,0 +1,504 @@ +package prx + +import ( + "strings" + "testing" +) + +func TestParseURL(t *testing.T) { + tests := []struct { + name string + input string + want *ParsedURL + wantErr bool + errMatch string + }{ + // GitHub URLs + { + name: "github full URL with https", + input: "https://github.com/owner/repo/pull/123", + want: &ParsedURL{ + Platform: PlatformGitHub, + Host: "github.com", + Owner: "owner", + Repo: "repo", + Number: 123, + }, + }, + { + name: "github URL without scheme", + input: "github.com/owner/repo/pull/456", + want: &ParsedURL{ + Platform: PlatformGitHub, + Host: "github.com", + Owner: "owner", + Repo: "repo", + Number: 456, + }, + }, + { + name: "github URL with http", + input: "http://github.com/kubernetes/kubernetes/pull/99999", + want: &ParsedURL{ + Platform: PlatformGitHub, + Host: "github.com", + Owner: "kubernetes", + Repo: "kubernetes", + Number: 99999, + }, + }, + { + name: "github URL with dashes in names", + input: "https://github.com/my-org/my-repo/pull/1", + want: &ParsedURL{ + Platform: PlatformGitHub, + Host: "github.com", + Owner: "my-org", + Repo: "my-repo", + Number: 1, + }, + }, + + // GitLab URLs + { + name: "gitlab.com MR URL", + input: "https://gitlab.com/owner/repo/-/merge_requests/123", + want: &ParsedURL{ + Platform: PlatformGitLab, + Host: "gitlab.com", + Owner: "owner", + Repo: "repo", + Number: 123, + }, + }, + { + name: "self-hosted gitlab MR URL", + input: "https://gitlab.example.com/team/project/-/merge_requests/456", + want: &ParsedURL{ + Platform: PlatformGitLab, + Host: "gitlab.example.com", + Owner: "team", + Repo: "project", + Number: 456, + }, + }, + + // Codeberg URLs + { + name: "codeberg PR URL", + input: "https://codeberg.org/owner/repo/pulls/123", + want: &ParsedURL{ + Platform: PlatformCodeberg, + Host: "codeberg.org", + Owner: "owner", + Repo: "repo", + Number: 123, + }, + }, + { + name: "codeberg URL without scheme", + input: "codeberg.org/forgejo/forgejo/pulls/789", + want: &ParsedURL{ + Platform: PlatformCodeberg, + Host: "codeberg.org", + Owner: "forgejo", + Repo: "forgejo", + Number: 789, + }, + }, + + // Error cases + { + name: "empty input", + input: "", + wantErr: true, + errMatch: "empty URL", + }, + { + name: "whitespace only", + input: " ", + wantErr: true, + errMatch: "empty URL", + }, + { + name: "invalid github URL - wrong path", + input: "https://github.com/owner/repo/issues/123", + wantErr: true, + errMatch: "invalid GitHub PR URL", + }, + { + name: "invalid gitlab URL - missing merge_requests", + input: "https://gitlab.com/owner/repo/123", + wantErr: true, + errMatch: "invalid GitLab MR URL", + }, + { + name: "random URL", + input: "https://example.com/foo/bar", + wantErr: true, + errMatch: "unrecognized URL format", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := ParseURL(tt.input) + + if tt.wantErr { + if err == nil { + t.Errorf("ParseURL() expected error containing %q, got nil", tt.errMatch) + return + } + if tt.errMatch != "" && !strings.Contains(err.Error(), tt.errMatch) { + t.Errorf("ParseURL() error = %q, want error containing %q", err.Error(), tt.errMatch) + } + return + } + + if err != nil { + t.Errorf("ParseURL() unexpected error: 
%v", err) + return + } + + if got.Platform != tt.want.Platform { + t.Errorf("Platform = %q, want %q", got.Platform, tt.want.Platform) + } + if got.Host != tt.want.Host { + t.Errorf("Host = %q, want %q", got.Host, tt.want.Host) + } + if got.Owner != tt.want.Owner { + t.Errorf("Owner = %q, want %q", got.Owner, tt.want.Owner) + } + if got.Repo != tt.want.Repo { + t.Errorf("Repo = %q, want %q", got.Repo, tt.want.Repo) + } + if got.Number != tt.want.Number { + t.Errorf("Number = %d, want %d", got.Number, tt.want.Number) + } + }) + } +} + +func TestDetectPlatform(t *testing.T) { + tests := []struct { + host string + want string + }{ + {"github.com", PlatformGitHub}, + {"GITHUB.COM", PlatformGitHub}, + {"api.github.com", PlatformGitHub}, + {"gitlab.com", PlatformGitLab}, + {"gitlab.example.com", PlatformGitLab}, + {"my-gitlab.internal", PlatformGitLab}, + {"codeberg.org", PlatformCodeberg}, + {"example.com", ""}, + {"bitbucket.org", ""}, + } + + for _, tt := range tests { + t.Run(tt.host, func(t *testing.T) { + got := DetectPlatform(tt.host) + if got != tt.want { + t.Errorf("DetectPlatform(%q) = %q, want %q", tt.host, got, tt.want) + } + }) + } +} + +func TestBuildURLs(t *testing.T) { + t.Run("GitHub", func(t *testing.T) { + got := BuildGitHubURL("owner", "repo", 123) + want := "https://github.com/owner/repo/pull/123" + if got != want { + t.Errorf("BuildGitHubURL() = %q, want %q", got, want) + } + }) + + t.Run("GitLab default host", func(t *testing.T) { + got := BuildGitLabURL("", "owner", "repo", 456) + want := "https://gitlab.com/owner/repo/-/merge_requests/456" + if got != want { + t.Errorf("BuildGitLabURL() = %q, want %q", got, want) + } + }) + + t.Run("GitLab custom host", func(t *testing.T) { + got := BuildGitLabURL("gitlab.example.com", "team", "project", 789) + want := "https://gitlab.example.com/team/project/-/merge_requests/789" + if got != want { + t.Errorf("BuildGitLabURL() = %q, want %q", got, want) + } + }) + + t.Run("Codeberg", func(t *testing.T) { + got := BuildCodebergURL("forgejo", "forgejo", 999) + want := "https://codeberg.org/forgejo/forgejo/pulls/999" + if got != want { + t.Errorf("BuildCodebergURL() = %q, want %q", got, want) + } + }) + + t.Run("Gitea default host", func(t *testing.T) { + got := BuildGiteaURL("", "owner", "repo", 123) + want := "https://codeberg.org/owner/repo/pulls/123" + if got != want { + t.Errorf("BuildGiteaURL() = %q, want %q", got, want) + } + }) + + t.Run("Gitea custom host", func(t *testing.T) { + got := BuildGiteaURL("gitea.example.com", "team", "project", 456) + want := "https://gitea.example.com/team/project/pulls/456" + if got != want { + t.Errorf("BuildGiteaURL() = %q, want %q", got, want) + } + }) +} + +func TestNormalizeURL(t *testing.T) { + tests := []struct { + name string + input string + want string + wantErr bool + }{ + { + name: "github without scheme", + input: "github.com/owner/repo/pull/123", + want: "https://github.com/owner/repo/pull/123", + }, + { + name: "gitlab", + input: "https://gitlab.com/owner/repo/-/merge_requests/456", + want: "https://gitlab.com/owner/repo/-/merge_requests/456", + }, + { + name: "invalid URL", + input: "not-a-url", + wantErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := NormalizeURL(tt.input) + if tt.wantErr { + if err == nil { + t.Error("NormalizeURL() expected error, got nil") + } + return + } + if err != nil { + t.Errorf("NormalizeURL() unexpected error: %v", err) + return + } + if got != tt.want { + t.Errorf("NormalizeURL() = %q, want %q", got, 
tt.want) + } + }) + } +} + +func TestIsValidPRURL(t *testing.T) { + tests := []struct { + input string + want bool + }{ + {"https://github.com/owner/repo/pull/123", true}, + {"https://gitlab.com/owner/repo/-/merge_requests/456", true}, + {"https://codeberg.org/owner/repo/pulls/789", true}, + {"not-a-url", false}, + {"https://example.com/foo", false}, + } + + for _, tt := range tests { + t.Run(tt.input, func(t *testing.T) { + got := IsValidPRURL(tt.input) + if got != tt.want { + t.Errorf("IsValidPRURL(%q) = %v, want %v", tt.input, got, tt.want) + } + }) + } +} + +func TestExtractShortRef(t *testing.T) { + tests := []struct { + input string + want string + wantErr bool + }{ + {"https://github.com/kubernetes/kubernetes/pull/123", "kubernetes/kubernetes#123", false}, + {"https://gitlab.com/owner/repo/-/merge_requests/456", "owner/repo#456", false}, + {"invalid", "", true}, + } + + for _, tt := range tests { + t.Run(tt.input, func(t *testing.T) { + got, err := ExtractShortRef(tt.input) + if tt.wantErr { + if err == nil { + t.Error("ExtractShortRef() expected error, got nil") + } + return + } + if err != nil { + t.Errorf("ExtractShortRef() unexpected error: %v", err) + return + } + if got != tt.want { + t.Errorf("ExtractShortRef() = %q, want %q", got, tt.want) + } + }) + } +} + +func TestParseShortRef(t *testing.T) { + tests := []struct { + name string + ref string + wantOwner string + wantRepo string + wantNumber int + wantErr bool + }{ + { + name: "hash format", + ref: "owner/repo#123", + wantOwner: "owner", + wantRepo: "repo", + wantNumber: 123, + }, + { + name: "slash format", + ref: "owner/repo/456", + wantOwner: "owner", + wantRepo: "repo", + wantNumber: 456, + }, + { + name: "with whitespace", + ref: " owner/repo#789 ", + wantOwner: "owner", + wantRepo: "repo", + wantNumber: 789, + }, + { + name: "invalid format - no number", + ref: "owner/repo", + wantErr: true, + }, + { + name: "invalid format - not a number", + ref: "owner/repo#abc", + wantErr: true, + }, + { + name: "invalid format - too many parts", + ref: "a/b/c/d", + wantErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := ParseShortRef(tt.ref) + if tt.wantErr { + if err == nil { + t.Error("ParseShortRef() expected error, got nil") + } + return + } + if err != nil { + t.Errorf("ParseShortRef() unexpected error: %v", err) + return + } + if got.Owner != tt.wantOwner { + t.Errorf("owner = %q, want %q", got.Owner, tt.wantOwner) + } + if got.Repo != tt.wantRepo { + t.Errorf("repo = %q, want %q", got.Repo, tt.wantRepo) + } + if got.Number != tt.wantNumber { + t.Errorf("number = %d, want %d", got.Number, tt.wantNumber) + } + }) + } +} + +func TestParseOwnerRepoPR(t *testing.T) { + tests := []struct { + name string + input string + wantOwner string + wantRepo string + wantNumber int + wantPlatform string + wantErr bool + }{ + { + name: "full github URL", + input: "https://github.com/owner/repo/pull/123", + wantOwner: "owner", + wantRepo: "repo", + wantNumber: 123, + wantPlatform: PlatformGitHub, + }, + { + name: "short ref with hash", + input: "owner/repo#456", + wantOwner: "owner", + wantRepo: "repo", + wantNumber: 456, + wantPlatform: "", // No platform for short refs + }, + { + name: "short ref with slash", + input: "owner/repo/789", + wantOwner: "owner", + wantRepo: "repo", + wantNumber: 789, + wantPlatform: "", + }, + { + name: "gitlab URL", + input: "https://gitlab.com/team/project/-/merge_requests/999", + wantOwner: "team", + wantRepo: "project", + wantNumber: 999, + wantPlatform: 
PlatformGitLab, + }, + { + name: "invalid", + input: "not-valid", + wantErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := ParseOwnerRepoPR(tt.input) + if tt.wantErr { + if err == nil { + t.Error("ParseOwnerRepoPR() expected error, got nil") + } + return + } + if err != nil { + t.Errorf("ParseOwnerRepoPR() unexpected error: %v", err) + return + } + if got.Owner != tt.wantOwner { + t.Errorf("owner = %q, want %q", got.Owner, tt.wantOwner) + } + if got.Repo != tt.wantRepo { + t.Errorf("repo = %q, want %q", got.Repo, tt.wantRepo) + } + if got.Number != tt.wantNumber { + t.Errorf("number = %d, want %d", got.Number, tt.wantNumber) + } + if got.Platform != tt.wantPlatform { + t.Errorf("platform = %q, want %q", got.Platform, tt.wantPlatform) + } + }) + } +} diff --git a/pkg/prx/utils.go b/pkg/prx/utils.go index f737690..1a48854 100644 --- a/pkg/prx/utils.go +++ b/pkg/prx/utils.go @@ -120,10 +120,10 @@ func initQuestionRegexes() { } } -// containsQuestion determines if text contains a question based on: +// ContainsQuestion determines if text contains a question based on: // 1. Presence of a question mark // 2. Common question patterns with proper word boundaries. -func containsQuestion(text string) bool { +func ContainsQuestion(text string) bool { // Quick check for question mark if strings.Contains(text, "?") { return true @@ -158,7 +158,8 @@ func isHexString(s string) bool { return true } -func truncate(s string) string { +// Truncate returns the first maxTruncateLength characters of s. +func Truncate(s string) string { if len(s) <= maxTruncateLength { return s } diff --git a/pkg/prx/utils_test.go b/pkg/prx/utils_test.go index 277ebe1..9df987f 100644 --- a/pkg/prx/utils_test.go +++ b/pkg/prx/utils_test.go @@ -181,7 +181,7 @@ func TestCalculateCheckSummaryWithMaps(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - summary := calculateCheckSummary(tt.events, tt.requiredChecks) + summary := CalculateCheckSummary(tt.events, tt.requiredChecks) if !reflect.DeepEqual(summary.Success, tt.expectedSuccess) { t.Errorf("Success mismatch\ngot: %v\nwant: %v", summary.Success, tt.expectedSuccess) @@ -210,7 +210,7 @@ func TestCalculateCheckSummaryWithMaps(t *testing.T) { func TestCheckSummaryInitialization(t *testing.T) { // Test that maps are properly initialized even with no events - summary := calculateCheckSummary([]Event{}, []string{}) + summary := CalculateCheckSummary([]Event{}, []string{}) if summary.Success == nil { t.Error("Success map should be initialized, not nil") @@ -402,7 +402,7 @@ func TestCalculateApprovalSummaryWriteAccessCategories(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - summary := calculateApprovalSummary(tt.events) + summary := CalculateApprovalSummary(tt.events) if summary.ApprovalsWithWriteAccess != tt.expectedWithAccess { t.Errorf("ApprovalsWithWriteAccess: got %d, want %d", @@ -458,7 +458,7 @@ func TestCheckSummaryCancelledNotInFailing(t *testing.T) { }, } - summary := calculateCheckSummary(events, []string{}) + summary := CalculateCheckSummary(events, []string{}) // Verify cancelled check is ONLY in cancelled map if _, exists := summary.Cancelled["Test (macos-latest)"]; !exists {