diff --git a/Cargo.lock b/Cargo.lock index 3da5d6e7..62f2331c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -31,6 +31,15 @@ version = "0.2.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923" +[[package]] +name = "android_system_properties" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "819e7219dbd41043ac279b19830f2efc897156490d7fd6ea916720117ee66311" +dependencies = [ + "libc", +] + [[package]] name = "anstream" version = "1.0.0" @@ -206,6 +215,17 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724" +[[package]] +name = "chrono" +version = "0.4.44" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c673075a2e0e5f4a1dde27ce9dee1ea4558c7ffe648f576438a20ca1d2acc4b0" +dependencies = [ + "iana-time-zone", + "num-traits", + "windows-link", +] + [[package]] name = "clap" version = "4.6.1" @@ -720,6 +740,30 @@ dependencies = [ "tracing", ] +[[package]] +name = "iana-time-zone" +version = "0.1.65" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e31bc9ad994ba00e440a8aa5c9ef0ec67d5cb5e5cb0cc7f8b744a35b389cc470" +dependencies = [ + "android_system_properties", + "core-foundation-sys", + "iana-time-zone-haiku", + "js-sys", + "log", + "wasm-bindgen", + "windows-core", +] + +[[package]] +name = "iana-time-zone-haiku" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f" +dependencies = [ + "cc", +] + [[package]] name = "icu_collections" version = "2.1.1" @@ -1085,6 +1129,9 @@ dependencies = [ name = "mergify-queue" version = "0.0.0" dependencies = [ + "anstyle", + "chrono", + "indexmap", "mergify-core", "serde", "serde_json", @@ -2307,12 +2354,65 @@ dependencies = 
[ "windows-sys 0.52.0", ] +[[package]] +name = "windows-core" +version = "0.62.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8e83a14d34d0623b51dce9581199302a221863196a1dde71a7663a4c2be9deb" +dependencies = [ + "windows-implement", + "windows-interface", + "windows-link", + "windows-result", + "windows-strings", +] + +[[package]] +name = "windows-implement" +version = "0.60.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "053e2e040ab57b9dc951b72c264860db7eb3b0200ba345b4e4c3b14f67855ddf" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "windows-interface" +version = "0.59.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f316c4a2570ba26bbec722032c4099d8c8bc095efccdc15688708623367e358" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "windows-link" version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f0805222e57f7521d6a62e36fa9163bc891acd422f971defe97d64e70d0a4fe5" +[[package]] +name = "windows-result" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7781fa89eaf60850ac3d2da7af8e5242a5ea78d1a11c49bf2910bb5a73853eb5" +dependencies = [ + "windows-link", +] + +[[package]] +name = "windows-strings" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7837d08f69c77cf6b07689544538e017c1bfcf57e34b4c0ff58e6c2cd3b37091" +dependencies = [ + "windows-link", +] + [[package]] name = "windows-sys" version = "0.52.0" diff --git a/crates/mergify-cli/src/main.rs b/crates/mergify-cli/src/main.rs index b3e658af..53f47f99 100644 --- a/crates/mergify-cli/src/main.rs +++ b/crates/mergify-cli/src/main.rs @@ -27,6 +27,7 @@ use mergify_config::simulate::SimulateOptions; use mergify_core::OutputMode; use mergify_core::StdioOutput; use mergify_queue::pause::PauseOptions; +use 
mergify_queue::status::StatusOptions; use mergify_queue::unpause::UnpauseOptions; fn main() -> ExitCode { @@ -71,6 +72,7 @@ enum NativeCommand { CiQueueInfo, QueuePause(QueuePauseOpts), QueueUnpause(QueueUnpauseOpts), + QueueStatus(QueueStatusOpts), } struct ConfigSimulateOpts { @@ -105,6 +107,14 @@ struct QueueUnpauseOpts { api_url: Option, } +struct QueueStatusOpts { + repository: Option, + token: Option, + api_url: Option, + branch: Option, + output_json: bool, +} + /// Heuristic: does argv look like the user intended a native /// subcommand? /// @@ -120,7 +130,7 @@ fn looks_native(argv: &[String]) -> bool { (pair[0].as_str(), pair[1].as_str()), ("config", "validate" | "simulate") | ("ci", "scopes-send" | "git-refs" | "queue-info") - | ("queue", "pause" | "unpause"), + | ("queue", "pause" | "unpause" | "status"), ) }) } @@ -255,6 +265,18 @@ fn detect_native(argv: &[String]) -> Option { token, api_url, })), + Subcommands::Queue(QueueArgs { + repository, + token, + api_url, + command: QueueSubcommand::Status(StatusCliArgs { branch, json }), + }) => Some(NativeCommand::QueueStatus(QueueStatusOpts { + repository, + token, + api_url, + branch, + output_json: json, + })), } } @@ -333,6 +355,19 @@ fn run_native(cmd: NativeCommand) -> ExitCode { ) .await } + NativeCommand::QueueStatus(opts) => { + mergify_queue::status::run( + StatusOptions { + repository: opts.repository.as_deref(), + token: opts.token.as_deref(), + api_url: opts.api_url.as_deref(), + branch: opts.branch.as_deref(), + output_json: opts.output_json, + }, + &mut output, + ) + .await + } } }); @@ -504,6 +539,8 @@ enum QueueSubcommand { Pause(PauseCliArgs), /// Unpause the merge queue for the repository. Unpause, + /// Show merge queue status for the repository. 
+ Status(StatusCliArgs), } #[derive(clap::Args)] @@ -517,3 +554,14 @@ struct PauseCliArgs { #[arg(long = "yes-i-am-sure", default_value_t = false)] yes_i_am_sure: bool, } + +#[derive(clap::Args)] +struct StatusCliArgs { + /// Filter the queue by branch name. + #[arg(long, short = 'b')] + branch: Option, + + /// Emit the raw API response as a single JSON document. + #[arg(long, default_value_t = false)] + json: bool, +} diff --git a/crates/mergify-queue/Cargo.toml b/crates/mergify-queue/Cargo.toml index bdd0ceca..bd0761b3 100644 --- a/crates/mergify-queue/Cargo.toml +++ b/crates/mergify-queue/Cargo.toml @@ -11,11 +11,14 @@ publish = false [dependencies] mergify-core = { path = "../mergify-core" } +anstyle = "1" +chrono = { version = "0.4", default-features = false, features = ["clock"] } +indexmap = "2" serde = { version = "1.0", features = ["derive"] } +serde_json = "1.0" url = "2" [dev-dependencies] -serde_json = "1.0" tokio = { version = "1", default-features = false, features = ["macros", "rt", "time"] } wiremock = "0.6" diff --git a/crates/mergify-queue/src/lib.rs b/crates/mergify-queue/src/lib.rs index 1de93ff8..96455cb4 100644 --- a/crates/mergify-queue/src/lib.rs +++ b/crates/mergify-queue/src/lib.rs @@ -1,11 +1,14 @@ //! Native Rust implementation of the `mergify queue` subcommands. //! -//! Phase 1.5 ports `pause` and `unpause` — two idempotent API -//! calls that rest on the HTTP client added in 1.2b and the new +//! Phase 1.5 ported `pause` and `unpause` — two idempotent API +//! calls that rest on the HTTP client added in 1.2b and the //! `put`/`delete_if_exists` methods added alongside this crate. -//! `queue status` and `queue show` stay shimmed until their -//! JSON-output contracts are locked (they carry considerable -//! structured data and want careful schema work). +//! Phase 1.7 ports `status`, the read-only command that fetches +//! the merge-queue snapshot and renders it either as a JSON +//! 
passthrough or as the human-friendly batch tree + waiting list. +//! `queue show` stays shimmed until its conditions/checks tree +//! ports next. pub mod pause; +pub mod status; pub mod unpause; diff --git a/crates/mergify-queue/src/status.rs b/crates/mergify-queue/src/status.rs new file mode 100644 index 00000000..881e33b1 --- /dev/null +++ b/crates/mergify-queue/src/status.rs @@ -0,0 +1,1069 @@ +//! `mergify queue status` — show merge queue status for a repository. +//! +//! `GET /v1/repos//merge-queue/status[?branch=]`. Two +//! output modes: +//! +//! - `--json`: pretty-prints the raw API response as a single JSON +//! document. The schema is Mergify's API contract, not this CLI's, +//! so unknown fields are preserved (deserialize to +//! `serde_json::Value`, emit verbatim). +//! - Human (default): a header, an optional pause indicator, the +//! batch tree (grouped by scope when there is more than one), and +//! the waiting-PR list. Status icons and relative times match the +//! Python implementation. +//! +//! The command does not assume the response shape beyond the fields +//! it actively renders: every nested struct uses +//! `#[serde(default)] Option<…>` for fields the API has historically +//! treated as optional/nullable, so a missing field doesn't abort +//! deserialization. +//! +//! Exit codes: +//! +//! - `0` on a successful render (queue empty, paused, or active). +//! - Standard `CliError` exit codes on auth, API, or +//! parse/serialization errors. 
+
+use std::collections::HashMap;
+use std::collections::HashSet;
+use std::io::IsTerminal;
+use std::io::Write;
+
+use anstyle::AnsiColor;
+use anstyle::Style;
+use chrono::DateTime;
+use chrono::Utc;
+use indexmap::IndexMap;
+use mergify_core::ApiFlavor;
+use mergify_core::CliError;
+use mergify_core::HttpClient;
+use mergify_core::Output;
+use mergify_core::auth;
+use serde::Deserialize;
+use url::form_urlencoded;
+
+pub struct StatusOptions<'a> {
+    pub repository: Option<&'a str>,
+    pub token: Option<&'a str>,
+    pub api_url: Option<&'a str>,
+    pub branch: Option<&'a str>,
+    pub output_json: bool,
+}
+
+// All view structs use `#[serde(default)] Option<…>` for fields the
+// API has historically treated as optional/nullable. The wire format
+// is Mergify's API contract — we deserialize only the fields we
+// render and accept everything else implicitly via the
+// `serde_json::Value` passthrough used in JSON mode.
+#[derive(Deserialize)]
+struct StatusView {
+    #[serde(default)]
+    pause: Option<Pause>,
+    #[serde(default)]
+    batches: Vec<Batch>,
+    #[serde(default)]
+    waiting_pull_requests: Vec<PullRequest>,
+}
+
+#[derive(Deserialize)]
+struct Pause {
+    #[serde(default)]
+    reason: Option<String>,
+    #[serde(default)]
+    paused_at: Option<String>,
+}
+
+#[derive(Deserialize)]
+struct Batch {
+    id: String,
+    #[serde(default)]
+    parent_ids: Vec<String>,
+    #[serde(default)]
+    scopes: Vec<String>,
+    status: BatchStatus,
+    #[serde(default)]
+    started_at: Option<String>,
+    #[serde(default)]
+    estimated_merge_at: Option<String>,
+    checks_summary: ChecksSummary,
+    #[serde(default)]
+    pull_requests: Vec<PullRequest>,
+}
+
+#[derive(Deserialize)]
+struct BatchStatus {
+    code: String,
+}
+
+#[derive(Deserialize)]
+struct ChecksSummary {
+    #[serde(default)]
+    passed: u64,
+    #[serde(default)]
+    total: u64,
+}
+
+#[derive(Deserialize)]
+struct PullRequest {
+    number: u64,
+    title: String,
+    author: Author,
+    #[serde(default)]
+    queued_at: Option<String>,
+    #[serde(default)]
+    priority_alias: Option<String>,
+    #[serde(default)]
+    estimated_merge_at: Option<String>,
+}
+
+#[derive(Deserialize)] +struct Author { + login: String, +} + +/// Run the `queue status` command. +pub async fn run(opts: StatusOptions<'_>, output: &mut dyn Output) -> Result<(), CliError> { + let repository = auth::resolve_repository(opts.repository)?; + let token = auth::resolve_token(opts.token)?; + let api_url = auth::resolve_api_url(opts.api_url)?; + + output.status(&format!("Fetching merge queue status for {repository}…"))?; + + let client = HttpClient::new(api_url, token, ApiFlavor::Mergify)?; + let path = build_path(&repository, opts.branch); + + let raw: serde_json::Value = client.get(&path).await?; + + if opts.output_json { + emit_json(output, &raw)?; + } else { + let view: StatusView = serde_json::from_value(raw) + .map_err(|e| CliError::Generic(format!("decode merge queue status response: {e}")))?; + emit_human(output, &repository, &view)?; + } + Ok(()) +} + +fn build_path(repository: &str, branch: Option<&str>) -> String { + let mut path = format!("/v1/repos/{repository}/merge-queue/status"); + if let Some(branch) = branch { + // form_urlencoded::byte_serialize handles spaces, unicode and + // reserved characters. Unencoded slashes are tolerated by + // most servers but encoding is the safe contract. 
+        let encoded: String = form_urlencoded::byte_serialize(branch.as_bytes()).collect();
+        path.push_str("?branch=");
+        path.push_str(&encoded);
+    }
+    path
+}
+
+fn emit_json(output: &mut dyn Output, value: &serde_json::Value) -> std::io::Result<()> {
+    output.emit(value, &mut |w: &mut dyn Write| {
+        let rendered = serde_json::to_string_pretty(value)
+            .map_err(|e| std::io::Error::other(e.to_string()))?;
+        writeln!(w, "{rendered}")
+    })
+}
+
+fn emit_human(output: &mut dyn Output, repository: &str, view: &StatusView) -> std::io::Result<()> {
+    let now = Utc::now();
+    let theme = Theme::detect();
+    output.emit(&(), &mut |w: &mut dyn Write| {
+        writeln!(
+            w,
+            "{B}Merge Queue: {repository}{R}",
+            B = theme.bold,
+            R = theme.reset
+        )?;
+        writeln!(w)?;
+
+        if let Some(pause) = &view.pause {
+            print_pause(w, &theme, pause, now)?;
+            writeln!(w)?;
+        }
+
+        if view.batches.is_empty() && view.waiting_pull_requests.is_empty() {
+            writeln!(w, "{D}Queue is empty{R}", D = theme.dim, R = theme.reset)?;
+            return Ok(());
+        }
+
+        if !view.batches.is_empty() {
+            print_batches(w, &theme, &view.batches, now)?;
+        }
+
+        if !view.waiting_pull_requests.is_empty() {
+            if !view.batches.is_empty() {
+                writeln!(w)?;
+            }
+            print_waiting_prs(w, &theme, &view.waiting_pull_requests, now)?;
+        }
+        Ok(())
+    })
+}
+
+/// ANSI styling — opt-in based on stdout being a TTY and `NO_COLOR`
+/// being unset (the de-facto standard, <https://no-color.org>).
+///
+/// `anstyle::Style::new()` (the disabled variant for every field)
+/// emits no escape sequences in its `Display` impl, so the same
+/// formatting code paths produce plain text in non-TTY contexts.
+struct Theme {
+    enabled: bool,
+    bold: Style,
+    dim: Style,
+    /// SGR reset escape, or empty when colors are disabled. Using a
+    /// `&'static str` instead of `anstyle::Reset` keeps both the
+    /// styled and plain code paths free of escape sequences when
+    /// `enabled = false`.
+    reset: &'static str,
+    pr_number: Style,
+    author: Style,
+    priority: Style,
+    relative: Style,
+    pause_warn: Style,
+}
+
+impl Theme {
+    fn detect() -> Self {
+        // `cfg!(test)` makes the unit tests deterministic: when
+        // `cargo test` runs from a terminal the parent stdout *is* a
+        // TTY, but tests write into in-memory buffers and asserting
+        // on raw output shouldn't depend on the developer's terminal.
+        let enabled = !cfg!(test)
+            && std::io::stdout().is_terminal()
+            && std::env::var_os("NO_COLOR").is_none();
+        Self::new(enabled)
+    }
+
+    fn new(enabled: bool) -> Self {
+        let on = |style: Style| if enabled { style } else { Style::new() };
+        Self {
+            enabled,
+            bold: on(Style::new().bold()),
+            dim: on(Style::new().dimmed()),
+            reset: if enabled { "\x1b[0m" } else { "" },
+            pr_number: on(Style::new().fg_color(Some(AnsiColor::Cyan.into()))),
+            author: on(Style::new().dimmed()),
+            priority: on(Style::new().fg_color(Some(AnsiColor::Magenta.into()))),
+            relative: on(Style::new().dimmed()),
+            pause_warn: on(Style::new().bold().fg_color(Some(AnsiColor::Yellow.into()))),
+        }
+    }
+
+    /// Return the per-state foreground color for a batch-status icon.
+    /// Mirrors Python's `STATUS_STYLES` map; unknown codes render
+    /// dimmed (no explicit foreground color of their own).
+    fn icon_style(&self, code: &str) -> Style {
+        if !self.enabled {
+            return Style::new();
+        }
+        let color = match code {
+            "running" | "merged" => Some(AnsiColor::Green),
+            "failed" => Some(AnsiColor::Red),
+            "bisecting"
+            | "preparing"
+            | "waiting_for_previous_batches"
+            | "waiting_for_requeue"
+            | "waiting_schedule" => Some(AnsiColor::Yellow),
+            "waiting_for_merge" | "frozen" => Some(AnsiColor::Cyan),
+            _ => None,
+        };
+        match color {
+            Some(c) => Style::new().fg_color(Some(c.into())),
+            None => Style::new().dimmed(),
+        }
+    }
+}
+
+fn print_pause(
+    w: &mut dyn Write,
+    theme: &Theme,
+    pause: &Pause,
+    now: DateTime<Utc>,
+) -> std::io::Result<()> {
+    let reason = pause.reason.as_deref().unwrap_or("");
+    write!(
+        w,
+        "{W}⚠ Queue is paused: \"{reason}\"{R}",
+        W = theme.pause_warn,
+        R = theme.reset,
+    )?;
+    if let Some(ts) = &pause.paused_at {
+        let rel = relative_time(ts, now, false);
+        if !rel.is_empty() {
+            write!(w, " {D}(since {rel}){R}", D = theme.dim, R = theme.reset)?;
+        }
+    }
+    writeln!(w)
+}
+
+fn print_batches(
+    w: &mut dyn Write,
+    theme: &Theme,
+    batches: &[Batch],
+    now: DateTime<Utc>,
+) -> std::io::Result<()> {
+    let sorted = topological_sort(batches);
+    let groups = group_by_scope(&sorted);
+    let single_scope = groups.len() == 1;
+
+    for (i, (scope, scope_batches)) in groups.iter().enumerate() {
+        if i > 0 {
+            writeln!(w)?;
+        }
+        let label = if single_scope {
+            "Batches"
+        } else {
+            scope.as_str()
+        };
+        writeln!(w, "{B}{label}{R}", B = theme.bold, R = theme.reset)?;
+
+        let last_batch_idx = scope_batches.len() - 1;
+        for (bi, batch) in scope_batches.iter().enumerate() {
+            let is_last_batch = bi == last_batch_idx;
+            // `├──`/`└──` mark the batch row; the continuation
+            // column is `│   ` for non-last batches and `    ` for
+            // the last so the tree closes cleanly.
+            let branch = if is_last_batch {
+                "└── "
+            } else {
+                "├── "
+            };
+            let continuation = if is_last_batch { "    " } else { "│   " };
+            print_batch_line(w, theme, branch, batch, now)?;
+            print_batch_prs(w, theme, continuation, batch)?;
+        }
+    }
+    Ok(())
+}
+
+fn print_batch_line(
+    w: &mut dyn Write,
+    theme: &Theme,
+    branch: &str,
+    batch: &Batch,
+    now: DateTime<Utc>,
+) -> std::io::Result<()> {
+    let icon = status_icon(&batch.status.code);
+    let icon_style = theme.icon_style(&batch.status.code);
+    write!(
+        w,
+        "{branch}{S}{icon} {code}{R}",
+        S = icon_style,
+        code = batch.status.code,
+        R = theme.reset,
+    )?;
+    if batch.checks_summary.total > 0 {
+        write!(
+            w,
+            " {D}checks {p}/{t}{R}",
+            D = theme.dim,
+            p = batch.checks_summary.passed,
+            t = batch.checks_summary.total,
+            R = theme.reset,
+        )?;
+    }
+    if let Some(started) = &batch.started_at {
+        let rel = relative_time(started, now, false);
+        if !rel.is_empty() {
+            write!(w, " {D}{rel}{R}", D = theme.relative, R = theme.reset)?;
+        }
+    }
+    if let Some(eta) = &batch.estimated_merge_at {
+        let rel = relative_time(eta, now, true);
+        if !rel.is_empty() {
+            write!(w, " {D}ETA {rel}{R}", D = theme.relative, R = theme.reset)?;
+        }
+    }
+    writeln!(w)
+}
+
+fn print_batch_prs(
+    w: &mut dyn Write,
+    theme: &Theme,
+    continuation: &str,
+    batch: &Batch,
+) -> std::io::Result<()> {
+    if batch.pull_requests.is_empty() {
+        return Ok(());
+    }
+    let last_pr_idx = batch.pull_requests.len() - 1;
+    for (pi, pr) in batch.pull_requests.iter().enumerate() {
+        let pr_branch = if pi == last_pr_idx {
+            "└── "
+        } else {
+            "├── "
+        };
+        writeln!(
+            w,
+            "{continuation}{pr_branch}{N}#{num}{R} {title} {A}({author}){R}",
+            N = theme.pr_number,
+            num = pr.number,
+            title = pr.title,
+            A = theme.author,
+            author = pr.author.login,
+            R = theme.reset,
+        )?;
+    }
+    Ok(())
+}
+
+fn print_waiting_prs(
+    w: &mut dyn Write,
+    theme: &Theme,
+    prs: &[PullRequest],
+    now: DateTime<Utc>,
+) -> std::io::Result<()> {
+    writeln!(w, "{B}Waiting{R}", B = theme.bold, R = theme.reset)?;
+    for pr in prs {
+        write!(
+            w,
+            " {N}#{num}{R} {title} {A}{author}{R}",
+            N = theme.pr_number,
+            num = pr.number,
+            title = pr.title,
+            A = theme.author,
+            author = pr.author.login,
+            R = theme.reset,
+        )?;
+        if let Some(prio) = &pr.priority_alias {
+            write!(w, " {P}{prio}{R}", P = theme.priority, R = theme.reset)?;
+        }
+        if let Some(queued_at) = &pr.queued_at {
+            let rel = relative_time(queued_at, now, false);
+            if !rel.is_empty() {
+                write!(
+                    w,
+                    " {D}queued {rel}{R}",
+                    D = theme.relative,
+                    R = theme.reset,
+                )?;
+            }
+        }
+        if let Some(eta) = &pr.estimated_merge_at {
+            let rel = relative_time(eta, now, true);
+            if !rel.is_empty() {
+                write!(w, " {D}ETA {rel}{R}", D = theme.relative, R = theme.reset)?;
+            }
+        }
+        writeln!(w)?;
+    }
+    Ok(())
+}
+
+/// Map a batch-status code to a compact Unicode icon. Same icons as
+/// the Python implementation; unknown codes fall back to `?`.
+fn status_icon(code: &str) -> &'static str {
+    match code {
+        "running" => "●",
+        "bisecting" => "◑",
+        "preparing" | "waiting_for_batch" => "◌",
+        "failed" => "✗",
+        "merged" => "✓",
+        "waiting_for_merge" => "◎",
+        "waiting_for_previous_batches" | "waiting_for_requeue" => "⏳",
+        "waiting_schedule" => "⏰",
+        "frozen" => "❄",
+        _ => "?",
+    }
+}
+
+/// Format an ISO-8601/RFC-3339 timestamp as a relative duration
+/// (`s`/`m`/`h`/`d`). Past timestamps render as `"… ago"`; future
+/// timestamps as `"~…"` when `future = true`.
+///
+/// Returns an empty string when the timestamp can't be parsed —
+/// mirrors the Python implementation, which silently degrades on
+/// malformed input rather than failing the whole render.
+fn relative_time(iso: &str, now: DateTime<Utc>, future: bool) -> String {
+    let Ok(parsed) = DateTime::parse_from_rfc3339(iso) else {
+        return String::new();
+    };
+    let parsed = parsed.with_timezone(&Utc);
+    let delta = (now - parsed).num_seconds().abs();
+    let value = if delta < 60 {
+        format!("{delta}s")
+    } else if delta < 3600 {
+        format!("{}m", delta / 60)
+    } else if delta < 86400 {
+        format!("{}h", delta / 3600)
+    } else {
+        format!("{}d", delta / 86400)
+    };
+    if future {
+        format!("~{value}")
+    } else {
+        format!("{value} ago")
+    }
+}
+
+/// Topological sort of batches by `parent_ids`. Roots come first,
+/// children follow their parents — matches the Python
+/// `_topological_sort`. Cycles are impossible by API contract, but
+/// the `visited` set makes us tolerant of them anyway.
+fn topological_sort(batches: &[Batch]) -> Vec<&Batch> {
+    let id_to_batch: HashMap<&str, &Batch> = batches.iter().map(|b| (b.id.as_str(), b)).collect();
+    let mut visited: HashSet<&str> = HashSet::new();
+    let mut result: Vec<&Batch> = Vec::with_capacity(batches.len());
+
+    for batch in batches {
+        visit(batch.id.as_str(), &id_to_batch, &mut visited, &mut result);
+    }
+    result
+}
+
+fn visit<'a>(
+    id: &'a str,
+    id_to_batch: &HashMap<&'a str, &'a Batch>,
+    visited: &mut HashSet<&'a str>,
+    result: &mut Vec<&'a Batch>,
+) {
+    if !visited.insert(id) {
+        return;
+    }
+    let Some(batch) = id_to_batch.get(id) else {
+        return;
+    };
+    for parent in &batch.parent_ids {
+        visit(parent.as_str(), id_to_batch, visited, result);
+    }
+    result.push(batch);
+}
+
+/// Group batches by scope, preserving insertion order for the
+/// scopes (matches Python dict iteration). A batch with no scopes
+/// is grouped under `"default"` to match the Python fallback. A
+/// batch with multiple scopes appears in every group it claims —
+/// the Python implementation does the same so users see each batch
+/// in every scope it affects.
+fn group_by_scope<'a>(batches: &[&'a Batch]) -> IndexMap<String, Vec<&'a Batch>> {
+    let mut groups: IndexMap<String, Vec<&'a Batch>> = IndexMap::new();
+    for batch in batches {
+        let scopes: Vec<String> = if batch.scopes.is_empty() {
+            vec!["default".to_string()]
+        } else {
+            batch.scopes.clone()
+        };
+        for scope in scopes {
+            groups.entry(scope).or_default().push(batch);
+        }
+    }
+    groups
+}
+
+#[cfg(test)]
+mod tests {
+    use mergify_core::OutputMode;
+    use mergify_core::StdioOutput;
+    use wiremock::Mock;
+    use wiremock::MockServer;
+    use wiremock::ResponseTemplate;
+    use wiremock::matchers::header;
+    use wiremock::matchers::method;
+    use wiremock::matchers::path;
+    use wiremock::matchers::query_param;
+
+    use super::*;
+
+    type SharedBytes = std::sync::Arc<std::sync::Mutex<Vec<u8>>>;
+
+    struct Captured {
+        output: StdioOutput,
+        stdout: SharedBytes,
+    }
+
+    fn make_output(mode: OutputMode) -> Captured {
+        let stdout: SharedBytes = std::sync::Arc::new(std::sync::Mutex::new(Vec::new()));
+        let stderr: SharedBytes = std::sync::Arc::new(std::sync::Mutex::new(Vec::new()));
+        let output = StdioOutput::with_sinks(
+            mode,
+            SharedWriter(std::sync::Arc::clone(&stdout)),
+            SharedWriter(std::sync::Arc::clone(&stderr)),
+        );
+        Captured { output, stdout }
+    }
+
+    fn stdout_string(cap: &Captured) -> String {
+        String::from_utf8(cap.stdout.lock().unwrap().clone()).unwrap()
+    }
+
+    #[test]
+    fn build_path_no_branch() {
+        assert_eq!(
+            build_path("owner/repo", None),
+            "/v1/repos/owner/repo/merge-queue/status",
+        );
+    }
+
+    #[test]
+    fn build_path_with_branch() {
+        assert_eq!(
+            build_path("owner/repo", Some("main")),
+            "/v1/repos/owner/repo/merge-queue/status?branch=main",
+        );
+    }
+
+    #[test]
+    fn build_path_url_encodes_branch() {
+        // Slashes and unicode in branch names must survive a round
+        // trip through the URL — `feature/foo` is common, and
+        // browser-pasted names occasionally include UTF-8.
+ let path = build_path("owner/repo", Some("feature/foo bar")); + assert!(path.ends_with("?branch=feature%2Ffoo+bar"), "got {path}"); + } + + #[test] + fn relative_time_seconds() { + let now = DateTime::parse_from_rfc3339("2026-01-01T00:01:00Z") + .unwrap() + .with_timezone(&Utc); + assert_eq!(relative_time("2026-01-01T00:00:30Z", now, false), "30s ago"); + } + + #[test] + fn relative_time_minutes() { + let now = DateTime::parse_from_rfc3339("2026-01-01T01:00:00Z") + .unwrap() + .with_timezone(&Utc); + assert_eq!(relative_time("2026-01-01T00:55:00Z", now, false), "5m ago"); + } + + #[test] + fn relative_time_hours() { + let now = DateTime::parse_from_rfc3339("2026-01-01T05:00:00Z") + .unwrap() + .with_timezone(&Utc); + assert_eq!(relative_time("2026-01-01T00:00:00Z", now, false), "5h ago"); + } + + #[test] + fn relative_time_days() { + let now = DateTime::parse_from_rfc3339("2026-01-08T00:00:00Z") + .unwrap() + .with_timezone(&Utc); + assert_eq!(relative_time("2026-01-01T00:00:00Z", now, false), "7d ago"); + } + + #[test] + fn relative_time_future_prefix() { + // ETA-style timestamps render as `~…` so users can + // distinguish "happened 5m ago" from "in 5m". + let now = DateTime::parse_from_rfc3339("2026-01-01T00:00:00Z") + .unwrap() + .with_timezone(&Utc); + assert_eq!(relative_time("2026-01-01T00:30:00Z", now, true), "~30m"); + } + + #[test] + fn relative_time_unparseable_returns_empty() { + // Mirrors Python: a malformed timestamp shouldn't fail the + // whole render — degrade gracefully so the rest of the + // status block still appears. + let now = DateTime::parse_from_rfc3339("2026-01-01T00:00:00Z") + .unwrap() + .with_timezone(&Utc); + assert_eq!(relative_time("not-a-date", now, false), ""); + } + + #[test] + fn topological_sort_orders_parents_before_children() { + // Construct three batches, child references parent. Even if + // the input is in reverse order, the sort must put the + // parent first. 
+ let batches = vec![ + sample_batch("c", &["b"]), + sample_batch("b", &["a"]), + sample_batch("a", &[]), + ]; + let sorted = topological_sort(&batches); + let ids: Vec<&str> = sorted.iter().map(|b| b.id.as_str()).collect(); + assert_eq!(ids, vec!["a", "b", "c"]); + } + + #[test] + fn topological_sort_handles_missing_parent_ids() { + // When `parent_ids` references an id that isn't in the + // batches list (the API has dropped it for some reason), + // the sort skips it instead of panicking. + let batches = [sample_batch("only", &["nonexistent"])]; + let sorted = topological_sort(&batches); + assert_eq!(sorted.len(), 1); + assert_eq!(sorted[0].id, "only"); + } + + #[test] + fn group_by_scope_default_when_empty_scopes() { + let batches = [sample_batch("a", &[])]; + let refs: Vec<&Batch> = batches.iter().collect(); + let groups = group_by_scope(&refs); + assert_eq!(groups.len(), 1); + assert!(groups.contains_key("default")); + } + + #[test] + fn group_by_scope_assigns_to_each_listed_scope() { + // Matches Python: a multi-scope batch appears under each + // scope's group, not just the first. 
+ let mut b = sample_batch("a", &[]); + b.scopes = vec!["foo".to_string(), "bar".to_string()]; + let batches = [b]; + let refs: Vec<&Batch> = batches.iter().collect(); + let groups = group_by_scope(&refs); + assert_eq!(groups.len(), 2); + assert!(groups.contains_key("foo")); + assert!(groups.contains_key("bar")); + } + + #[test] + fn status_icon_known_codes() { + assert_eq!(status_icon("running"), "●"); + assert_eq!(status_icon("merged"), "✓"); + assert_eq!(status_icon("failed"), "✗"); + } + + #[test] + fn status_icon_unknown_falls_back() { + assert_eq!(status_icon("brand-new-status"), "?"); + } + + fn sample_batch(id: &str, parents: &[&str]) -> Batch { + Batch { + id: id.to_string(), + parent_ids: parents.iter().copied().map(String::from).collect(), + scopes: Vec::new(), + status: BatchStatus { + code: "running".to_string(), + }, + started_at: None, + estimated_merge_at: None, + checks_summary: ChecksSummary { + passed: 0, + total: 0, + }, + pull_requests: Vec::new(), + } + } + + #[tokio::test] + async fn run_json_passes_response_through_verbatim() { + // JSON mode is a passthrough — every field the server sends, + // including ones we don't render, must survive intact. + // `extra_field` here proves we don't reshape on the way out. 
+ let server = MockServer::start().await; + let response = serde_json::json!({ + "batches": [], + "waiting_pull_requests": [], + "scope_queues": {"default": []}, + "pause": null, + "extra_field": "preserved", + }); + Mock::given(method("GET")) + .and(path("/v1/repos/owner/repo/merge-queue/status")) + .and(header("Authorization", "Bearer t")) + .respond_with(ResponseTemplate::new(200).set_body_json(response.clone())) + .expect(1) + .mount(&server) + .await; + + let mut cap = make_output(OutputMode::Human); + let api_url = server.uri(); + run( + StatusOptions { + repository: Some("owner/repo"), + token: Some("t"), + api_url: Some(&api_url), + branch: None, + output_json: true, + }, + &mut cap.output, + ) + .await + .unwrap(); + + let stdout = stdout_string(&cap); + let parsed: serde_json::Value = serde_json::from_str(stdout.trim()).unwrap(); + assert_eq!(parsed, response); + } + + #[tokio::test] + async fn run_human_renders_paused_queue() { + let server = MockServer::start().await; + Mock::given(method("GET")) + .and(path("/v1/repos/owner/repo/merge-queue/status")) + .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!({ + "batches": [], + "waiting_pull_requests": [], + "scope_queues": {}, + "pause": {"reason": "deploy freeze", "paused_at": "2026-01-01T00:00:00Z"}, + }))) + .expect(1) + .mount(&server) + .await; + + let mut cap = make_output(OutputMode::Human); + let api_url = server.uri(); + run( + StatusOptions { + repository: Some("owner/repo"), + token: Some("t"), + api_url: Some(&api_url), + branch: None, + output_json: false, + }, + &mut cap.output, + ) + .await + .unwrap(); + + let stdout = stdout_string(&cap); + assert!(stdout.contains("Merge Queue: owner/repo"), "got {stdout}"); + assert!(stdout.contains("Queue is paused"), "got {stdout}"); + assert!(stdout.contains("deploy freeze"), "got {stdout}"); + assert!(stdout.contains("Queue is empty"), "got {stdout}"); + } + + #[tokio::test] + async fn run_human_renders_empty_queue() { + let 
server = MockServer::start().await; + Mock::given(method("GET")) + .and(path("/v1/repos/owner/repo/merge-queue/status")) + .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!({ + "batches": [], + "waiting_pull_requests": [], + "scope_queues": {}, + "pause": null, + }))) + .mount(&server) + .await; + + let mut cap = make_output(OutputMode::Human); + let api_url = server.uri(); + run( + StatusOptions { + repository: Some("owner/repo"), + token: Some("t"), + api_url: Some(&api_url), + branch: None, + output_json: false, + }, + &mut cap.output, + ) + .await + .unwrap(); + + let stdout = stdout_string(&cap); + assert!(stdout.contains("Queue is empty"), "got {stdout}"); + } + + #[tokio::test] + async fn run_human_renders_batches_and_waiting_prs() { + let server = MockServer::start().await; + Mock::given(method("GET")) + .and(path("/v1/repos/owner/repo/merge-queue/status")) + .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!({ + "batches": [{ + "id": "b1", + "name": "batch-1", + "status": {"code": "running"}, + "checks_summary": {"passed": 3, "total": 5}, + "started_at": "2026-01-01T00:00:00Z", + "estimated_merge_at": "2026-01-01T01:00:00Z", + "pull_requests": [ + { + "number": 42, + "title": "Add feature foo", + "url": "https://example.test/42", + "author": {"id": 1, "login": "alice"}, + "queued_at": "2026-01-01T00:00:00Z", + "priority_alias": "default", + "priority_rule_name": "default", + "labels": [], + "scopes": [], + }, + ], + "parent_ids": [], + }], + "waiting_pull_requests": [ + { + "number": 43, + "title": "Update deps", + "url": "https://example.test/43", + "author": {"id": 2, "login": "bob"}, + "queued_at": "2026-01-01T00:00:00Z", + "priority_alias": "high", + "priority_rule_name": "high", + "labels": [], + "scopes": [], + }, + ], + "scope_queues": {}, + "pause": null, + }))) + .mount(&server) + .await; + + let mut cap = make_output(OutputMode::Human); + let api_url = server.uri(); + run( + StatusOptions { + 
repository: Some("owner/repo"), + token: Some("t"), + api_url: Some(&api_url), + branch: None, + output_json: false, + }, + &mut cap.output, + ) + .await + .unwrap(); + + let stdout = stdout_string(&cap); + assert!(stdout.contains("Batches"), "got {stdout}"); + assert!(stdout.contains("running"), "got {stdout}"); + assert!(stdout.contains("checks 3/5"), "got {stdout}"); + assert!( + stdout.contains("#42 Add feature foo (alice)"), + "got {stdout}" + ); + assert!(stdout.contains("Waiting"), "got {stdout}"); + assert!(stdout.contains("#43"), "got {stdout}"); + assert!(stdout.contains("Update deps"), "got {stdout}"); + assert!(stdout.contains("bob"), "got {stdout}"); + assert!(stdout.contains("high"), "got {stdout}"); + } + + #[tokio::test] + async fn run_human_groups_batches_by_scope_when_multiple() { + let server = MockServer::start().await; + Mock::given(method("GET")) + .and(path("/v1/repos/owner/repo/merge-queue/status")) + .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!({ + "batches": [ + { + "id": "b1", + "status": {"code": "running"}, + "checks_summary": {"passed": 0, "total": 0}, + "pull_requests": [], + "scopes": ["frontend"], + "parent_ids": [], + }, + { + "id": "b2", + "status": {"code": "preparing"}, + "checks_summary": {"passed": 0, "total": 0}, + "pull_requests": [], + "scopes": ["backend"], + "parent_ids": [], + }, + ], + "waiting_pull_requests": [], + "scope_queues": {}, + "pause": null, + }))) + .mount(&server) + .await; + + let mut cap = make_output(OutputMode::Human); + let api_url = server.uri(); + run( + StatusOptions { + repository: Some("owner/repo"), + token: Some("t"), + api_url: Some(&api_url), + branch: None, + output_json: false, + }, + &mut cap.output, + ) + .await + .unwrap(); + + let stdout = stdout_string(&cap); + // Two scopes → each labelled by its own name (no + // generic "Batches" header). 
+ assert!(stdout.contains("frontend"), "got {stdout}"); + assert!(stdout.contains("backend"), "got {stdout}"); + assert!(!stdout.contains("\nBatches\n"), "got {stdout}"); + } + + #[tokio::test] + async fn run_passes_branch_query_param() { + let server = MockServer::start().await; + Mock::given(method("GET")) + .and(path("/v1/repos/owner/repo/merge-queue/status")) + .and(query_param("branch", "main")) + .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!({ + "batches": [], + "waiting_pull_requests": [], + "scope_queues": {}, + "pause": null, + }))) + .expect(1) + .mount(&server) + .await; + + let mut cap = make_output(OutputMode::Human); + let api_url = server.uri(); + run( + StatusOptions { + repository: Some("owner/repo"), + token: Some("t"), + api_url: Some(&api_url), + branch: Some("main"), + output_json: false, + }, + &mut cap.output, + ) + .await + .unwrap(); + } + + #[tokio::test] + async fn run_tolerates_missing_optional_fields() { + // The API has historically dropped optional fields entirely + // rather than serializing them as null. Deserialization + // must accept that — the response below has neither + // `pause` nor any of the per-batch optional timestamps. 
+ let server = MockServer::start().await; + Mock::given(method("GET")) + .and(path("/v1/repos/owner/repo/merge-queue/status")) + .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!({ + "batches": [{ + "id": "b1", + "status": {"code": "running"}, + "checks_summary": {"passed": 0, "total": 0}, + "pull_requests": [], + }], + "waiting_pull_requests": [], + "scope_queues": {}, + }))) + .mount(&server) + .await; + + let mut cap = make_output(OutputMode::Human); + let api_url = server.uri(); + run( + StatusOptions { + repository: Some("owner/repo"), + token: Some("t"), + api_url: Some(&api_url), + branch: None, + output_json: false, + }, + &mut cap.output, + ) + .await + .unwrap(); + } + + struct SharedWriter(SharedBytes); + impl Write for SharedWriter { + fn write(&mut self, bytes: &[u8]) -> std::io::Result { + self.0.lock().unwrap().extend_from_slice(bytes); + Ok(bytes.len()) + } + fn flush(&mut self) -> std::io::Result<()> { + Ok(()) + } + } +} diff --git a/mergify_cli/queue/api.py b/mergify_cli/queue/api.py index 8c2b3fa1..1b6b977f 100644 --- a/mergify_cli/queue/api.py +++ b/mergify_cli/queue/api.py @@ -7,77 +7,6 @@ import httpx -class QueuePullRequestAuthor(typing.TypedDict): - id: int - login: str - - -class QueuePullRequest(typing.TypedDict, total=False): - number: typing.Required[int] - title: typing.Required[str] - url: typing.Required[str] - author: typing.Required[QueuePullRequestAuthor] - queued_at: typing.Required[str] - priority_alias: typing.Required[str] - priority_rule_name: typing.Required[str] - labels: typing.Required[list[str]] - scopes: typing.Required[list[str]] - estimated_merge_at: str | None - - -class QueueChecksSummary(typing.TypedDict): - passed: int - total: int - - -class QueueBatchStatus(typing.TypedDict): - code: str - - -class QueueBatch(typing.TypedDict, total=False): - id: typing.Required[str] - name: typing.Required[str] - status: typing.Required[QueueBatchStatus] - started_at: typing.Required[str] - 
estimated_merge_at: typing.Required[str] - checks_summary: typing.Required[QueueChecksSummary] - pull_requests: typing.Required[list[QueuePullRequest]] - parent_ids: list[str] - batch_filled_slots: int | None - max_batch_slots: int | None - batch_max_start_at: str | None - scopes: list[str] - sub_batches: list[typing.Any] | None - - -class QueuePause(typing.TypedDict): - reason: str - paused_at: str - - -class QueueStatusResponse(typing.TypedDict, total=False): - batches: typing.Required[list[QueueBatch]] - waiting_pull_requests: typing.Required[list[QueuePullRequest]] - scope_queues: typing.Required[dict[str, typing.Any]] - pause: QueuePause | None - - -async def get_queue_status( - client: httpx.AsyncClient, - repository: str, - *, - branch: str | None = None, -) -> QueueStatusResponse: - params: dict[str, str] = {} - if branch is not None: - params["branch"] = branch - response = await client.get( - f"/v1/repos/{repository}/merge-queue/status", - params=params, - ) - return response.json() # type: ignore[no-any-return] - - class QueueRule(typing.TypedDict): name: str config: dict[str, typing.Any] diff --git a/mergify_cli/queue/cli.py b/mergify_cli/queue/cli.py index 7acb5216..f52eb6d6 100644 --- a/mergify_cli/queue/cli.py +++ b/mergify_cli/queue/cli.py @@ -14,20 +14,6 @@ from mergify_cli.queue import api as queue_api -STATUS_STYLES: dict[str, tuple[str, str]] = { - "running": ("●", "green"), - "bisecting": ("◑", "yellow"), - "preparing": ("◌", "yellow"), - "failed": ("✗", "red"), - "merged": ("✓", "dim green"), - "waiting_for_merge": ("◎", "cyan"), - "waiting_for_previous_batches": ("⏳", "yellow"), - "waiting_for_requeue": ("↻", "yellow"), - "waiting_schedule": ("⏰", "yellow"), - "waiting_for_batch": ("⏳", "dim"), - "frozen": ("❄", "cyan"), -} - CHECK_STATE_STYLES: dict[str, tuple[str, str]] = { "success": ("✓", "green"), "pending": ("◌", "yellow"), @@ -67,110 +53,6 @@ def _relative_time(iso_str: str | None, *, future: bool = False) -> str: return f"{value} ago" 
-def _status_text(code: str) -> Text: - icon, style = STATUS_STYLES.get(code, ("?", "dim")) - text = Text() - text.append(f"{icon} ", style=style) - text.append(code, style=style) - return text - - -def _batch_label(batch: queue_api.QueueBatch) -> Text: - label = _status_text(batch["status"]["code"]) - checks = batch["checks_summary"] - if checks["total"] > 0: - label.append(f" checks {checks['passed']}/{checks['total']}", style="dim") - started = batch.get("started_at") - if started: - rel = _relative_time(started) - if rel: - label.append(f" {rel}", style="dim") - eta = batch.get("estimated_merge_at") - if eta: - rel = _relative_time(eta, future=True) - if rel: - label.append(f" ETA {rel}", style="dim") - return label - - -def _pr_label(pr: queue_api.QueuePullRequest) -> Text: - text = Text() - text.append(f"#{pr['number']}", style="cyan") - text.append(f" {pr['title']}") - text.append(f" ({pr['author']['login']})", style="dim") - return text - - -def _topological_sort( - batches: list[queue_api.QueueBatch], -) -> list[queue_api.QueueBatch]: - id_to_batch = {b["id"]: b for b in batches} - visited: set[str] = set() - result: list[queue_api.QueueBatch] = [] - - def visit(batch_id: str) -> None: - if batch_id in visited: - return - visited.add(batch_id) - batch = id_to_batch.get(batch_id) - if batch is None: - return - for parent_id in batch.get("parent_ids") or []: - visit(parent_id) - result.append(batch) - - for b in batches: - visit(b["id"]) - return result - - -def _group_batches_by_scope( - batches: list[queue_api.QueueBatch], -) -> dict[str, list[queue_api.QueueBatch]]: - groups: dict[str, list[queue_api.QueueBatch]] = {} - for batch in batches: - scopes = batch.get("scopes") or ["default"] - for scope in scopes: - groups.setdefault(scope, []).append(batch) - return groups - - -def _print_batches(batches: list[queue_api.QueueBatch]) -> None: - sorted_batches = _topological_sort(batches) - scope_groups = _group_batches_by_scope(sorted_batches) - all_scopes = 
list(scope_groups.keys()) - single_scope = len(all_scopes) == 1 - - for scope in all_scopes: - scope_batches = scope_groups[scope] - label = "Batches" if single_scope else scope - tree = Tree(Text(label, style="bold")) - for batch in scope_batches: - batch_node = tree.add(_batch_label(batch)) - for pr in batch["pull_requests"]: - batch_node.add(_pr_label(pr)) - console.print(tree) - - -def _print_waiting_prs(pull_requests: list[queue_api.QueuePullRequest]) -> None: - console.print(Text("Waiting", style="bold")) - for pr in pull_requests: - line = Text(" ") - line.append(f"#{pr['number']}", style="cyan") - line.append(f" {pr['title']}") - line.append(f" {pr['author']['login']}", style="dim") - line.append(f" {pr['priority_alias']}", style="magenta") - queued_rel = _relative_time(pr["queued_at"]) - if queued_rel: - line.append(f" queued {queued_rel}", style="dim") - eta = pr.get("estimated_merge_at") - if eta: - eta_rel = _relative_time(eta, future=True) - if eta_rel: - line.append(f" ETA {eta_rel}", style="dim") - console.print(line) - - def _print_pull_metadata(data: queue_api.QueuePullResponse) -> None: console.print(Text(f"PR #{data['number']}", style="bold")) console.print() @@ -365,73 +247,6 @@ def queue( click.echo(ctx.get_help()) -@queue.command(help="Show merge queue status for the repository") -@click.option( - "--branch", - "-b", - default=None, - help="Branch name to filter the queue", -) -@click.option( - "--json", - "output_json", - is_flag=True, - help="Output in JSON format", -) -@click.pass_context -@utils.run_with_asyncio -async def status(ctx: click.Context, *, branch: str | None, output_json: bool) -> None: - async with utils.get_mergify_http_client( - ctx.obj["api_url"], - ctx.obj["token"], - ) as client: - data = await queue_api.get_queue_status( - client, - ctx.obj["repository"], - branch=branch, - ) - - if output_json: - import json - - # JSON output is a passthrough of the Mergify API response. 
- # The schema is Mergify's API contract, not this CLI's — the - # Rust port must preserve this passthrough behavior. - click.echo(json.dumps(data, indent=2)) - return - - console.print( - Text(f"Merge Queue: {ctx.obj['repository']}", style="bold"), - ) - console.print() - - pause = data.get("pause") - if pause is not None: - pause_rel = _relative_time(pause["paused_at"]) - pause_text = Text() - pause_text.append("⚠ Queue is paused: ", style="bold yellow") - pause_text.append(f'"{pause["reason"]}"') - if pause_rel: - pause_text.append(f" (since {pause_rel})", style="dim") - console.print(pause_text) - console.print() - - batches = data["batches"] - waiting = data["waiting_pull_requests"] - - if not batches and not waiting: - console.print("Queue is empty") - return - - if batches: - _print_batches(batches) - - if waiting: - if batches: - console.print() - _print_waiting_prs(waiting) - - @queue.command(help="Show detailed state of a pull request in the merge queue") @click.argument("pr_number", type=int) @click.option( diff --git a/mergify_cli/tests/queue/test_cli.py b/mergify_cli/tests/queue/test_cli.py index 387380aa..d8451f93 100644 --- a/mergify_cli/tests/queue/test_cli.py +++ b/mergify_cli/tests/queue/test_cli.py @@ -1,71 +1,9 @@ from __future__ import annotations import datetime -import typing from unittest.mock import patch -from click.testing import CliRunner -from httpx import Response -import respx - from mergify_cli.queue.cli import _relative_time -from mergify_cli.queue.cli import _topological_sort -from mergify_cli.queue.cli import queue -from mergify_cli.tests import utils as test_utils - - -FAKE_PR = { - "number": 123, - "title": "Add feature X", - "url": "https://github.com/owner/repo/pull/123", - "author": {"id": 1, "login": "octocat"}, - "queued_at": "2025-11-05T10:00:00Z", - "priority_alias": "medium", - "priority_rule_name": "default", - "labels": [], - "scopes": ["main"], - "estimated_merge_at": "2025-11-05T11:00:00Z", -} - -FAKE_BATCH = { - 
"id": "550e8400-e29b-41d4-a716-446655440000", - "name": "batch-1", - "status": {"code": "running"}, - "started_at": "2025-11-05T10:00:00Z", - "estimated_merge_at": "2025-11-05T11:00:00Z", - "checks_summary": {"passed": 5, "total": 10}, - "pull_requests": [FAKE_PR], - "parent_ids": [], - "scopes": ["main"], - "sub_batches": None, -} - -FAKE_PAUSE = { - "reason": "Deploying hotfix", - "paused_at": "2025-11-05T14:00:00Z", -} - -BASE_ARGS = [ - "--token", - "test-token", - "--api-url", - "https://api.mergify.com", - "--repository", - "owner/repo", -] - - -def _invoke_status( - mock: respx.MockRouter, - response_json: dict[str, typing.Any], - extra_args: list[str] | None = None, -) -> typing.Any: - mock.get("/v1/repos/owner/repo/merge-queue/status").mock( - return_value=Response(200, json=response_json), - ) - runner = CliRunner() - args = [*BASE_ARGS, "status", *(extra_args or [])] - return runner.invoke(queue, args) class TestRelativeTime: @@ -114,292 +52,3 @@ def test_none(self) -> None: def test_empty(self) -> None: assert not _relative_time("") - - -class TestTopologicalSort: - def test_no_parents(self) -> None: - batches = [ - {**FAKE_BATCH, "id": "a", "parent_ids": []}, - {**FAKE_BATCH, "id": "b", "parent_ids": []}, - ] - result = _topological_sort(batches) # type: ignore[arg-type] - assert [b["id"] for b in result] == ["a", "b"] - - def test_chain(self) -> None: - batches = [ - {**FAKE_BATCH, "id": "c", "parent_ids": ["b"]}, - {**FAKE_BATCH, "id": "a", "parent_ids": []}, - {**FAKE_BATCH, "id": "b", "parent_ids": ["a"]}, - ] - result = _topological_sort(batches) # type: ignore[arg-type] - assert [b["id"] for b in result] == ["a", "b", "c"] - - def test_diamond(self) -> None: - batches = [ - {**FAKE_BATCH, "id": "d", "parent_ids": ["b", "c"]}, - {**FAKE_BATCH, "id": "b", "parent_ids": ["a"]}, - {**FAKE_BATCH, "id": "c", "parent_ids": ["a"]}, - {**FAKE_BATCH, "id": "a", "parent_ids": []}, - ] - result = _topological_sort(batches) # type: ignore[arg-type] - ids = 
[b["id"] for b in result] - assert ids.index("a") < ids.index("b") - assert ids.index("a") < ids.index("c") - assert ids.index("b") < ids.index("d") - assert ids.index("c") < ids.index("d") - - -class TestStatusCommand: - def test_empty_queue(self) -> None: - with respx.mock(base_url="https://api.mergify.com") as mock: - result = _invoke_status( - mock, - { - "batches": [], - "waiting_pull_requests": [], - "scope_queues": {}, - }, - ) - assert result.exit_code == 0, result.output - assert "Merge Queue: owner/repo" in result.output - assert "Queue is empty" in result.output - - def test_with_batches(self) -> None: - with respx.mock(base_url="https://api.mergify.com") as mock: - result = _invoke_status( - mock, - { - "batches": [FAKE_BATCH], - "waiting_pull_requests": [], - "scope_queues": {}, - }, - ) - assert result.exit_code == 0, result.output - assert "Batches" in result.output - assert "running" in result.output - assert "5/10" in result.output - assert "#123" in result.output - assert "Add feature X" in result.output - assert "octocat" in result.output - - def test_with_waiting_prs(self) -> None: - with respx.mock(base_url="https://api.mergify.com") as mock: - result = _invoke_status( - mock, - { - "batches": [], - "waiting_pull_requests": [FAKE_PR], - "scope_queues": {}, - }, - ) - assert result.exit_code == 0, result.output - assert "Waiting" in result.output - assert "#123" in result.output - assert "Add feature X" in result.output - assert "octocat" in result.output - assert "medium" in result.output - - def test_with_batches_and_waiting_prs(self) -> None: - waiting_pr = { - **FAKE_PR, - "number": 456, - "title": "Another PR", - "author": {"id": 2, "login": "hubot"}, - } - with respx.mock(base_url="https://api.mergify.com") as mock: - result = _invoke_status( - mock, - { - "batches": [FAKE_BATCH], - "waiting_pull_requests": [waiting_pr], - "scope_queues": {}, - }, - ) - assert result.exit_code == 0, result.output - assert "Batches" in result.output - 
assert "Waiting" in result.output - assert "#123" in result.output - assert "#456" in result.output - - def test_paused(self) -> None: - with respx.mock(base_url="https://api.mergify.com") as mock: - result = _invoke_status( - mock, - { - "batches": [FAKE_BATCH], - "waiting_pull_requests": [], - "scope_queues": {}, - "pause": FAKE_PAUSE, - }, - ) - assert result.exit_code == 0, result.output - assert "paused" in result.output.lower() - assert "Deploying hotfix" in result.output - - def test_paused_empty_queue(self) -> None: - with respx.mock(base_url="https://api.mergify.com") as mock: - result = _invoke_status( - mock, - { - "batches": [], - "waiting_pull_requests": [], - "scope_queues": {}, - "pause": FAKE_PAUSE, - }, - ) - assert result.exit_code == 0, result.output - assert "paused" in result.output.lower() - assert "Queue is empty" in result.output - - def test_json_output(self) -> None: - api_response = { - "batches": [FAKE_BATCH], - "waiting_pull_requests": [FAKE_PR], - "scope_queues": {}, - } - with respx.mock(base_url="https://api.mergify.com") as mock: - result = _invoke_status(mock, api_response, extra_args=["--json"]) - assert result.exit_code == 0, result.output - data = test_utils.assert_stdout_is_single_json_document(result.output) - assert len(data["batches"]) == 1 - assert len(data["waiting_pull_requests"]) == 1 - - def test_branch_filter(self) -> None: - with respx.mock(base_url="https://api.mergify.com") as mock: - route = mock.get( - "/v1/repos/owner/repo/merge-queue/status", - params={"branch": "release"}, - ).mock( - return_value=Response( - 200, - json={ - "batches": [], - "waiting_pull_requests": [], - "scope_queues": {}, - }, - ), - ) - runner = CliRunner() - result = runner.invoke( - queue, - [*BASE_ARGS, "status", "--branch", "release"], - ) - assert result.exit_code == 0, result.output - assert route.called - - def test_api_error(self) -> None: - with respx.mock(base_url="https://api.mergify.com") as mock: - 
mock.get("/v1/repos/owner/repo/merge-queue/status").mock( - return_value=Response(403, json={"message": "Forbidden"}), - ) - runner = CliRunner() - result = runner.invoke(queue, [*BASE_ARGS, "status"]) - assert result.exit_code != 0 - - def test_pr_without_eta(self) -> None: - pr_no_eta = {**FAKE_PR, "estimated_merge_at": None} - with respx.mock(base_url="https://api.mergify.com") as mock: - result = _invoke_status( - mock, - { - "batches": [], - "waiting_pull_requests": [pr_no_eta], - "scope_queues": {}, - }, - ) - assert result.exit_code == 0, result.output - assert "#123" in result.output - - def test_multi_scope(self) -> None: - batch_main = { - **FAKE_BATCH, - "id": "aaa", - "scopes": ["main"], - } - batch_staging = { - **FAKE_BATCH, - "id": "bbb", - "scopes": ["staging"], - "status": {"code": "preparing"}, - "pull_requests": [ - { - **FAKE_PR, - "number": 456, - "title": "Staging fix", - "author": {"id": 2, "login": "hubot"}, - }, - ], - } - with respx.mock(base_url="https://api.mergify.com") as mock: - result = _invoke_status( - mock, - { - "batches": [batch_main, batch_staging], - "waiting_pull_requests": [], - "scope_queues": {}, - }, - ) - assert result.exit_code == 0, result.output - assert "main" in result.output - assert "staging" in result.output - assert "#123" in result.output - assert "#456" in result.output - - def test_multi_pr_batch(self) -> None: - pr2 = { - **FAKE_PR, - "number": 789, - "title": "Second PR", - "author": {"id": 3, "login": "alice"}, - } - batch = { - **FAKE_BATCH, - "pull_requests": [FAKE_PR, pr2], - } - with respx.mock(base_url="https://api.mergify.com") as mock: - result = _invoke_status( - mock, - { - "batches": [batch], - "waiting_pull_requests": [], - "scope_queues": {}, - }, - ) - assert result.exit_code == 0, result.output - assert "#123" in result.output - assert "#789" in result.output - assert "alice" in result.output - - def test_status_icons(self) -> None: - batch_failed = { - **FAKE_BATCH, - "status": {"code": 
"failed"}, - } - with respx.mock(base_url="https://api.mergify.com") as mock: - result = _invoke_status( - mock, - { - "batches": [batch_failed], - "waiting_pull_requests": [], - "scope_queues": {}, - }, - ) - assert result.exit_code == 0, result.output - assert "failed" in result.output - - def test_checks_omitted_when_zero(self) -> None: - batch_no_checks = { - **FAKE_BATCH, - "checks_summary": {"passed": 0, "total": 0}, - } - with respx.mock(base_url="https://api.mergify.com") as mock: - result = _invoke_status( - mock, - { - "batches": [batch_no_checks], - "waiting_pull_requests": [], - "scope_queues": {}, - }, - ) - assert result.exit_code == 0, result.output - assert "0/0" not in result.output diff --git a/mergify_cli/tests/queue/test_skill.py b/mergify_cli/tests/queue/test_skill.py index 1982d735..e701b83b 100644 --- a/mergify_cli/tests/queue/test_skill.py +++ b/mergify_cli/tests/queue/test_skill.py @@ -65,7 +65,7 @@ def test_skill_has_required_sections() -> None: # Rust-native queue commands. Each port PR appends to this list when # it deletes the Python copy, so the validation below stays accurate # without needing to spawn the Rust binary at test time. -NATIVE_QUEUE_COMMANDS: frozenset[str] = frozenset({"pause", "unpause"}) +NATIVE_QUEUE_COMMANDS: frozenset[str] = frozenset({"pause", "unpause", "status"}) def test_skill_references_valid_commands() -> None: