diff --git a/Cargo.lock b/Cargo.lock
index 76b54c60..727fa7a6 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -316,7 +316,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8c6d47a4e2961fb8721bcfc54feae6455f2f64e7054f9bc67e875f0e77f4c58d"
dependencies = [
"rust_decimal",
- "schemars 1.2.0",
+ "schemars",
"serde",
"utf8-width",
]
@@ -596,7 +596,7 @@ dependencies = [
"rstest 0.25.0",
"rstest_reuse",
"runner-shared",
- "schemars 0.8.22",
+ "schemars",
"semver",
"serde",
"serde_json",
@@ -3249,33 +3249,22 @@ dependencies = [
[[package]]
name = "schemars"
-version = "0.8.22"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3fbf2ae1b8bc8e02df939598064d22402220cd5bbcca1c76f7d6a310974d5615"
-dependencies = [
- "dyn-clone",
- "schemars_derive",
- "serde",
- "serde_json",
-]
-
-[[package]]
-name = "schemars"
-version = "1.2.0"
+version = "1.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "54e910108742c57a770f492731f99be216a52fadd361b06c8fb59d74ccc267d2"
+checksum = "a2b42f36aa1cd011945615b92222f6bf73c599a102a300334cd7f8dbeec726cc"
dependencies = [
"dyn-clone",
"ref-cast",
+ "schemars_derive",
"serde",
"serde_json",
]
[[package]]
name = "schemars_derive"
-version = "0.8.22"
+version = "1.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "32e265784ad618884abaea0600a9adf15393368d840e0222d101a072f3f7534d"
+checksum = "7d115b50f4aaeea07e79c1912f645c7513d81715d0420f8bc77a18c6260b307f"
dependencies = [
"proc-macro2",
"quote",
diff --git a/Cargo.toml b/Cargo.toml
index 4b312705..b89f6d6d 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -37,7 +37,7 @@ tokio-util = "0.7.16"
md5 = "0.7.0"
base64 = "0.21.0"
async-compression = { version = "0.4.5", features = ["tokio", "gzip"] }
-schemars = "0.8"
+schemars = "1.2.1"
simplelog = { version = "0.12.1", default-features = false, features = [
"termcolor",
] }
diff --git a/docs/architecture.md b/docs/architecture.md
new file mode 100644
index 00000000..6b655cc5
--- /dev/null
+++ b/docs/architecture.md
@@ -0,0 +1,84 @@
+# Architecture
+
+## Execution flow
+
+This diagram shows how the CLI commands, project config, orchestrator (with providers), and executors interact during a benchmark run.
+
+```mermaid
+graph TD
+ subgraph "1. CLI Commands"
+ RUN["codspeed run <command>"]
+ EXEC["codspeed exec <command>"]
+ end
+
+ subgraph "2. Config"
+        PROJ_CFG["ProjectConfig<br/>(codspeed.yaml in repo)<br/>benchmark targets, defaults"]
+        MERGER["ConfigMerger<br/>CLI args > project config > defaults"]
+ end
+
+ subgraph "3. Orchestrator"
+        ORCH_CFG["OrchestratorConfig<br/>targets, modes, upload settings"]
+ ORCH["Orchestrator"]
+
+ PROVIDER{" "}
+ LOCAL["LocalProvider"]
+        CI["CI Providers<br/>(GitHub Actions, GitLab, Buildkite)"]
+ PROVIDER_JOIN{" "}
+
+ subgraph "Executor (per mode × per target)"
+ SETUP["1. Setup"]
+ RUN_STEP["2. Run"]
+ TEARDOWN["3. Teardown"]
+ end
+
+ UPLOAD["Upload all results to CodSpeed"]
+ end
+
+ subgraph "4. Auth"
+        CS_CFG["CodSpeedConfig<br/>(~/.config/codspeed/config.yaml)"]
+ OIDC["OIDC / env token"]
+ end
+
+ %% CLI → Config → OrchestratorConfig
+ RUN --> MERGER
+ EXEC --> MERGER
+ MERGER --> PROJ_CFG
+ PROJ_CFG -->|"merged config"| ORCH_CFG
+
+ %% CLI → Orchestrator
+    RUN -->|"single command →<br/>Entrypoint target"| ORCH_CFG
+    RUN -->|"no command + config →<br/>Exec & Entrypoint targets"| ORCH_CFG
+    EXEC -->|"always creates<br/>Exec target"| ORCH_CFG
+
+ %% Orchestrator init
+ ORCH_CFG -->|"Orchestrator::new()"| ORCH
+
+ %% Provider detection
+ ORCH -->|"auto-detect env"| PROVIDER
+ PROVIDER --> LOCAL
+ PROVIDER --> CI
+
+ %% Auth → Providers
+ CS_CFG -->|"auth token"| LOCAL
+ OIDC -->|"OIDC / env token"| CI
+
+ %% Providers → Upload
+    LOCAL -->|"token + run metadata"| PROVIDER_JOIN
+ CI -->|"token + run metadata"| PROVIDER_JOIN
+ PROVIDER_JOIN --> UPLOAD
+
+ %% Orchestrator spawns executors
+    ORCH -->|"for each target × mode:<br/>spawn executor"| SETUP
+ SETUP --> RUN_STEP
+ RUN_STEP --> TEARDOWN
+
+ %% All executors done → upload
+ TEARDOWN -->|"collect results"| UPLOAD
+```
+
+### Key interactions
+
+- **CLI → Config**: Both `run` and `exec` merge CLI args with `ProjectConfig` (CLI takes precedence). `run` can source targets from project config; `exec` always creates an `Exec` target.
+- **CLI → Orchestrator**: The merged config becomes an `OrchestratorConfig` holding all targets and modes.
+- **Orchestrator → Providers**: Auto-detects environment (Local vs CI). Local uses the auth token from `CodSpeedConfig`; CI providers handle OIDC tokens.
+- **Orchestrator → Executors**: Groups all `Exec` targets into one exec-harness pipe command, runs each `Entrypoint` independently. For each target group, iterates over all modes, creating an `ExecutionContext` per mode and dispatching to the matching executor (`Valgrind`/`WallTime`/`Memory`). After all runs complete, uploads results with provider metadata.
diff --git a/schemas/codspeed.schema.json b/schemas/codspeed.schema.json
index 8340e15b..9374fd89 100644
--- a/schemas/codspeed.schema.json
+++ b/schemas/codspeed.schema.json
@@ -1,60 +1,56 @@
{
- "$schema": "http://json-schema.org/draft-07/schema#",
+ "$schema": "https://json-schema.org/draft/2020-12/schema",
"title": "ProjectConfig",
- "description": "Project-level configuration from codspeed.yaml file\n\nThis configuration provides default options for the run and exec commands. CLI arguments always take precedence over config file values.",
+ "description": "Project-level configuration from codspeed.yaml file\n\nThis configuration provides default options for the run and exec commands.\nCLI arguments always take precedence over config file values.",
"type": "object",
"properties": {
- "benchmarks": {
- "description": "List of benchmark targets to execute",
- "type": [
- "array",
- "null"
- ],
- "items": {
- "$ref": "#/definitions/Target"
- }
- },
"options": {
"description": "Default options to apply to all benchmark runs",
- "anyOf": [
+ "oneOf": [
{
- "$ref": "#/definitions/ProjectOptions"
+ "$ref": "#/$defs/ProjectOptions"
},
{
"type": "null"
}
]
+ },
+ "benchmarks": {
+ "description": "List of benchmark targets to execute",
+ "type": [
+ "array",
+ "null"
+ ],
+ "items": {
+ "$ref": "#/$defs/Target"
+ }
}
},
- "definitions": {
+ "$defs": {
"ProjectOptions": {
"description": "Root-level options that apply to all benchmark runs unless overridden by CLI",
"type": "object",
"properties": {
- "max-rounds": {
- "description": "Maximum number of rounds",
+ "working-directory": {
+ "description": "Working directory where commands will be executed (relative to config file)",
"type": [
- "integer",
+ "string",
"null"
- ],
- "format": "uint64",
- "minimum": 0.0
+ ]
},
- "max-time": {
- "description": "Maximum total execution time",
+ "warmup-time": {
+ "description": "Duration of warmup phase (e.g., \"1s\", \"500ms\")",
"type": [
"string",
"null"
]
},
- "min-rounds": {
- "description": "Minimum number of rounds",
+ "max-time": {
+ "description": "Maximum total execution time",
"type": [
- "integer",
+ "string",
"null"
- ],
- "format": "uint64",
- "minimum": 0.0
+ ]
},
"min-time": {
"description": "Minimum total execution time",
@@ -63,35 +59,39 @@
"null"
]
},
- "warmup-time": {
- "description": "Duration of warmup phase (e.g., \"1s\", \"500ms\")",
+ "max-rounds": {
+ "description": "Maximum number of rounds",
"type": [
- "string",
+ "integer",
"null"
- ]
+ ],
+ "format": "uint64",
+ "minimum": 0
},
- "working-directory": {
- "description": "Working directory where commands will be executed (relative to config file)",
+ "min-rounds": {
+ "description": "Minimum number of rounds",
"type": [
- "string",
+ "integer",
"null"
- ]
+ ],
+ "format": "uint64",
+ "minimum": 0
}
}
},
"Target": {
- "description": "A benchmark target to execute",
+ "description": "A benchmark target to execute.\n\nEither `exec` or `entrypoint` must be specified (mutually exclusive).",
"type": "object",
- "required": [
- "exec"
- ],
"properties": {
- "exec": {
- "description": "Command to execute",
- "type": "string"
- },
"name": {
- "description": "Optional name for this target",
+ "description": "Optional name for this target (display purposes only)",
+ "type": [
+ "string",
+ "null"
+ ]
+ },
+ "id": {
+ "description": "Optional id to run a subset of targets (e.g. `codspeed run --bench my_id`)",
"type": [
"string",
"null"
@@ -99,29 +99,53 @@
},
"options": {
"description": "Target-specific options",
- "anyOf": [
+ "oneOf": [
{
- "$ref": "#/definitions/TargetOptions"
+ "$ref": "#/$defs/TargetOptions"
},
{
"type": "null"
}
]
}
- }
+ },
+ "oneOf": [
+ {
+ "description": "Command measured by exec-harness",
+ "type": "object",
+ "properties": {
+ "exec": {
+ "type": "string"
+ }
+ },
+ "required": [
+ "exec"
+ ]
+ },
+ {
+ "description": "Command with built-in benchmark harness",
+ "type": "object",
+ "properties": {
+ "entrypoint": {
+ "type": "string"
+ }
+ },
+ "required": [
+ "entrypoint"
+ ]
+ }
+ ]
},
"TargetOptions": {
"description": "Walltime execution options matching WalltimeExecutionArgs structure",
"type": "object",
"properties": {
- "max-rounds": {
- "description": "Maximum number of rounds",
+ "warmup-time": {
+ "description": "Duration of warmup phase (e.g., \"1s\", \"500ms\")",
"type": [
- "integer",
+ "string",
"null"
- ],
- "format": "uint64",
- "minimum": 0.0
+ ]
},
"max-time": {
"description": "Maximum total execution time",
@@ -130,15 +154,6 @@
"null"
]
},
- "min-rounds": {
- "description": "Minimum number of rounds",
- "type": [
- "integer",
- "null"
- ],
- "format": "uint64",
- "minimum": 0.0
- },
"min-time": {
"description": "Minimum total execution time",
"type": [
@@ -146,12 +161,23 @@
"null"
]
},
- "warmup-time": {
- "description": "Duration of warmup phase (e.g., \"1s\", \"500ms\")",
+ "max-rounds": {
+ "description": "Maximum number of rounds",
"type": [
- "string",
+ "integer",
"null"
- ]
+ ],
+ "format": "uint64",
+ "minimum": 0
+ },
+ "min-rounds": {
+ "description": "Minimum number of rounds",
+ "type": [
+ "integer",
+ "null"
+ ],
+ "format": "uint64",
+ "minimum": 0
}
}
}
diff --git a/src/bin/generate_config_schema.rs b/src/bin/generate_config_schema.rs
index 72996a08..e37b8625 100644
--- a/src/bin/generate_config_schema.rs
+++ b/src/bin/generate_config_schema.rs
@@ -8,12 +8,31 @@
use std::fs;
use codspeed_runner::ProjectConfig;
-use schemars::schema_for;
+use schemars::Schema;
+use schemars::generate::SchemaSettings;
+use schemars::transform::{Transform, transform_subschemas};
const OUTPUT_FILE: &str = "schemas/codspeed.schema.json";
+/// Rewrites `anyOf` to `oneOf` in all schemas (used for untagged enums
+/// where variants are mutually exclusive).
+#[derive(Clone)]
+struct AnyOfToOneOf;
+
+impl Transform for AnyOfToOneOf {
+ fn transform(&mut self, schema: &mut Schema) {
+ if let Some(any_of) = schema.remove("anyOf") {
+ schema.insert("oneOf".to_string(), any_of);
+ }
+ transform_subschemas(self, schema);
+ }
+}
+
fn main() {
- let schema = schema_for!(ProjectConfig);
+ let generator = SchemaSettings::default()
+ .with_transform(AnyOfToOneOf)
+ .into_generator();
+    let schema = generator.into_root_schema_for::<ProjectConfig>();
let schema_json = serde_json::to_string_pretty(&schema).expect("Failed to serialize schema");
let output_file_path = std::path::Path::new(OUTPUT_FILE);
fs::create_dir_all(output_file_path.parent().unwrap())
diff --git a/src/cli/exec/mod.rs b/src/cli/exec/mod.rs
index 3f7f0d01..accdf706 100644
--- a/src/cli/exec/mod.rs
+++ b/src/cli/exec/mod.rs
@@ -1,8 +1,9 @@
use super::ExecAndRunSharedArgs;
use crate::api_client::CodSpeedAPIClient;
-use crate::binary_installer::ensure_binary_installed;
use crate::config::CodSpeedConfig;
use crate::executor;
+use crate::executor::config::{self, OrchestratorConfig, RepositoryOverride};
+use crate::instruments::Instruments;
use crate::prelude::*;
use crate::project_config::ProjectConfig;
use crate::project_config::merger::ConfigMerger;
@@ -10,29 +11,15 @@ use crate::upload::UploadResult;
use crate::upload::poll_results::{PollResultsOptions, poll_results};
use clap::Args;
use std::path::Path;
+use url::Url;
pub mod multi_targets;
/// We temporarily force this name for all exec runs
pub const DEFAULT_REPOSITORY_NAME: &str = "local-runs";
-const EXEC_HARNESS_COMMAND: &str = "exec-harness";
-const EXEC_HARNESS_VERSION: &str = "1.2.0";
-
-/// Wraps a command with exec-harness and the given walltime arguments.
-///
-/// This produces a shell command string like:
-/// `exec-harness --warmup-time 1s --max-rounds 10 sleep 0.1`
-pub fn wrap_with_exec_harness(
- walltime_args: &exec_harness::walltime::WalltimeExecutionArgs,
- command: &[String],
-) -> String {
- shell_words::join(
- std::iter::once(EXEC_HARNESS_COMMAND)
- .chain(walltime_args.to_cli_args().iter().map(|s| s.as_str()))
- .chain(command.iter().map(|s| s.as_str())),
- )
-}
+pub const EXEC_HARNESS_COMMAND: &str = "exec-harness";
+pub const EXEC_HARNESS_VERSION: &str = "1.2.0";
#[derive(Args, Debug)]
pub struct ExecArgs {
@@ -72,6 +59,42 @@ impl ExecArgs {
}
}
+fn build_orchestrator_config(
+ args: ExecArgs,
+ target: executor::BenchmarkTarget,
+) -> Result<OrchestratorConfig> {
+ let modes = args.shared.resolve_modes()?;
+ let raw_upload_url = args
+ .shared
+ .upload_url
+ .unwrap_or_else(|| config::DEFAULT_UPLOAD_URL.into());
+ let upload_url = Url::parse(&raw_upload_url)
+ .map_err(|e| anyhow!("Invalid upload URL: {raw_upload_url}, {e}"))?;
+
+ Ok(OrchestratorConfig {
+ upload_url,
+ token: args.shared.token,
+ repository_override: args
+ .shared
+ .repository
+ .map(|repo| RepositoryOverride::from_arg(repo, args.shared.provider))
+ .transpose()?,
+ working_directory: args.shared.working_directory,
+ targets: vec![target],
+ modes,
+ instruments: Instruments { mongodb: None }, // exec doesn't support MongoDB
+ perf_unwinding_mode: args.shared.perf_run_args.perf_unwinding_mode,
+ enable_perf: args.shared.perf_run_args.enable_perf,
+ simulation_tool: args.shared.simulation_tool.unwrap_or_default(),
+ profile_folder: args.shared.profile_folder,
+ skip_upload: args.shared.skip_upload,
+ skip_run: args.shared.skip_run,
+ skip_setup: args.shared.skip_setup,
+ allow_empty: args.shared.allow_empty,
+ go_runner_version: args.shared.go_runner_version,
+ })
+}
+
pub async fn run(
args: ExecArgs,
api_client: &CodSpeedAPIClient,
@@ -80,44 +103,33 @@ pub async fn run(
setup_cache_dir: Option<&Path>,
) -> Result<()> {
let merged_args = args.merge_with_project_config(project_config);
- let config = crate::executor::Config::try_from(merged_args)?;
+ let target = executor::BenchmarkTarget::Exec {
+ command: merged_args.command.clone(),
+ name: merged_args.name.clone(),
+ walltime_args: merged_args.walltime_args.clone(),
+ };
+ let config = build_orchestrator_config(merged_args, target)?;
- execute_with_harness(config, api_client, codspeed_config, setup_cache_dir).await
+ execute_config(config, api_client, codspeed_config, setup_cache_dir).await
}
-/// Core execution logic for exec-harness based runs.
+/// Core execution logic shared by `codspeed exec` and `codspeed run` with config targets.
///
-/// This function handles exec-harness installation and benchmark execution with exec-style
-/// result polling. It is used by both `codspeed exec` directly and by `codspeed run` when
-/// executing targets defined in codspeed.yaml.
-pub async fn execute_with_harness(
- mut config: crate::executor::Config,
+/// Sets up the orchestrator and drives execution. Exec-harness installation is handled
+/// by the orchestrator when exec targets are present.
+pub async fn execute_config(
+ config: OrchestratorConfig,
api_client: &CodSpeedAPIClient,
codspeed_config: &CodSpeedConfig,
setup_cache_dir: Option<&Path>,
) -> Result<()> {
- let orchestrator =
- executor::Orchestrator::new(&mut config, codspeed_config, api_client).await?;
+ let orchestrator = executor::Orchestrator::new(config, codspeed_config, api_client).await?;
if !orchestrator.is_local() {
super::show_banner();
}
- debug!("config: {config:#?}");
-
- let get_exec_harness_installer_url = || {
- format!(
- "https://github.com/CodSpeedHQ/codspeed/releases/download/exec-harness-v{EXEC_HARNESS_VERSION}/exec-harness-installer.sh"
- )
- };
-
- // Ensure the exec-harness is installed
- ensure_binary_installed(
- EXEC_HARNESS_COMMAND,
- EXEC_HARNESS_VERSION,
- get_exec_harness_installer_url,
- )
- .await?;
+ debug!("config: {:#?}", orchestrator.config);
let poll_opts = PollResultsOptions::for_exec();
let poll_results_fn = async |upload_result: &UploadResult| {
@@ -125,7 +137,7 @@ pub async fn execute_with_harness(
};
orchestrator
- .execute(&mut config, setup_cache_dir, poll_results_fn)
+ .execute(setup_cache_dir, poll_results_fn)
.await?;
Ok(())
diff --git a/src/cli/exec/multi_targets.rs b/src/cli/exec/multi_targets.rs
index b7c25c62..1841f8a4 100644
--- a/src/cli/exec/multi_targets.rs
+++ b/src/cli/exec/multi_targets.rs
@@ -1,36 +1,9 @@
use super::EXEC_HARNESS_COMMAND;
+use crate::executor::config::BenchmarkTarget;
use crate::prelude::*;
-use crate::project_config::Target;
-use crate::project_config::WalltimeOptions;
+use crate::project_config::{Target, TargetCommand, WalltimeOptions};
use exec_harness::BenchmarkCommand;
-/// Convert targets from project config to exec-harness JSON input format
-pub fn targets_to_exec_harness_json(
- targets: &[Target],
- default_walltime: Option<&WalltimeOptions>,
-) -> Result<String> {
-    let inputs: Vec<BenchmarkCommand> = targets
- .iter()
- .map(|target| {
- // Parse the exec string into command parts
- let command = shell_words::split(&target.exec)
- .with_context(|| format!("Failed to parse command: {}", target.exec))?;
-
- // Merge target-specific walltime options with defaults
- let target_walltime = target.options.as_ref().and_then(|o| o.walltime.as_ref());
- let walltime_args = merge_walltime_options(default_walltime, target_walltime);
-
- Ok(BenchmarkCommand {
- command,
- name: target.name.clone(),
- walltime_args,
- })
- })
-        .collect::<Result<Vec<_>>>()?;
-
- serde_json::to_string(&inputs).context("Failed to serialize targets to JSON")
-}
-
/// Merge default walltime options with target-specific overrides
fn merge_walltime_options(
default: Option<&WalltimeOptions>,
@@ -66,19 +39,62 @@ fn walltime_options_to_args(
}
}
-/// Build a command that pipes targets JSON to exec-harness via stdin
-pub fn build_pipe_command(
+/// Convert project config targets into [`BenchmarkTarget`] instances.
+///
+/// Exec targets are each converted to a `BenchmarkTarget::Exec`.
+/// Entrypoint targets are each converted to a `BenchmarkTarget::Entrypoint`.
+pub fn build_benchmark_targets(
targets: &[Target],
default_walltime: Option<&WalltimeOptions>,
-) -> Result<Vec<String>> {
- let json = targets_to_exec_harness_json(targets, default_walltime)?;
- // Use a heredoc to safely pass the JSON to exec-harness
- Ok(vec![
- EXEC_HARNESS_COMMAND.to_owned(),
- "-".to_owned(),
- "<<".to_owned(),
- "'CODSPEED_EOF'\n".to_owned(),
- json,
- "\nCODSPEED_EOF".to_owned(),
- ])
+) -> Result<Vec<BenchmarkTarget>> {
+ targets
+ .iter()
+ .map(|target| match &target.command {
+ TargetCommand::Exec { exec } => {
+ let command = shell_words::split(exec)
+ .with_context(|| format!("Failed to parse command: {exec}"))?;
+ let target_walltime = target.options.as_ref().and_then(|o| o.walltime.as_ref());
+ let walltime_args = merge_walltime_options(default_walltime, target_walltime);
+ Ok(BenchmarkTarget::Exec {
+ command,
+ name: target.name.clone(),
+ walltime_args,
+ })
+ }
+ TargetCommand::Entrypoint { entrypoint } => Ok(BenchmarkTarget::Entrypoint {
+ command: entrypoint.clone(),
+ name: target.name.clone(),
+ }),
+ })
+ .collect()
+}
+
+/// Build a shell command string that pipes BenchmarkTarget::Exec variants to exec-harness via stdin
+pub fn build_exec_targets_pipe_command(
+ targets: &[&crate::executor::config::BenchmarkTarget],
+) -> Result<String> {
+    let inputs: Vec<BenchmarkCommand> = targets
+ .iter()
+ .map(|target| match target {
+ crate::executor::config::BenchmarkTarget::Exec {
+ command,
+ name,
+ walltime_args,
+ } => Ok(BenchmarkCommand {
+ command: command.clone(),
+ name: name.clone(),
+ walltime_args: walltime_args.clone(),
+ }),
+ crate::executor::config::BenchmarkTarget::Entrypoint { .. } => {
+ bail!("Entrypoint targets cannot be used with exec-harness pipe command")
+ }
+ })
+        .collect::<Result<Vec<_>>>()?;
+
+ let json = serde_json::to_string(&inputs).context("Failed to serialize targets to JSON")?;
+ Ok(build_pipe_command_from_json(&json))
+}
+
+fn build_pipe_command_from_json(json: &str) -> String {
+ format!("{EXEC_HARNESS_COMMAND} - <<'CODSPEED_EOF'\n{json}\nCODSPEED_EOF")
}
diff --git a/src/cli/run/mod.rs b/src/cli/run/mod.rs
index 358d7d80..1d57094b 100644
--- a/src/cli/run/mod.rs
+++ b/src/cli/run/mod.rs
@@ -2,7 +2,8 @@ use super::ExecAndRunSharedArgs;
use crate::api_client::CodSpeedAPIClient;
use crate::config::CodSpeedConfig;
use crate::executor;
-use crate::executor::Config;
+use crate::executor::config::{self, OrchestratorConfig, RepositoryOverride};
+use crate::instruments::Instruments;
use crate::prelude::*;
use crate::project_config::ProjectConfig;
use crate::project_config::merger::ConfigMerger;
@@ -11,6 +12,7 @@ use crate::upload::UploadResult;
use crate::upload::poll_results::{PollResultsOptions, poll_results};
use clap::{Args, ValueEnum};
use std::path::Path;
+use url::Url;
pub mod helpers;
pub mod logger;
@@ -91,14 +93,49 @@ impl RunArgs {
}
}
-use crate::project_config::Target;
-use crate::project_config::WalltimeOptions;
+fn build_orchestrator_config(
+ args: RunArgs,
+    targets: Vec<executor::BenchmarkTarget>,
+) -> Result<OrchestratorConfig> {
+ let instruments = Instruments::try_from(&args)?;
+ let modes = args.shared.resolve_modes()?;
+ let raw_upload_url = args
+ .shared
+ .upload_url
+ .unwrap_or_else(|| config::DEFAULT_UPLOAD_URL.into());
+ let upload_url = Url::parse(&raw_upload_url)
+ .map_err(|e| anyhow!("Invalid upload URL: {raw_upload_url}, {e}"))?;
+
+ Ok(OrchestratorConfig {
+ upload_url,
+ token: args.shared.token,
+ repository_override: args
+ .shared
+ .repository
+ .map(|repo| RepositoryOverride::from_arg(repo, args.shared.provider))
+ .transpose()?,
+ working_directory: args.shared.working_directory,
+ targets,
+ modes,
+ instruments,
+ perf_unwinding_mode: args.shared.perf_run_args.perf_unwinding_mode,
+ enable_perf: args.shared.perf_run_args.enable_perf,
+ simulation_tool: args.shared.simulation_tool.unwrap_or_default(),
+ profile_folder: args.shared.profile_folder,
+ skip_upload: args.shared.skip_upload,
+ skip_run: args.shared.skip_run,
+ skip_setup: args.shared.skip_setup,
+ allow_empty: args.shared.allow_empty,
+ go_runner_version: args.shared.go_runner_version,
+ })
+}
+
+use crate::project_config::{Target, WalltimeOptions};
/// Determines the execution mode based on CLI args and project config
enum RunTarget<'a> {
/// Single command from CLI args
SingleCommand(RunArgs),
/// Multiple targets from project config
- /// Note: for now, only `codspeed exec` targets are supported in the project config
ConfigTargets {
args: RunArgs,
targets: &'a [Target],
@@ -141,35 +178,40 @@ pub async fn run(
match run_target {
RunTarget::SingleCommand(args) => {
- let mut config = Config::try_from(args)?;
-
+ let command = args.command.join(" ");
+ let config = build_orchestrator_config(
+ args,
+ vec![executor::BenchmarkTarget::Entrypoint {
+ command,
+ name: None,
+ }],
+ )?;
let orchestrator =
- executor::Orchestrator::new(&mut config, codspeed_config, api_client).await?;
+ executor::Orchestrator::new(config, codspeed_config, api_client).await?;
if !orchestrator.is_local() {
super::show_banner();
}
- debug!("config: {config:#?}");
+ debug!("config: {:#?}", orchestrator.config);
let poll_opts = PollResultsOptions::for_run(output_json);
let poll_results_fn = async |upload_result: &UploadResult| {
poll_results(api_client, upload_result, &poll_opts).await
};
orchestrator
- .execute(&mut config, setup_cache_dir, poll_results_fn)
+ .execute(setup_cache_dir, poll_results_fn)
.await?;
}
RunTarget::ConfigTargets {
- mut args,
+ args,
targets,
default_walltime,
} => {
- args.command =
- super::exec::multi_targets::build_pipe_command(targets, default_walltime)?;
- let config = Config::try_from(args)?;
-
- super::exec::execute_with_harness(config, api_client, codspeed_config, setup_cache_dir)
+ let benchmark_targets =
+ super::exec::multi_targets::build_benchmark_targets(targets, default_walltime)?;
+ let config = build_orchestrator_config(args, benchmark_targets)?;
+ super::exec::execute_config(config, api_client, codspeed_config, setup_cache_dir)
.await?;
}
}
diff --git a/src/executor/config.rs b/src/executor/config.rs
index daaba52f..fb8e8e5b 100644
--- a/src/executor/config.rs
+++ b/src/executor/config.rs
@@ -1,6 +1,4 @@
use crate::cli::UnwindingMode;
-use crate::cli::exec::wrap_with_exec_harness;
-use crate::cli::run::RunArgs;
use crate::instruments::Instruments;
use crate::prelude::*;
use crate::run_environment::RepositoryProvider;
@@ -10,6 +8,28 @@ use semver::Version;
use std::path::PathBuf;
use url::Url;
+/// A benchmark target from project configuration.
+///
+/// Defines how a benchmark is executed:
+/// - `Exec`: a plain command measured by exec-harness (all exec targets share one invocation)
+/// - `Entrypoint`: a command that already contains benchmark harnessing (run independently)
+#[derive(Debug, Clone)]
+pub enum BenchmarkTarget {
+ /// A command measured by exec-harness (e.g. `ls -al /nix/store`)
+ Exec {
+        command: Vec<String>,
+        name: Option<String>,
+ walltime_args: exec_harness::walltime::WalltimeExecutionArgs,
+ },
+ /// A command with built-in harness (e.g. `pytest --codspeed src`)
+ Entrypoint {
+ command: String,
+ // We do not use it yet, temporarily allow
+ #[allow(dead_code)]
+        name: Option<String>,
+ },
+}
+
/// The Valgrind tool to use for simulation mode.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default, ValueEnum)]
pub enum SimulationTool {
@@ -20,20 +40,20 @@ pub enum SimulationTool {
Tracegrind,
}
-/// Execution configuration for running benchmarks.
+/// Run-level configuration owned by the orchestrator.
///
-/// This struct contains all the configuration parameters needed to execute benchmarks,
-/// including API settings, execution mode, instrumentation options, and various flags
-/// for controlling the execution flow. It is typically constructed from command-line
-/// arguments via [`RunArgs`] and combined with [`CodSpeedConfig`] to create an
-/// [`ExecutionContext`].
+/// Holds all parameters that are constant across benchmark targets within a run,
+/// plus the list of targets to execute.
+/// Constructed from CLI arguments and passed to [`Orchestrator::new`].
+/// Use [`OrchestratorConfig::executor_config_for_command`] to produce a per-execution [`ExecutorConfig`].
#[derive(Debug, Clone)]
-pub struct Config {
+pub struct OrchestratorConfig {
pub upload_url: Url,
     pub token: Option<String>,
     pub repository_override: Option<RepositoryOverride>,
     pub working_directory: Option<String>,
-    pub command: String,
+
+    pub targets: Vec<BenchmarkTarget>,
     pub modes: Vec<RunnerMode>,
pub instruments: Instruments,
@@ -53,6 +73,34 @@ pub struct Config {
     pub go_runner_version: Option<Version>,
}
+/// Per-execution configuration passed to executors.
+///
+/// Produced by [`OrchestratorConfig::executor_config_for_command`]; holds the `command` string
+/// that the executor will run, plus executor-specific fields.
+/// Fields that are only needed at the orchestrator level (e.g. `upload_url`,
+/// `skip_upload`, `repository_override`) live on [`OrchestratorConfig`].
+#[derive(Debug, Clone)]
+pub struct ExecutorConfig {
+    pub token: Option<String>,
+    pub working_directory: Option<String>,
+    pub command: String,
+
+    pub instruments: Instruments,
+    pub enable_perf: bool,
+    /// Stack unwinding mode for perf (if enabled)
+    pub perf_unwinding_mode: Option<UnwindingMode>,
+
+    pub simulation_tool: SimulationTool,
+
+    pub profile_folder: Option<PathBuf>,
+    pub skip_run: bool,
+    pub skip_setup: bool,
+    /// If true, allow execution even when no benchmarks are found
+    pub allow_empty: bool,
+    /// The version of go-runner to install (if None, installs latest)
+    pub go_runner_version: Option<Version>,
+}
+
#[derive(Debug, Clone, PartialEq)]
pub struct RepositoryOverride {
pub owner: String,
@@ -77,29 +125,70 @@ impl RepositoryOverride {
}
}
-impl Config {
+pub const DEFAULT_UPLOAD_URL: &str = "https://api.codspeed.io/upload";
+
+impl OrchestratorConfig {
     pub fn set_token(&mut self, token: Option<String>) {
self.token = token;
}
- /// Create a clone of this config pinned to a single mode.
- pub fn for_mode(&self, mode: &RunnerMode) -> Config {
- let mut c = self.clone();
- c.modes = vec![mode.clone()];
- c
+ /// Compute the total number of executor runs that will be performed.
+ ///
+ /// All `Exec` targets are combined into a single invocation, while each
+ /// `Entrypoint` target runs independently. Both are multiplied by the
+ /// number of configured modes.
+ pub fn expected_run_parts_count(&self) -> u32 {
+ let has_exec = self
+ .targets
+ .iter()
+ .any(|t| matches!(t, BenchmarkTarget::Exec { .. }));
+ let entrypoint_count = self
+ .targets
+ .iter()
+ .filter(|t| matches!(t, BenchmarkTarget::Entrypoint { .. }))
+ .count();
+ let invocation_count = (if has_exec { 1 } else { 0 }) + entrypoint_count;
+ (invocation_count * self.modes.len()) as u32
+ }
+
+ /// Produce a per-execution [`ExecutorConfig`] for the given command and mode.
+ pub fn executor_config_for_command(&self, command: String) -> ExecutorConfig {
+ ExecutorConfig {
+ token: self.token.clone(),
+ working_directory: self.working_directory.clone(),
+ command,
+ instruments: self.instruments.clone(),
+ enable_perf: self.enable_perf,
+ perf_unwinding_mode: self.perf_unwinding_mode,
+ simulation_tool: self.simulation_tool,
+ profile_folder: self.profile_folder.clone(),
+ skip_run: self.skip_run,
+ skip_setup: self.skip_setup,
+ allow_empty: self.allow_empty,
+ go_runner_version: self.go_runner_version.clone(),
+ }
+ }
+}
+
+impl ExecutorConfig {
+    pub fn set_token(&mut self, token: Option<String>) {
+ self.token = token;
}
}
#[cfg(test)]
-impl Config {
- /// Constructs a new `Config` with default values for testing purposes
+impl OrchestratorConfig {
+ /// Constructs a new `OrchestratorConfig` with default values for testing purposes
pub fn test() -> Self {
Self {
upload_url: Url::parse(DEFAULT_UPLOAD_URL).unwrap(),
token: None,
repository_override: None,
working_directory: None,
- command: "".into(),
+ targets: vec![BenchmarkTarget::Entrypoint {
+ command: String::new(),
+ name: None,
+ }],
modes: vec![RunnerMode::Simulation],
instruments: Instruments::test(),
perf_unwinding_mode: None,
@@ -115,196 +204,119 @@ impl Config {
}
}
-const DEFAULT_UPLOAD_URL: &str = "https://api.codspeed.io/upload";
-
-impl TryFrom<RunArgs> for Config {
- type Error = Error;
-    fn try_from(args: RunArgs) -> Result<Self> {
- let instruments = Instruments::try_from(&args)?;
- let modes = args.shared.resolve_modes()?;
- let raw_upload_url = args
- .shared
- .upload_url
- .unwrap_or_else(|| DEFAULT_UPLOAD_URL.into());
- let upload_url = Url::parse(&raw_upload_url)
- .map_err(|e| anyhow!("Invalid upload URL: {raw_upload_url}, {e}"))?;
-
- Ok(Self {
- upload_url,
- token: args.shared.token,
- repository_override: args
- .shared
- .repository
- .map(|repo| RepositoryOverride::from_arg(repo, args.shared.provider))
- .transpose()?,
- working_directory: args.shared.working_directory,
- modes,
- instruments,
- perf_unwinding_mode: args.shared.perf_run_args.perf_unwinding_mode,
- enable_perf: args.shared.perf_run_args.enable_perf,
- command: args.command.join(" "),
- simulation_tool: args.shared.simulation_tool.unwrap_or_default(),
- profile_folder: args.shared.profile_folder,
- skip_upload: args.shared.skip_upload,
- skip_run: args.shared.skip_run,
- skip_setup: args.shared.skip_setup,
- allow_empty: args.shared.allow_empty,
- go_runner_version: args.shared.go_runner_version,
- })
- }
-}
-
-impl Config {
- /// Create a Config from ExecArgs with a custom command (used for targets mode)
- pub fn try_from_with_command(
- args: crate::cli::exec::ExecArgs,
- command: String,
-    ) -> Result<Self> {
- let modes = args.shared.resolve_modes()?;
- let raw_upload_url = args
- .shared
- .upload_url
- .unwrap_or_else(|| DEFAULT_UPLOAD_URL.into());
- let upload_url = Url::parse(&raw_upload_url)
- .map_err(|e| anyhow!("Invalid upload URL: {raw_upload_url}, {e}"))?;
-
- Ok(Self {
- upload_url,
- token: args.shared.token,
- repository_override: args
- .shared
- .repository
- .map(|repo| RepositoryOverride::from_arg(repo, args.shared.provider))
- .transpose()?,
- working_directory: args.shared.working_directory,
- modes,
- instruments: Instruments { mongodb: None }, // exec doesn't support MongoDB
- perf_unwinding_mode: args.shared.perf_run_args.perf_unwinding_mode,
- enable_perf: args.shared.perf_run_args.enable_perf,
- command,
- simulation_tool: args.shared.simulation_tool.unwrap_or_default(),
- profile_folder: args.shared.profile_folder,
- skip_upload: args.shared.skip_upload,
- skip_run: args.shared.skip_run,
- skip_setup: args.shared.skip_setup,
- allow_empty: args.shared.allow_empty,
- go_runner_version: args.shared.go_runner_version,
- })
- }
-}
-
-impl TryFrom<crate::cli::exec::ExecArgs> for Config {
- type Error = Error;
-    fn try_from(args: crate::cli::exec::ExecArgs) -> Result<Self> {
- let wrapped_command = wrap_with_exec_harness(&args.walltime_args, &args.command);
- Self::try_from_with_command(args, wrapped_command)
+#[cfg(test)]
+impl ExecutorConfig {
+ /// Constructs a new `ExecutorConfig` with default values for testing purposes
+ pub fn test() -> Self {
+ OrchestratorConfig::test().executor_config_for_command("".into())
}
}
#[cfg(test)]
mod tests {
- use crate::cli::PerfRunArgs;
- use crate::instruments::MongoDBConfig;
-
use super::*;
#[test]
- fn test_try_from_env_empty() {
- let config = Config::try_from(RunArgs {
- shared: crate::cli::ExecAndRunSharedArgs {
- upload_url: None,
- token: None,
- repository: None,
- provider: None,
- working_directory: None,
- mode: vec![RunnerMode::Simulation],
- simulation_tool: None,
- profile_folder: None,
- skip_upload: false,
- skip_run: false,
- skip_setup: false,
- allow_empty: false,
- go_runner_version: None,
- perf_run_args: PerfRunArgs {
- enable_perf: false,
- perf_unwinding_mode: None,
+ fn test_expected_run_parts_count() {
+ use crate::runner_mode::RunnerMode;
+
+ let base = OrchestratorConfig::test();
+
+ // Single entrypoint, single mode → 1
+ let config = OrchestratorConfig {
+ targets: vec![BenchmarkTarget::Entrypoint {
+ command: "cmd".into(),
+ name: None,
+ }],
+ modes: vec![RunnerMode::Simulation],
+ ..base.clone()
+ };
+ assert_eq!(config.expected_run_parts_count(), 1);
+
+ // Two entrypoints, single mode → 2
+ let config = OrchestratorConfig {
+ targets: vec![
+ BenchmarkTarget::Entrypoint {
+ command: "cmd1".into(),
+ name: None,
},
- },
- instruments: vec![],
- mongo_uri_env_name: None,
- message_format: None,
- command: vec!["cargo".into(), "codspeed".into(), "bench".into()],
- })
- .unwrap();
- assert_eq!(config.upload_url, Url::parse(DEFAULT_UPLOAD_URL).unwrap());
- assert_eq!(config.token, None);
- assert_eq!(config.repository_override, None);
- assert_eq!(config.working_directory, None);
- assert_eq!(config.instruments, Instruments { mongodb: None });
- assert!(!config.skip_upload);
- assert!(!config.skip_run);
- assert!(!config.skip_setup);
- assert!(!config.allow_empty);
- assert_eq!(config.command, "cargo codspeed bench");
- }
+ BenchmarkTarget::Entrypoint {
+ command: "cmd2".into(),
+ name: None,
+ },
+ ],
+ modes: vec![RunnerMode::Simulation],
+ ..base.clone()
+ };
+ assert_eq!(config.expected_run_parts_count(), 2);
- #[test]
- fn test_try_from_args() {
- let config = Config::try_from(RunArgs {
- shared: crate::cli::ExecAndRunSharedArgs {
- upload_url: Some("https://example.com/upload".into()),
- token: Some("token".into()),
- repository: Some("owner/repo".into()),
- provider: Some(RepositoryProvider::GitLab),
- working_directory: Some("/tmp".into()),
- mode: vec![RunnerMode::Simulation],
- simulation_tool: None,
- profile_folder: Some("./codspeed.out".into()),
- skip_upload: true,
- skip_run: true,
- skip_setup: true,
- allow_empty: true,
- go_runner_version: None,
- perf_run_args: PerfRunArgs {
- enable_perf: false,
- perf_unwinding_mode: Some(UnwindingMode::FramePointer),
+ // Multiple exec targets count as one invocation, single mode → 1
+ let config = OrchestratorConfig {
+ targets: vec![
+ BenchmarkTarget::Exec {
+ command: vec!["exec1".into()],
+ name: None,
+ walltime_args: Default::default(),
},
- },
- instruments: vec!["mongodb".into()],
- mongo_uri_env_name: Some("MONGODB_URI".into()),
- message_format: Some(crate::cli::run::MessageFormat::Json),
- command: vec!["cargo".into(), "codspeed".into(), "bench".into()],
- })
- .unwrap();
+ BenchmarkTarget::Exec {
+ command: vec!["exec2".into()],
+ name: None,
+ walltime_args: Default::default(),
+ },
+ ],
+ modes: vec![RunnerMode::Simulation],
+ ..base.clone()
+ };
+ assert_eq!(config.expected_run_parts_count(), 1);
- assert_eq!(
- config.upload_url,
- Url::parse("https://example.com/upload").unwrap()
- );
- assert_eq!(config.token, Some("token".into()));
- assert_eq!(
- config.repository_override,
- Some(RepositoryOverride {
- owner: "owner".into(),
- repository: "repo".into(),
- repository_provider: RepositoryProvider::GitLab,
- })
- );
- assert_eq!(config.working_directory, Some("/tmp".into()));
- assert_eq!(
- config.instruments,
- Instruments {
- mongodb: Some(MongoDBConfig {
- uri_env_name: Some("MONGODB_URI".into())
- })
- }
- );
- assert_eq!(config.profile_folder, Some("./codspeed.out".into()));
- assert!(config.skip_upload);
- assert!(config.skip_run);
- assert!(config.skip_setup);
- assert!(config.allow_empty);
- assert_eq!(config.command, "cargo codspeed bench");
+ // Mix of exec and entrypoint, single mode → 2
+ let config = OrchestratorConfig {
+ targets: vec![
+ BenchmarkTarget::Exec {
+ command: vec!["exec1".into()],
+ name: None,
+ walltime_args: Default::default(),
+ },
+ BenchmarkTarget::Entrypoint {
+ command: "cmd".into(),
+ name: None,
+ },
+ ],
+ modes: vec![RunnerMode::Simulation],
+ ..base.clone()
+ };
+ assert_eq!(config.expected_run_parts_count(), 2);
+
+ // Single entrypoint, two modes → 2
+ #[allow(deprecated)]
+ let config = OrchestratorConfig {
+ targets: vec![BenchmarkTarget::Entrypoint {
+ command: "cmd".into(),
+ name: None,
+ }],
+ modes: vec![RunnerMode::Simulation, RunnerMode::Walltime],
+ ..base.clone()
+ };
+ assert_eq!(config.expected_run_parts_count(), 2);
+
+ // Mix of exec and entrypoint, two modes → 4
+ #[allow(deprecated)]
+ let config = OrchestratorConfig {
+ targets: vec![
+ BenchmarkTarget::Exec {
+ command: vec!["exec1".into()],
+ name: None,
+ walltime_args: Default::default(),
+ },
+ BenchmarkTarget::Entrypoint {
+ command: "cmd".into(),
+ name: None,
+ },
+ ],
+ modes: vec![RunnerMode::Simulation, RunnerMode::Walltime],
+ ..base.clone()
+ };
+ assert_eq!(config.expected_run_parts_count(), 4);
}
#[test]
@@ -331,40 +343,4 @@ mod tests {
let result = RepositoryOverride::from_arg("CodSpeedHQ_runner".to_string(), None);
assert!(result.is_err());
}
-
- #[test]
- fn test_try_from_exec_args_default_url() {
- let exec_args = crate::cli::exec::ExecArgs {
- shared: crate::cli::ExecAndRunSharedArgs {
- upload_url: None,
- token: None,
- repository: None,
- provider: None,
- working_directory: None,
- mode: vec![RunnerMode::Simulation],
- simulation_tool: None,
- profile_folder: None,
- skip_upload: false,
- skip_run: false,
- skip_setup: false,
- allow_empty: false,
- go_runner_version: None,
- perf_run_args: PerfRunArgs {
- enable_perf: false,
- perf_unwinding_mode: None,
- },
- },
- walltime_args: Default::default(),
- name: None,
- command: vec!["my-binary".into(), "arg1".into(), "arg2".into()],
- };
-
- let config = Config::try_from(exec_args).unwrap();
-
- assert_eq!(
- config.upload_url,
- Url::parse("https://api.codspeed.io/upload").unwrap()
- );
- assert_eq!(config.command, "exec-harness my-binary arg1 arg2");
- }
}
diff --git a/src/executor/execution_context.rs b/src/executor/execution_context.rs
index ca71a3fc..8713287c 100644
--- a/src/executor/execution_context.rs
+++ b/src/executor/execution_context.rs
@@ -1,4 +1,4 @@
-use super::Config;
+use super::ExecutorConfig;
use std::path::PathBuf;
use super::create_profile_folder;
@@ -8,13 +8,13 @@ use super::create_profile_folder;
/// Contains only the mode-specific configuration and the profile folder path.
/// Shared state (provider, system_info, logger) lives in [`Orchestrator`].
pub struct ExecutionContext {
- pub config: Config,
+ pub config: ExecutorConfig,
/// Directory path where profiling data and results are stored
pub profile_folder: PathBuf,
}
impl ExecutionContext {
- pub fn new(config: Config) -> anyhow::Result {
+ pub fn new(config: ExecutorConfig) -> anyhow::Result {
let profile_folder = if let Some(profile_folder) = &config.profile_folder {
profile_folder.clone()
} else {
diff --git a/src/executor/helpers/env.rs b/src/executor/helpers/env.rs
index 51b6f714..07283b43 100644
--- a/src/executor/helpers/env.rs
+++ b/src/executor/helpers/env.rs
@@ -1,12 +1,12 @@
use std::{collections::HashMap, env::consts::ARCH, path::Path};
-use crate::executor::Config;
+use crate::executor::ExecutorConfig;
use crate::runner_mode::RunnerMode;
pub fn get_base_injected_env(
mode: RunnerMode,
profile_folder: &Path,
- config: &Config,
+ config: &ExecutorConfig,
) -> HashMap<&'static str, String> {
let runner_mode_internal_env_value = match mode {
// While the runner now deprecates the usage of instrumentation with a message, we
diff --git a/src/executor/helpers/get_bench_command.rs b/src/executor/helpers/get_bench_command.rs
index 4250f061..6635b449 100644
--- a/src/executor/helpers/get_bench_command.rs
+++ b/src/executor/helpers/get_bench_command.rs
@@ -1,7 +1,7 @@
-use crate::executor::Config;
+use crate::executor::ExecutorConfig;
use crate::prelude::*;
-pub fn get_bench_command(config: &Config) -> Result {
+pub fn get_bench_command(config: &ExecutorConfig) -> Result {
let bench_command = &config.command.trim();
if bench_command.is_empty() {
@@ -19,7 +19,7 @@ mod tests {
#[test]
fn test_get_bench_command_empty() {
- let config = Config::test();
+ let config = ExecutorConfig::test();
assert!(get_bench_command(&config).is_err());
assert_eq!(
get_bench_command(&config).unwrap_err().to_string(),
@@ -29,16 +29,16 @@ mod tests {
#[test]
fn test_get_bench_command_cargo() {
- let config = Config {
+ let config = ExecutorConfig {
command: "cargo codspeed bench".into(),
- ..Config::test()
+ ..ExecutorConfig::test()
};
assert_eq!(get_bench_command(&config).unwrap(), "cargo-codspeed bench");
}
#[test]
fn test_get_bench_command_multiline() {
- let config = Config {
+ let config = ExecutorConfig {
// TODO: use indoc! macro
command: r#"
cargo codspeed bench --features "foo bar"
@@ -46,7 +46,7 @@ pnpm vitest bench "my-app"
pytest tests/ --codspeed
"#
.into(),
- ..Config::test()
+ ..ExecutorConfig::test()
};
assert_eq!(
get_bench_command(&config).unwrap(),
diff --git a/src/executor/mod.rs b/src/executor/mod.rs
index a674dc97..79ac48ec 100644
--- a/src/executor/mod.rs
+++ b/src/executor/mod.rs
@@ -17,7 +17,7 @@ use crate::prelude::*;
use crate::runner_mode::RunnerMode;
use crate::system::SystemInfo;
use async_trait::async_trait;
-pub use config::Config;
+pub use config::{BenchmarkTarget, ExecutorConfig, OrchestratorConfig};
pub use execution_context::ExecutionContext;
pub use helpers::profile_folder::create_profile_folder;
pub use interfaces::ExecutorName;
diff --git a/src/executor/orchestrator.rs b/src/executor/orchestrator.rs
index d01a6d4d..0e732a4b 100644
--- a/src/executor/orchestrator.rs
+++ b/src/executor/orchestrator.rs
@@ -1,19 +1,25 @@
-use super::{Config, ExecutionContext, ExecutorName, get_executor_from_mode, run_executor};
+use super::{ExecutionContext, ExecutorName, get_executor_from_mode, run_executor};
use crate::api_client::CodSpeedAPIClient;
+use crate::binary_installer::ensure_binary_installed;
+use crate::cli::exec::{EXEC_HARNESS_COMMAND, EXEC_HARNESS_VERSION, multi_targets};
use crate::cli::run::logger::Logger;
use crate::config::CodSpeedConfig;
+use crate::executor::config::BenchmarkTarget;
+use crate::executor::config::OrchestratorConfig;
use crate::prelude::*;
use crate::run_environment::{self, RunEnvironment, RunEnvironmentProvider};
use crate::runner_mode::RunnerMode;
use crate::system::{self, SystemInfo};
use crate::upload::{UploadResult, upload};
+use serde_json::Value;
+use std::collections::BTreeMap;
use std::path::Path;
/// Shared orchestration state created once per CLI invocation.
///
-/// Contains the run environment provider, system info, and logger — things
-/// that are the same regardless of which executor mode is running.
+/// Holds the run-level configuration, environment provider, system info, and logger.
pub struct Orchestrator {
+ pub config: OrchestratorConfig,
pub system_info: SystemInfo,
pub provider: Box,
pub logger: Logger,
@@ -25,11 +31,11 @@ impl Orchestrator {
}
pub async fn new(
- config: &mut Config,
+ mut config: OrchestratorConfig,
codspeed_config: &CodSpeedConfig,
api_client: &CodSpeedAPIClient,
) -> Result {
- let provider = run_environment::get_provider(config, api_client).await?;
+ let provider = run_environment::get_provider(&config, api_client).await?;
let system_info = SystemInfo::new()?;
system::check_system(&system_info)?;
let logger = Logger::new(provider.as_ref())?;
@@ -51,34 +57,89 @@ impl Orchestrator {
}
Ok(Orchestrator {
+ config,
system_info,
provider,
logger,
})
}
- /// Execute benchmarks for all configured modes, then upload results.
- pub async fn execute(
- &self,
- config: &mut Config,
- setup_cache_dir: Option<&Path>,
- poll_results: F,
- ) -> Result<()>
+ /// Execute all benchmark targets for all configured modes, then upload results.
+ ///
+ /// Processes `self.config.targets` as follows:
+ /// - All `Exec` targets are combined into a single exec-harness invocation (one executor per mode)
+ /// - Each `Entrypoint` target is run independently (one executor per mode per target)
+ pub async fn execute(&self, setup_cache_dir: Option<&Path>, poll_results: F) -> Result<()>
where
F: AsyncFn(&UploadResult) -> Result<()>,
{
- // Phase 1: Run all executors
- let modes = config.modes.clone();
- let is_multi_mode = modes.len() > 1;
- let mut completed_runs: Vec<(ExecutionContext, ExecutorName)> = vec![];
- if !config.skip_run {
+ let exec_targets: Vec<&BenchmarkTarget> = self
+ .config
+ .targets
+ .iter()
+ .filter(|t| matches!(t, BenchmarkTarget::Exec { .. }))
+ .collect();
+
+ let entrypoint_targets: Vec<&BenchmarkTarget> = self
+ .config
+ .targets
+ .iter()
+ .filter(|t| matches!(t, BenchmarkTarget::Entrypoint { .. }))
+ .collect();
+
+ let mut all_completed_runs = vec![];
+
+ if !self.config.skip_run {
start_opened_group!("Running the benchmarks");
}
- for mode in &modes {
- if modes.len() > 1 {
+
+ // All exec targets combined into a single exec-harness invocation
+ if !exec_targets.is_empty() {
+ ensure_binary_installed(EXEC_HARNESS_COMMAND, EXEC_HARNESS_VERSION, || {
+ format!(
+ "https://github.com/CodSpeedHQ/codspeed/releases/download/exec-harness-v{EXEC_HARNESS_VERSION}/exec-harness-installer.sh"
+ )
+ })
+ .await?;
+
+ let pipe_cmd = multi_targets::build_exec_targets_pipe_command(&exec_targets)?;
+ let completed_runs = self.run_all_modes(pipe_cmd, setup_cache_dir).await?;
+ all_completed_runs.extend(completed_runs);
+ }
+
+ // Each entrypoint target runs independently
+ for target in entrypoint_targets {
+ let BenchmarkTarget::Entrypoint { command, .. } = target else {
+ unreachable!()
+ };
+ let completed_runs = self.run_all_modes(command.clone(), setup_cache_dir).await?;
+ all_completed_runs.extend(completed_runs);
+ }
+
+ if !self.config.skip_run {
+ end_group!();
+ }
+
+ self.upload_and_poll(all_completed_runs, &poll_results)
+ .await?;
+
+ Ok(())
+ }
+
+ /// Run the given command across all configured modes, returning completed run contexts.
+ async fn run_all_modes(
+ &self,
+ command: String,
+ setup_cache_dir: Option<&Path>,
+ ) -> Result> {
+ let modes = &self.config.modes;
+ let is_multi_mode = modes.len() > 1;
+ let mut completed_runs: Vec<(ExecutionContext, ExecutorName)> = vec![];
+ for mode in modes {
+ if is_multi_mode {
info!("Running benchmarks for {mode} mode");
}
- let mut per_mode_config = config.for_mode(mode);
+ let mut per_mode_config = self.config.executor_config_for_command(command.clone());
// For multi-mode runs, always create a fresh profile folder per mode
// even if the user specified one (to avoid modes overwriting each other).
if is_multi_mode {
@@ -90,18 +151,25 @@ impl Orchestrator {
run_executor(executor.as_ref(), self, &ctx, setup_cache_dir).await?;
completed_runs.push((ctx, executor.name()));
}
- if !config.skip_run {
- end_group!();
- }
+ Ok(completed_runs)
+ }
- // Phase 2: Upload all results
- if !config.skip_upload {
+ /// Upload completed runs and poll results.
+ async fn upload_and_poll(
+ &self,
+ mut completed_runs: Vec<(ExecutionContext, ExecutorName)>,
+ poll_results: F,
+ ) -> Result<()>
+ where
+ F: AsyncFn(&UploadResult) -> Result<()>,
+ {
+ let skip_upload = self.config.skip_upload;
+
+ if !skip_upload {
start_group!("Uploading results");
let last_upload_result = self.upload_all(&mut completed_runs).await?;
end_group!();
- // Phase 3: Poll results after all uploads are complete.
- // All uploads share the same run_id/owner/repository, so polling once is sufficient.
if self.is_local() {
poll_results(&last_upload_result).await?;
}
@@ -112,6 +180,22 @@ impl Orchestrator {
Ok(())
}
+ /// Build the structured suffix that differentiates this upload within the run.
+ fn build_run_part_suffix(
+ executor_name: &ExecutorName,
+ run_part_index: usize,
+ total_runs: usize,
+ ) -> BTreeMap {
+ let mut suffix = BTreeMap::from([(
+ "executor".to_string(),
+ Value::from(executor_name.to_string()),
+ )]);
+ if total_runs > 1 {
+ suffix.insert("run-part-index".to_string(), Value::from(run_part_index));
+ }
+ suffix
+ }
+
pub async fn upload_all(
&self,
completed_runs: &mut [(ExecutionContext, ExecutorName)],
@@ -119,7 +203,7 @@ impl Orchestrator {
let mut last_upload_result: Option = None;
let total_runs = completed_runs.len();
- for (ctx, executor_name) in completed_runs.iter_mut() {
+ for (run_part_index, (ctx, executor_name)) in completed_runs.iter_mut().enumerate() {
if !self.is_local() {
// OIDC tokens can expire quickly, so refresh just before each upload
self.provider.set_oidc_token(&mut ctx.config).await?;
@@ -128,7 +212,9 @@ impl Orchestrator {
if total_runs > 1 {
info!("Uploading results for {executor_name:?} executor");
}
- let upload_result = upload(self, ctx, executor_name.clone()).await?;
+ let run_part_suffix =
+ Self::build_run_part_suffix(executor_name, run_part_index, total_runs);
+ let upload_result = upload(self, ctx, executor_name.clone(), run_part_suffix).await?;
last_upload_result = Some(upload_result);
}
info!("Performance data uploaded");
diff --git a/src/executor/tests.rs b/src/executor/tests.rs
index 930a3f06..317d9f17 100644
--- a/src/executor/tests.rs
+++ b/src/executor/tests.rs
@@ -1,7 +1,6 @@
-use super::Config;
+use super::ExecutorConfig;
use crate::executor::ExecutionContext;
use crate::executor::Executor;
-use crate::runner_mode::RunnerMode;
use crate::system::SystemInfo;
use rstest_reuse::{self, *};
use shell_quote::{Bash, QuoteRefExt};
@@ -118,24 +117,12 @@ fn test_cases(#[case] cmd: &str) {}
#[case(ENV_TESTS[7])]
fn env_test_cases(#[case] env_case: (&str, &str)) {}
-async fn create_test_setup(config: Config) -> (ExecutionContext, TempDir) {
- use crate::executor::config::RepositoryOverride;
- use crate::run_environment::interfaces::RepositoryProvider;
-
+async fn create_test_setup(config: ExecutorConfig) -> (ExecutionContext, TempDir) {
let temp_dir = TempDir::new().unwrap();
let mut config_with_folder = config;
config_with_folder.profile_folder = Some(temp_dir.path().to_path_buf());
- // Provide a repository override so tests don't need a git repository
- if config_with_folder.repository_override.is_none() {
- config_with_folder.repository_override = Some(RepositoryOverride {
- owner: "test-owner".to_string(),
- repository: "test-repo".to_string(),
- repository_provider: RepositoryProvider::GitHub,
- });
- }
-
// Provide a test token so authentication doesn't fail
if config_with_folder.token.is_none() {
config_with_folder.token = Some("test-token".to_string());
@@ -180,11 +167,10 @@ mod valgrind {
(_lock, executor)
}
- fn valgrind_config(command: &str) -> Config {
- Config {
- modes: vec![RunnerMode::Simulation],
+ fn valgrind_config(command: &str) -> ExecutorConfig {
+ ExecutorConfig {
command: command.to_string(),
- ..Config::test()
+ ..ExecutorConfig::test()
}
}
@@ -248,12 +234,11 @@ mod walltime {
(permit, WallTimeExecutor::new())
}
- fn walltime_config(command: &str, enable_perf: bool) -> Config {
- Config {
- modes: vec![RunnerMode::Walltime],
+ fn walltime_config(command: &str, enable_perf: bool) -> ExecutorConfig {
+ ExecutorConfig {
command: command.to_string(),
enable_perf,
- ..Config::test()
+ ..ExecutorConfig::test()
}
}
@@ -352,12 +337,22 @@ fi
EXEC_HARNESS_COMMANDS.to_vec()
}
+ fn wrap_with_exec_harness(
+ walltime_args: &exec_harness::walltime::WalltimeExecutionArgs,
+ command: &[String],
+ ) -> String {
+ shell_words::join(
+ std::iter::once(crate::cli::exec::EXEC_HARNESS_COMMAND)
+ .chain(walltime_args.to_cli_args().iter().map(|s| s.as_str()))
+ .chain(command.iter().map(|s| s.as_str())),
+ )
+ }
+
// Ensure that the walltime executor works with the exec-harness
#[apply(exec_harness_test_cases)]
#[rstest::rstest]
#[test_log::test(tokio::test)]
async fn test_exec_harness(#[case] cmd: &str) {
- use crate::cli::exec::wrap_with_exec_harness;
use exec_harness::walltime::WalltimeExecutionArgs;
let (_permit, executor) = get_walltime_executor().await;
@@ -416,11 +411,10 @@ mod memory {
(permit, _lock, MemoryExecutor)
}
- fn memory_config(command: &str) -> Config {
- Config {
- modes: vec![RunnerMode::Memory],
+ fn memory_config(command: &str) -> ExecutorConfig {
+ ExecutorConfig {
command: command.to_string(),
- ..Config::test()
+ ..ExecutorConfig::test()
}
}
diff --git a/src/executor/valgrind/measure.rs b/src/executor/valgrind/measure.rs
index 823aafe0..48aafc37 100644
--- a/src/executor/valgrind/measure.rs
+++ b/src/executor/valgrind/measure.rs
@@ -1,4 +1,4 @@
-use crate::executor::Config;
+use crate::executor::ExecutorConfig;
use crate::executor::RunnerMode;
use crate::executor::config::SimulationTool;
use crate::executor::helpers::env::get_base_injected_env;
@@ -84,7 +84,7 @@ echo -n "$status" > "$2"
}
pub async fn measure(
- config: &Config,
+ config: &ExecutorConfig,
profile_folder: &Path,
mongo_tracer: &Option,
) -> Result<()> {
diff --git a/src/executor/wall_time/executor.rs b/src/executor/wall_time/executor.rs
index 54d84ef5..d1ccd50a 100644
--- a/src/executor/wall_time/executor.rs
+++ b/src/executor/wall_time/executor.rs
@@ -1,7 +1,7 @@
use super::helpers::validate_walltime_results;
use super::perf::PerfRunner;
-use crate::executor::Config;
use crate::executor::Executor;
+use crate::executor::ExecutorConfig;
use crate::executor::helpers::command::CommandBuilder;
use crate::executor::helpers::env::{get_base_injected_env, is_codspeed_debug_enabled};
use crate::executor::helpers::get_bench_command::get_bench_command;
@@ -83,7 +83,7 @@ impl WallTimeExecutor {
}
fn walltime_bench_cmd(
- config: &Config,
+ config: &ExecutorConfig,
execution_context: &ExecutionContext,
) -> Result<(NamedTempFile, NamedTempFile, CommandBuilder)> {
// Build additional PATH environment variables
diff --git a/src/executor/wall_time/perf/mod.rs b/src/executor/wall_time/perf/mod.rs
index d685f3ae..bfbfc9aa 100644
--- a/src/executor/wall_time/perf/mod.rs
+++ b/src/executor/wall_time/perf/mod.rs
@@ -1,7 +1,7 @@
#![cfg_attr(not(unix), allow(dead_code, unused_mut))]
use crate::cli::UnwindingMode;
-use crate::executor::Config;
+use crate::executor::ExecutorConfig;
use crate::executor::helpers::command::CommandBuilder;
use crate::executor::helpers::env::is_codspeed_debug_enabled;
use crate::executor::helpers::harvest_perf_maps_for_pids::harvest_perf_maps_for_pids;
@@ -87,7 +87,7 @@ impl PerfRunner {
pub async fn run(
&self,
mut cmd_builder: CommandBuilder,
- config: &Config,
+ config: &ExecutorConfig,
profile_folder: &Path,
) -> anyhow::Result {
let perf_fifo = PerfFifo::new()?;
diff --git a/src/project_config/interfaces.rs b/src/project_config/interfaces.rs
index 9daba3a7..2d7a00e0 100644
--- a/src/project_config/interfaces.rs
+++ b/src/project_config/interfaces.rs
@@ -1,5 +1,5 @@
use schemars::JsonSchema;
-use serde::{Deserialize, Serialize};
+use serde::{Deserialize, Deserializer, Serialize};
/// Project-level configuration from codspeed.yaml file
///
@@ -14,18 +14,33 @@ pub struct ProjectConfig {
pub benchmarks: Option>,
}
-/// A benchmark target to execute
-#[derive(Debug, Deserialize, Serialize, PartialEq, JsonSchema)]
+/// A benchmark target to execute.
+///
+/// Either `exec` or `entrypoint` must be specified (mutually exclusive).
+#[derive(Debug, Serialize, Deserialize, PartialEq, JsonSchema)]
#[serde(rename_all = "kebab-case")]
pub struct Target {
- /// Optional name for this target
+ /// Optional name for this target (display purposes only)
pub name: Option,
- /// Command to execute
- pub exec: String,
+ /// Optional id to run a subset of targets (e.g. `codspeed run --bench my_id`)
+ pub id: Option,
+ /// The command to run
+ #[serde(flatten)]
+ pub command: TargetCommand,
/// Target-specific options
pub options: Option,
}
+/// The command for a benchmark target — exactly one of `exec` or `entrypoint`.
+#[derive(Debug, Clone, Serialize, PartialEq, JsonSchema)]
+#[serde(untagged)]
+pub enum TargetCommand {
+ /// Command measured by exec-harness
+ Exec { exec: String },
+ /// Command with built-in benchmark harness
+ Entrypoint { entrypoint: String },
+}
+
#[derive(Debug, Deserialize, Serialize, PartialEq, JsonSchema)]
#[serde(rename_all = "kebab-case")]
pub struct TargetOptions {
@@ -59,3 +74,35 @@ pub struct WalltimeOptions {
/// Minimum number of rounds
pub min_rounds: Option,
}
+
+// Custom implementation to enforce mutual exclusivity of `exec` and `entrypoint` fields, not
+// directly supported by serde's untagged enums.
+impl<'de> Deserialize<'de> for TargetCommand {
+ fn deserialize(deserializer: D) -> Result
+ where
+ D: Deserializer<'de>,
+ {
+ #[derive(Deserialize)]
+ #[serde(rename_all = "kebab-case")]
+ struct RawTarget {
+ exec: Option,
+ entrypoint: Option,
+ }
+
+ let raw = RawTarget::deserialize(deserializer)?;
+ Ok(match (raw.exec, raw.entrypoint) {
+ (Some(exec), None) => TargetCommand::Exec { exec },
+ (None, Some(entrypoint)) => TargetCommand::Entrypoint { entrypoint },
+ (Some(_), Some(_)) => {
+ return Err(serde::de::Error::custom(
+ "a target cannot have both `exec` and `entrypoint`",
+ ));
+ }
+ (None, None) => {
+ return Err(serde::de::Error::custom(
+ "a target must have either `exec` or `entrypoint`",
+ ));
+ }
+ })
+ }
+}
diff --git a/src/project_config/mod.rs b/src/project_config/mod.rs
index 06a191ed..4bb426d5 100644
--- a/src/project_config/mod.rs
+++ b/src/project_config/mod.rs
@@ -171,239 +171,4 @@ impl ProjectConfig {
}
#[cfg(test)]
-mod tests {
- use super::*;
- use tempfile::TempDir;
-
- #[test]
- fn test_deserialize_minimal_config() {
- let yaml = r#"
-options:
- warmup-time: 1s
-"#;
- let config: ProjectConfig = serde_yaml::from_str(yaml).unwrap();
- assert!(config.options.is_some());
- let options = config.options.unwrap();
- assert!(options.walltime.is_some());
- assert_eq!(
- options.walltime.unwrap().warmup_time,
- Some("1s".to_string())
- );
- }
-
- #[test]
- fn test_deserialize_full_walltime_config() {
- let yaml = r#"
-options:
- warmup-time: 2s
- max-time: 10s
- min-time: 1s
- max-rounds: 100
- min-rounds: 10
- working-directory: ./bench
-"#;
- let config: ProjectConfig = serde_yaml::from_str(yaml).unwrap();
- let options = config.options.unwrap();
- let walltime = options.walltime.unwrap();
-
- assert_eq!(walltime.warmup_time, Some("2s".to_string()));
- assert_eq!(walltime.max_time, Some("10s".to_string()));
- assert_eq!(walltime.min_time, Some("1s".to_string()));
- assert_eq!(walltime.max_rounds, Some(100));
- assert_eq!(walltime.min_rounds, Some(10));
- assert_eq!(options.working_directory, Some("./bench".to_string()));
- }
-
- #[test]
- fn test_deserialize_empty_config() {
- let yaml = r#"{}"#;
- let config: ProjectConfig = serde_yaml::from_str(yaml).unwrap();
- assert!(config.options.is_none());
- }
-
- #[test]
- fn test_validate_conflicting_min_time_max_rounds() {
- let config = ProjectConfig {
- options: Some(ProjectOptions {
- walltime: Some(WalltimeOptions {
- warmup_time: None,
- max_time: None,
- min_time: Some("1s".to_string()),
- max_rounds: Some(10),
- min_rounds: None,
- }),
- working_directory: None,
- }),
- benchmarks: None,
- };
-
- let result = config.validate();
- assert!(result.is_err());
- assert!(
- result
- .unwrap_err()
- .to_string()
- .contains("cannot use both min_time and max_rounds")
- );
- }
-
- #[test]
- fn test_validate_conflicting_max_time_min_rounds() {
- let config = ProjectConfig {
- options: Some(ProjectOptions {
- walltime: Some(WalltimeOptions {
- warmup_time: None,
- max_time: Some("10s".to_string()),
- min_time: None,
- max_rounds: None,
- min_rounds: Some(5),
- }),
- working_directory: None,
- }),
- benchmarks: None,
- };
-
- let result = config.validate();
- assert!(result.is_err());
- assert!(
- result
- .unwrap_err()
- .to_string()
- .contains("cannot use both max_time and min_rounds")
- );
- }
-
- #[test]
- fn test_validate_valid_config() {
- let config = ProjectConfig {
- options: Some(ProjectOptions {
- walltime: Some(WalltimeOptions {
- warmup_time: Some("1s".to_string()),
- max_time: Some("10s".to_string()),
- min_time: Some("2s".to_string()),
- max_rounds: None,
- min_rounds: None,
- }),
- working_directory: Some("./bench".to_string()),
- }),
- benchmarks: None,
- };
-
- assert!(config.validate().is_ok());
- }
-
- #[test]
- fn test_load_from_path() {
- let temp_dir = TempDir::new().unwrap();
- let config_path = temp_dir.path().join("codspeed.yaml");
-
- fs::write(
- &config_path,
- r#"
-options:
- warmup-time: 5s
-"#,
- )
- .unwrap();
-
- let config = ProjectConfig::load_from_path(&config_path).unwrap();
- assert!(config.options.is_some());
- }
-
- #[test]
- fn test_load_from_path_invalid_yaml() {
- let temp_dir = TempDir::new().unwrap();
- let config_path = temp_dir.path().join("codspeed.yaml");
-
- fs::write(&config_path, "invalid: yaml: content:").unwrap();
-
- let result = ProjectConfig::load_from_path(&config_path);
- assert!(result.is_err());
- }
-
- #[test]
- fn test_discover_with_explicit_path() {
- let temp_dir = TempDir::new().unwrap();
- let config_path = temp_dir.path().join("my-config.yaml");
-
- fs::write(
- &config_path,
- r#"
-options:
- warmup-time: 3s
-"#,
- )
- .unwrap();
-
- let config = ProjectConfig::discover_and_load(Some(&config_path), temp_dir.path()).unwrap();
-
- assert!(config.is_some());
- let config = config.unwrap();
- assert!(config.options.is_some());
- }
-
- #[test]
- fn test_discover_with_explicit_path_not_found() {
- let temp_dir = TempDir::new().unwrap();
- let config_path = temp_dir.path().join("missing.yaml");
-
- let result = ProjectConfig::discover_and_load(Some(&config_path), temp_dir.path());
- assert!(result.is_err());
- }
-
- #[test]
- fn test_discover_finds_codspeed_yaml() {
- let temp_dir = TempDir::new().unwrap();
- let config_path = temp_dir.path().join("codspeed.yaml");
-
- fs::write(
- &config_path,
- r#"
-options:
- warmup-time: 2s
-"#,
- )
- .unwrap();
-
- let config = ProjectConfig::discover_and_load(None, temp_dir.path()).unwrap();
-
- assert!(config.is_some());
- }
-
- #[test]
- fn test_discover_priority_yaml_over_yml() {
- let temp_dir = TempDir::new().unwrap();
-
- // Create both .yaml and .yml files
- fs::write(
- temp_dir.path().join("codspeed.yaml"),
- r#"
-options:
- warmup-time: 1s
-"#,
- )
- .unwrap();
-
- fs::write(
- temp_dir.path().join("codspeed.yml"),
- r#"
-options:
- warmup-time: 2s
-"#,
- )
- .unwrap();
-
- let config = ProjectConfig::discover_and_load(None, temp_dir.path()).unwrap();
-
- assert!(config.is_some());
- // Note: We can no longer verify which file was loaded since we don't return the path
- // The priority is still enforced but not testable without checking the filesystem
- }
-
- #[test]
- fn test_discover_no_config_found() {
- let temp_dir = TempDir::new().unwrap();
- let config = ProjectConfig::discover_and_load(None, temp_dir.path()).unwrap();
- assert!(config.is_none());
- }
-}
+mod tests;
diff --git a/src/project_config/tests.rs b/src/project_config/tests.rs
new file mode 100644
index 00000000..095254fc
--- /dev/null
+++ b/src/project_config/tests.rs
@@ -0,0 +1,347 @@
+use super::*;
+use std::fs;
+use tempfile::TempDir;
+
+#[test]
+fn test_deserialize_minimal_config() {
+ let yaml = r#"
+options:
+ warmup-time: 1s
+"#;
+ let config: ProjectConfig = serde_yaml::from_str(yaml).unwrap();
+ assert!(config.options.is_some());
+ let options = config.options.unwrap();
+ assert!(options.walltime.is_some());
+ assert_eq!(
+ options.walltime.unwrap().warmup_time,
+ Some("1s".to_string())
+ );
+}
+
+#[test]
+fn test_deserialize_full_walltime_config() {
+ let yaml = r#"
+options:
+ warmup-time: 2s
+ max-time: 10s
+ min-time: 1s
+ max-rounds: 100
+ min-rounds: 10
+ working-directory: ./bench
+"#;
+ let config: ProjectConfig = serde_yaml::from_str(yaml).unwrap();
+ let options = config.options.unwrap();
+ let walltime = options.walltime.unwrap();
+
+ assert_eq!(walltime.warmup_time, Some("2s".to_string()));
+ assert_eq!(walltime.max_time, Some("10s".to_string()));
+ assert_eq!(walltime.min_time, Some("1s".to_string()));
+ assert_eq!(walltime.max_rounds, Some(100));
+ assert_eq!(walltime.min_rounds, Some(10));
+ assert_eq!(options.working_directory, Some("./bench".to_string()));
+}
+
+#[test]
+fn test_deserialize_empty_config() {
+ let yaml = r#"{}"#;
+ let config: ProjectConfig = serde_yaml::from_str(yaml).unwrap();
+ assert!(config.options.is_none());
+}
+
+#[test]
+fn test_validate_conflicting_min_time_max_rounds() {
+ let config = ProjectConfig {
+ options: Some(ProjectOptions {
+ walltime: Some(WalltimeOptions {
+ warmup_time: None,
+ max_time: None,
+ min_time: Some("1s".to_string()),
+ max_rounds: Some(10),
+ min_rounds: None,
+ }),
+ working_directory: None,
+ }),
+ benchmarks: None,
+ };
+
+ let result = config.validate();
+ assert!(result.is_err());
+ assert!(
+ result
+ .unwrap_err()
+ .to_string()
+ .contains("cannot use both min_time and max_rounds")
+ );
+}
+
+#[test]
+fn test_validate_conflicting_max_time_min_rounds() {
+ let config = ProjectConfig {
+ options: Some(ProjectOptions {
+ walltime: Some(WalltimeOptions {
+ warmup_time: None,
+ max_time: Some("10s".to_string()),
+ min_time: None,
+ max_rounds: None,
+ min_rounds: Some(5),
+ }),
+ working_directory: None,
+ }),
+ benchmarks: None,
+ };
+
+ let result = config.validate();
+ assert!(result.is_err());
+ assert!(
+ result
+ .unwrap_err()
+ .to_string()
+ .contains("cannot use both max_time and min_rounds")
+ );
+}
+
+#[test]
+fn test_validate_valid_config() {
+ let config = ProjectConfig {
+ options: Some(ProjectOptions {
+ walltime: Some(WalltimeOptions {
+ warmup_time: Some("1s".to_string()),
+ max_time: Some("10s".to_string()),
+ min_time: Some("2s".to_string()),
+ max_rounds: None,
+ min_rounds: None,
+ }),
+ working_directory: Some("./bench".to_string()),
+ }),
+ benchmarks: None,
+ };
+
+ assert!(config.validate().is_ok());
+}
+
+#[test]
+fn test_load_from_path() {
+ let temp_dir = TempDir::new().unwrap();
+ let config_path = temp_dir.path().join("codspeed.yaml");
+
+ fs::write(
+ &config_path,
+ r#"
+options:
+ warmup-time: 5s
+"#,
+ )
+ .unwrap();
+
+ let config = ProjectConfig::load_from_path(&config_path).unwrap();
+ assert!(config.options.is_some());
+}
+
+#[test]
+fn test_load_from_path_invalid_yaml() {
+ let temp_dir = TempDir::new().unwrap();
+ let config_path = temp_dir.path().join("codspeed.yaml");
+
+ fs::write(&config_path, "invalid: yaml: content:").unwrap();
+
+ let result = ProjectConfig::load_from_path(&config_path);
+ assert!(result.is_err());
+}
+
+#[test]
+fn test_discover_with_explicit_path() {
+ let temp_dir = TempDir::new().unwrap();
+ let config_path = temp_dir.path().join("my-config.yaml");
+
+ fs::write(
+ &config_path,
+ r#"
+options:
+ warmup-time: 3s
+"#,
+ )
+ .unwrap();
+
+ let config = ProjectConfig::discover_and_load(Some(&config_path), temp_dir.path()).unwrap();
+
+ assert!(config.is_some());
+ let config = config.unwrap();
+ assert!(config.options.is_some());
+}
+
+#[test]
+fn test_discover_with_explicit_path_not_found() {
+ let temp_dir = TempDir::new().unwrap();
+ let config_path = temp_dir.path().join("missing.yaml");
+
+ let result = ProjectConfig::discover_and_load(Some(&config_path), temp_dir.path());
+ assert!(result.is_err());
+}
+
+#[test]
+fn test_discover_finds_codspeed_yaml() {
+ let temp_dir = TempDir::new().unwrap();
+ let config_path = temp_dir.path().join("codspeed.yaml");
+
+ fs::write(
+ &config_path,
+ r#"
+options:
+ warmup-time: 2s
+"#,
+ )
+ .unwrap();
+
+ let config = ProjectConfig::discover_and_load(None, temp_dir.path()).unwrap();
+
+ assert!(config.is_some());
+}
+
+#[test]
+fn test_discover_priority_yaml_over_yml() {
+ let temp_dir = TempDir::new().unwrap();
+
+ // Create both .yaml and .yml files
+ fs::write(
+ temp_dir.path().join("codspeed.yaml"),
+ r#"
+options:
+ warmup-time: 1s
+"#,
+ )
+ .unwrap();
+
+ fs::write(
+ temp_dir.path().join("codspeed.yml"),
+ r#"
+options:
+ warmup-time: 2s
+"#,
+ )
+ .unwrap();
+
+ let config = ProjectConfig::discover_and_load(None, temp_dir.path()).unwrap();
+
+ assert!(config.is_some());
+ // Note: We can no longer verify which file was loaded since we don't return the path
+ // The priority is still enforced but not testable without checking the filesystem
+}
+
+#[test]
+fn test_discover_no_config_found() {
+ let temp_dir = TempDir::new().unwrap();
+ let config = ProjectConfig::discover_and_load(None, temp_dir.path()).unwrap();
+ assert!(config.is_none());
+}
+
+#[test]
+fn test_deserialize_exec_target() {
+ let yaml = r#"
+benchmarks:
+ - exec: ls -al /nix/store
+ - name: my exec benchmark
+ exec: ./my_binary
+ options:
+ warmup-time: 1s
+"#;
+ let config: ProjectConfig = serde_yaml::from_str(yaml).unwrap();
+ let benchmarks = config.benchmarks.unwrap();
+ assert_eq!(benchmarks.len(), 2);
+
+ assert_eq!(
+ benchmarks[0].command,
+ TargetCommand::Exec {
+ exec: "ls -al /nix/store".to_string()
+ }
+ );
+ assert!(benchmarks[0].name.is_none());
+
+ assert_eq!(
+ benchmarks[1].command,
+ TargetCommand::Exec {
+ exec: "./my_binary".to_string()
+ }
+ );
+ assert_eq!(benchmarks[1].name, Some("my exec benchmark".to_string()));
+ let walltime = benchmarks[1]
+ .options
+ .as_ref()
+ .unwrap()
+ .walltime
+ .as_ref()
+ .unwrap();
+ assert_eq!(walltime.warmup_time, Some("1s".to_string()));
+}
+
+#[test]
+fn test_deserialize_entrypoint_target() {
+ let yaml = r#"
+benchmarks:
+ - name: my python benchmarks
+ entrypoint: pytest --codspeed src
+ - entrypoint: cargo bench
+"#;
+ let config: ProjectConfig = serde_yaml::from_str(yaml).unwrap();
+ let benchmarks = config.benchmarks.unwrap();
+ assert_eq!(benchmarks.len(), 2);
+
+ assert_eq!(
+ benchmarks[0].command,
+ TargetCommand::Entrypoint {
+ entrypoint: "pytest --codspeed src".to_string()
+ }
+ );
+ assert_eq!(benchmarks[0].name, Some("my python benchmarks".to_string()));
+
+ assert_eq!(
+ benchmarks[1].command,
+ TargetCommand::Entrypoint {
+ entrypoint: "cargo bench".to_string()
+ }
+ );
+ assert!(benchmarks[1].name.is_none());
+}
+
+#[test]
+fn test_deserialize_mixed_targets() {
+ let yaml = r#"
+benchmarks:
+ - exec: ls -al
+ - entrypoint: pytest --codspeed src
+"#;
+ let config: ProjectConfig = serde_yaml::from_str(yaml).unwrap();
+ let benchmarks = config.benchmarks.unwrap();
+ assert_eq!(benchmarks.len(), 2);
+ assert!(matches!(benchmarks[0].command, TargetCommand::Exec { .. }));
+ assert!(matches!(
+ benchmarks[1].command,
+ TargetCommand::Entrypoint { .. }
+ ));
+}
+
+#[test]
+fn test_deserialize_target_missing_exec_and_entrypoint() {
+ let yaml = r#"
+benchmarks:
+ - name: missing command
+"#;
+ let result: Result<ProjectConfig, serde_yaml::Error> = serde_yaml::from_str(yaml);
+ assert!(result.is_err());
+}
+
+#[test]
+fn test_deserialize_target_both_exec_and_entrypoint() {
+ let yaml = r#"
+benchmarks:
+ - exec: ls
+ entrypoint: pytest
+"#;
+ let result: Result<ProjectConfig, serde_yaml::Error> = serde_yaml::from_str(yaml);
+ assert!(result.is_err());
+ assert!(
+ result
+ .unwrap_err()
+ .to_string()
+ .contains("a target cannot have both `exec` and `entrypoint`")
+ );
+}
diff --git a/src/run_environment/buildkite/provider.rs b/src/run_environment/buildkite/provider.rs
index b9f32e01..6d1ab4bd 100644
--- a/src/run_environment/buildkite/provider.rs
+++ b/src/run_environment/buildkite/provider.rs
@@ -6,7 +6,7 @@ use simplelog::SharedLogger;
use crate::cli::run::helpers::{
GitRemote, find_repository_root, get_env_variable, parse_git_remote,
};
-use crate::executor::Config;
+use crate::executor::config::{ExecutorConfig, OrchestratorConfig};
use crate::prelude::*;
use crate::run_environment::interfaces::{RepositoryProvider, RunEnvironmentMetadata, RunEvent};
use crate::run_environment::provider::{RunEnvironmentDetector, RunEnvironmentProvider};
@@ -52,9 +52,9 @@ pub fn get_ref() -> Result {
}
}
-impl TryFrom<&Config> for BuildkiteProvider {
+impl TryFrom<&OrchestratorConfig> for BuildkiteProvider {
type Error = Error;
- fn try_from(config: &Config) -> Result<Self, Self::Error> {
+ fn try_from(config: &OrchestratorConfig) -> Result<Self, Self::Error> {
if config.token.is_none() {
bail!("Token authentication is required for Buildkite");
}
@@ -159,7 +159,7 @@ impl RunEnvironmentProvider for BuildkiteProvider {
/// Docs:
/// - https://buildkite.com/docs/agent/v3/cli-oidc
/// - https://buildkite.com/docs/pipelines/security/oidc
- async fn set_oidc_token(&self, _config: &mut Config) -> Result<()> {
+ async fn set_oidc_token(&self, _config: &mut ExecutorConfig) -> Result<()> {
Ok(())
}
}
@@ -199,9 +199,9 @@ mod tests {
("BUILDKITE", Some("true")),
],
|| {
- let config = Config {
+ let config = OrchestratorConfig {
token: Some("token".into()),
- ..Config::test()
+ ..OrchestratorConfig::test()
};
let provider = BuildkiteProvider::try_from(&config).unwrap();
@@ -238,9 +238,9 @@ mod tests {
("BUILDKITE", Some("true")),
],
|| {
- let config = Config {
+ let config = OrchestratorConfig {
token: Some("token".into()),
- ..Config::test()
+ ..OrchestratorConfig::test()
};
let provider = BuildkiteProvider::try_from(&config).unwrap();
@@ -277,9 +277,9 @@ mod tests {
("BUILDKITE", Some("true")),
],
|| {
- let config = Config {
+ let config = OrchestratorConfig {
token: Some("token".into()),
- ..Config::test()
+ ..OrchestratorConfig::test()
};
let provider = BuildkiteProvider::try_from(&config).unwrap();
let run_environment_metadata = provider.get_run_environment_metadata().unwrap();
diff --git a/src/run_environment/github_actions/provider.rs b/src/run_environment/github_actions/provider.rs
index 6db406ce..4dc21928 100644
--- a/src/run_environment/github_actions/provider.rs
+++ b/src/run_environment/github_actions/provider.rs
@@ -9,7 +9,7 @@ use std::collections::BTreeMap;
use std::{env, fs};
use crate::cli::run::helpers::{find_repository_root, get_env_variable};
-use crate::executor::Config;
+use crate::executor::config::{ExecutorConfig, OrchestratorConfig};
use crate::prelude::*;
use crate::request_client::OIDC_CLIENT;
use crate::run_environment::interfaces::{
@@ -67,9 +67,9 @@ lazy_static! {
static ref PR_REF_REGEX: Regex = Regex::new(r"^refs/pull/(?P<pr_number>\d+)/merge$").unwrap();
}
-impl TryFrom<&Config> for GitHubActionsProvider {
+impl TryFrom<&OrchestratorConfig> for GitHubActionsProvider {
type Error = Error;
- fn try_from(config: &Config) -> Result<Self, Self::Error> {
+ fn try_from(config: &OrchestratorConfig) -> Result<Self, Self::Error> {
if config.repository_override.is_some() {
bail!("Specifying owner and repository from CLI is not supported for Github Actions");
}
@@ -288,7 +288,7 @@ impl RunEnvironmentProvider for GitHubActionsProvider {
/// - https://docs.github.com/en/actions/how-tos/secure-your-work/security-harden-deployments/oidc-with-reusable-workflows
/// - https://docs.github.com/en/actions/concepts/security/openid-connect
/// - https://docs.github.com/en/actions/reference/security/oidc#methods-for-requesting-the-oidc-token
- fn check_oidc_configuration(&mut self, config: &Config) -> Result<()> {
+ fn check_oidc_configuration(&mut self, config: &OrchestratorConfig) -> Result<()> {
// Check if a static token is already set
if config.token.is_some() {
announcement!(
@@ -343,7 +343,7 @@ impl RunEnvironmentProvider for GitHubActionsProvider {
///
/// All the validation has already been performed in `check_oidc_configuration`.
/// So if the oidc_config is None, we simply return.
- async fn set_oidc_token(&self, config: &mut Config) -> Result<()> {
+ async fn set_oidc_token(&self, config: &mut ExecutorConfig) -> Result<()> {
if let Some(oidc_config) = &self.oidc_config {
let request_url = format!(
"{}&audience={}",
@@ -435,9 +435,9 @@ mod tests {
("GITHUB_RUN_ID", Some("1234567890")),
],
|| {
- let config = Config {
+ let config = OrchestratorConfig {
token: Some("token".into()),
- ..Config::test()
+ ..OrchestratorConfig::test()
};
let github_actions_provider = GitHubActionsProvider::try_from(&config).unwrap();
assert_eq!(github_actions_provider.owner, "owner");
@@ -493,9 +493,9 @@ mod tests {
("VERSION", Some("0.1.0")),
],
|| {
- let config = Config {
+ let config = OrchestratorConfig {
token: Some("token".into()),
- ..Config::test()
+ ..OrchestratorConfig::test()
};
let github_actions_provider = GitHubActionsProvider::try_from(&config).unwrap();
assert!(!github_actions_provider.is_head_repo_fork);
@@ -549,9 +549,9 @@ mod tests {
("GH_MATRIX", Some("null")),
],
|| {
- let config = Config {
+ let config = OrchestratorConfig {
token: Some("token".into()),
- ..Config::test()
+ ..OrchestratorConfig::test()
};
let github_actions_provider = GitHubActionsProvider::try_from(&config).unwrap();
assert!(github_actions_provider.is_head_repo_fork);
@@ -632,9 +632,9 @@ mod tests {
),
],
|| {
- let config = Config {
+ let config = OrchestratorConfig {
token: Some("token".into()),
- ..Config::test()
+ ..OrchestratorConfig::test()
};
let github_actions_provider = GitHubActionsProvider::try_from(&config).unwrap();
assert!(!github_actions_provider.is_head_repo_fork);
diff --git a/src/run_environment/gitlab_ci/provider.rs b/src/run_environment/gitlab_ci/provider.rs
index a78de026..3fc3c280 100644
--- a/src/run_environment/gitlab_ci/provider.rs
+++ b/src/run_environment/gitlab_ci/provider.rs
@@ -4,7 +4,7 @@ use std::collections::BTreeMap;
use std::env;
use crate::cli::run::helpers::get_env_variable;
-use crate::executor::Config;
+use crate::executor::config::{ExecutorConfig, OrchestratorConfig};
use crate::prelude::*;
use crate::run_environment::interfaces::{
GlData, RepositoryProvider, RunEnvironment, RunEnvironmentMetadata, RunEvent, Sender,
@@ -27,9 +27,9 @@ pub struct GitLabCIProvider {
repository_root_path: String,
}
-impl TryFrom<&Config> for GitLabCIProvider {
+impl TryFrom<&OrchestratorConfig> for GitLabCIProvider {
type Error = Error;
- fn try_from(config: &Config) -> Result<Self, Self::Error> {
+ fn try_from(config: &OrchestratorConfig) -> Result<Self, Self::Error> {
if config.repository_override.is_some() {
bail!("Specifying owner and repository from CLI is not supported for GitLab CI");
}
@@ -187,7 +187,7 @@ impl RunEnvironmentProvider for GitLabCIProvider {
/// See:
/// - https://docs.gitlab.com/integration/openid_connect_provider/
/// - https://docs.gitlab.com/ci/secrets/id_token_authentication/
- async fn set_oidc_token(&self, _config: &mut Config) -> Result<()> {
+ async fn set_oidc_token(&self, _config: &mut ExecutorConfig) -> Result<()> {
Ok(())
}
}
@@ -224,9 +224,9 @@ mod tests {
("CI_COMMIT_REF_NAME", Some("main")),
],
|| {
- let config = Config {
+ let config = OrchestratorConfig {
token: Some("token".into()),
- ..Config::test()
+ ..OrchestratorConfig::test()
};
let gitlab_ci_provider = GitLabCIProvider::try_from(&config).unwrap();
let run_environment_metadata =
@@ -271,9 +271,9 @@ mod tests {
),
],
|| {
- let config = Config {
+ let config = OrchestratorConfig {
token: Some("token".into()),
- ..Config::test()
+ ..OrchestratorConfig::test()
};
let gitlab_ci_provider = GitLabCIProvider::try_from(&config).unwrap();
let run_environment_metadata =
@@ -318,9 +318,9 @@ mod tests {
),
],
|| {
- let config = Config {
+ let config = OrchestratorConfig {
token: Some("token".into()),
- ..Config::test()
+ ..OrchestratorConfig::test()
};
let gitlab_ci_provider = GitLabCIProvider::try_from(&config).unwrap();
let run_environment_metadata =
diff --git a/src/run_environment/interfaces.rs b/src/run_environment/interfaces.rs
index 0f05704e..3962c608 100644
--- a/src/run_environment/interfaces.rs
+++ b/src/run_environment/interfaces.rs
@@ -1,5 +1,5 @@
use serde::{Deserialize, Serialize};
-use serde_json::{Value, json};
+use serde_json::Value;
use std::collections::BTreeMap;
#[derive(Deserialize, Serialize, Debug, Clone, PartialEq, Default)]
@@ -99,14 +99,21 @@ pub struct RunPart {
}
impl RunPart {
- /// Returns a new `RunPart` with the run index suffix appended to `run_part_id`.
+ /// Returns a new `RunPart` with the given suffix appended to `run_part_id`.
///
- /// This is used to differentiate multiple uploads within the same CI job execution.
- /// The suffix follows the same structured format as other metadata: `-{"run-index":N}`
- pub fn with_run_index(mut self, run_index: u32) -> Self {
- self.run_part_id = format!("{}-{{\"run-index\":{}}}", self.run_part_id, run_index);
- self.metadata
- .insert("run-index".to_string(), json!(run_index));
+ /// The suffix is a structured key-value map that gets:
+ /// - serialized as JSON and appended to `run_part_id` (e.g. `job_name-{"executor":"valgrind","run-index":0}`)
+ /// - merged into the `metadata` field
+ ///
+ /// This is used to differentiate multiple uploads within the same run
+ /// (e.g., different executors, or multiple invocations in the same CI job).
+ pub fn with_suffix(mut self, suffix: BTreeMap<String, Value>) -> Self {
+ if suffix.is_empty() {
+ return self;
+ }
+ let suffix_str = serde_json::to_string(&suffix).expect("Unable to serialize suffix");
+ self.run_part_id = format!("{}-{}", self.run_part_id, suffix_str);
+ self.metadata.extend(suffix);
self
}
}
diff --git a/src/run_environment/local/provider.rs b/src/run_environment/local/provider.rs
index 2c245b3b..9d1d528c 100644
--- a/src/run_environment/local/provider.rs
+++ b/src/run_environment/local/provider.rs
@@ -5,8 +5,8 @@ use uuid::Uuid;
use crate::api_client::{CodSpeedAPIClient, GetOrCreateProjectRepositoryVars, GetRepositoryVars};
use crate::cli::run::helpers::{GitRemote, find_repository_root, parse_git_remote};
+use crate::executor::config::OrchestratorConfig;
use crate::executor::config::RepositoryOverride;
-use crate::executor::{Config, ExecutorName};
use crate::local_logger::get_local_logger;
use crate::prelude::*;
use crate::run_environment::interfaces::{
@@ -14,8 +14,8 @@ use crate::run_environment::interfaces::{
};
use crate::run_environment::provider::{RunEnvironmentDetector, RunEnvironmentProvider};
use crate::run_environment::{RunEnvironment, RunPart};
-use crate::system::SystemInfo;
-use crate::upload::{LATEST_UPLOAD_METADATA_VERSION, ProfileArchive, Runner, UploadMetadata};
+use serde_json::Value;
+use std::collections::BTreeMap;
static FAKE_COMMIT_REF: &str = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa";
@@ -48,7 +48,7 @@ struct ResolvedRepository {
}
impl LocalProvider {
- pub async fn new(config: &Config, api_client: &CodSpeedAPIClient) -> Result<Self> {
+ pub async fn new(config: &OrchestratorConfig, api_client: &CodSpeedAPIClient) -> Result<Self> {
let current_dir = std::env::current_dir()?;
let git_context = Self::find_git_context(&current_dir);
@@ -59,7 +59,7 @@ impl LocalProvider {
let resolved = Self::resolve_repository(config, api_client, git_context.as_ref()).await?;
- let expected_run_parts_count = config.modes.len() as u32;
+ let expected_run_parts_count = config.expected_run_parts_count();
Ok(Self {
repository_provider: resolved.provider,
@@ -86,7 +86,7 @@ impl LocalProvider {
/// Resolve repository information from override, git remote, or API fallback
async fn resolve_repository(
- config: &Config,
+ config: &OrchestratorConfig,
api_client: &CodSpeedAPIClient,
git_context: Option<&GitContext>,
) -> Result<ResolvedRepository> {
@@ -246,7 +246,9 @@ impl RunEnvironmentProvider for LocalProvider {
event: self.event.clone(),
gh_data: None,
gl_data: None,
- local_data: None,
+ local_data: Some(LocalData {
+ expected_run_parts_count: self.expected_run_parts_count,
+ }),
sender: None,
owner: self.owner.clone(),
repository: self.repository.clone(),
@@ -255,51 +257,23 @@ impl RunEnvironmentProvider for LocalProvider {
})
}
- async fn get_upload_metadata(
- &self,
- config: &Config,
- system_info: &SystemInfo,
- profile_archive: &ProfileArchive,
- executor_name: ExecutorName,
- ) -> Result<UploadMetadata> {
- let mut run_environment_metadata = self.get_run_environment_metadata()?;
-
- run_environment_metadata.local_data = Some(LocalData {
- expected_run_parts_count: self.expected_run_parts_count,
- });
-
- let run_part = Some(RunPart {
+ fn get_run_provider_run_part(&self) -> Option<RunPart> {
+ Some(RunPart {
run_id: self.run_id.clone(),
- run_part_id: executor_name.to_string(),
+ run_part_id: "local-job".into(),
job_name: "local-job".into(),
metadata: Default::default(),
- });
-
- Ok(UploadMetadata {
- version: Some(LATEST_UPLOAD_METADATA_VERSION),
- tokenless: config.token.is_none(),
- repository_provider: self.repository_provider.clone(),
- commit_hash: run_environment_metadata.ref_.clone(),
- allow_empty: config.allow_empty,
- run_environment_metadata,
- profile_md5: profile_archive.hash.clone(),
- profile_encoding: profile_archive.content.encoding(),
- runner: Runner {
- name: "codspeed-runner".into(),
- version: crate::VERSION.into(),
- instruments: config.instruments.get_active_instrument_names(),
- executor: executor_name,
- system_info: system_info.clone(),
- },
- run_environment: self.get_run_environment(),
- run_part,
})
}
- /// Not used here because we need the executor_name to generate the run_part_id
- /// TODO(COD-2009) Change this interface as all providers will add the executor to the run part metadata
- fn get_run_provider_run_part(&self) -> Option<RunPart> {
- None
+ /// Local runs don't need run-index because each invocation gets a fresh `run_id`.
+ fn build_run_part_suffix(
+ &self,
+ _run_part: &RunPart,
+ _repository_root_path: &str,
+ orchestrator_suffix: BTreeMap<String, Value>,
+ ) -> BTreeMap<String, Value> {
+ orchestrator_suffix
}
}
@@ -378,9 +352,9 @@ mod tests {
// TODO: uncomment later when we have a way to mock git repository
// #[test]
// fn test_provider_metadata() {
- // let config = Config {
+ // let config = OrchestratorConfig {
// token: Some("token".into()),
- // ..Config::test()
+ // ..OrchestratorConfig::test()
// };
// let local_provider = LocalProvider::try_from(&config).unwrap();
// let provider_metadata = local_provider.get_provider_metadata().unwrap();
diff --git a/src/run_environment/mod.rs b/src/run_environment/mod.rs
index 0c4ff2de..e5075296 100644
--- a/src/run_environment/mod.rs
+++ b/src/run_environment/mod.rs
@@ -9,7 +9,7 @@ use local::LocalProvider;
use provider::RunEnvironmentDetector;
use crate::api_client::CodSpeedAPIClient;
-use crate::executor::Config;
+use crate::executor::config::OrchestratorConfig;
use crate::prelude::*;
pub use self::interfaces::*;
@@ -22,7 +22,7 @@ mod gitlab_ci;
mod local;
pub async fn get_provider(
- config: &Config,
+ config: &OrchestratorConfig,
api_client: &CodSpeedAPIClient,
) -> Result<Box<dyn RunEnvironmentProvider>> {
let mut provider: Box<dyn RunEnvironmentProvider> = {
diff --git a/src/run_environment/provider.rs b/src/run_environment/provider.rs
index 90464aae..ae142412 100644
--- a/src/run_environment/provider.rs
+++ b/src/run_environment/provider.rs
@@ -1,8 +1,10 @@
use async_trait::async_trait;
use git2::Repository;
+use serde_json::Value;
use simplelog::SharedLogger;
+use std::collections::BTreeMap;
-use crate::executor::{Config, ExecutorName};
+use crate::executor::{ExecutorConfig, ExecutorName, OrchestratorConfig};
use crate::prelude::*;
use crate::system::SystemInfo;
use crate::upload::{
@@ -42,6 +44,41 @@ pub trait RunEnvironmentProvider {
/// Return the metadata necessary to identify the `RunPart`
fn get_run_provider_run_part(&self) -> Option<RunPart>;
+ /// Build the suffix that differentiates uploads within the same run part.
+ ///
+ /// The suffix is a structured key-value map appended to `run_part_id` via
+ /// [`RunPart::with_suffix`]. `orchestrator_suffix` contains data from the
+ /// orchestrator (e.g. `{"executor": "valgrind"}`).
+ ///
+ /// The default implementation adds a `run-index` to support multiple CLI
+ /// invocations in the same CI job. Providers where each invocation gets a
+ /// fresh `run_id` (e.g. local) should override to skip the run-index.
+ fn build_run_part_suffix(
+ &self,
+ run_part: &RunPart,
+ repository_root_path: &str,
+ orchestrator_suffix: BTreeMap<String, Value>,
+ ) -> BTreeMap<String, Value> {
+ let mut suffix = orchestrator_suffix;
+
+ let run_index_state = RunIndexState::new(
+ repository_root_path,
+ &run_part.run_id,
+ &run_part.run_part_id,
+ );
+ match run_index_state.get_and_increment() {
+ Ok(run_index) => {
+ suffix.insert("run-index".to_string(), Value::from(run_index));
+ }
+ Err(e) => {
+ warn!("Failed to track run index: {e}. Continuing with index 0.");
+ suffix.insert("run-index".to_string(), Value::from(0));
+ }
+ }
+
+ suffix
+ }
+
/// Get the OIDC audience that must be used when requesting OIDC tokens.
///
/// It will be validated when the token is used to authenticate with CodSpeed.
@@ -50,7 +87,7 @@ pub trait RunEnvironmentProvider {
}
/// Check the OIDC configuration for the current run environment, if supported.
- fn check_oidc_configuration(&mut self, _config: &Config) -> Result<()> {
+ fn check_oidc_configuration(&mut self, _config: &OrchestratorConfig) -> Result<()> {
Ok(())
}
@@ -63,53 +100,33 @@ pub trait RunEnvironmentProvider {
///
/// Warning: OIDC tokens are typically short-lived. This method must be called
/// just before the upload step to ensure the token is valid during the upload.
- async fn set_oidc_token(&self, _config: &mut Config) -> Result<()> {
+ async fn set_oidc_token(&self, _config: &mut ExecutorConfig) -> Result<()> {
Ok(())
}
/// Returns the metadata necessary for uploading results to CodSpeed.
///
- /// # Arguments
- ///
- /// * `config` - A reference to the configuration.
- /// * `archive_hash` - The hash of the archive to be uploaded.
- /// * `instruments` - A reference to the active instruments.
- ///
- /// # Example
- ///
- /// ```rust,ignore
- /// let provider = MyCIProvider::new();
- /// let config = Config::new();
- /// let instruments = Instruments::new();
- /// let metadata = provider.get_upload_metadata(&config, "abc123").await.unwrap();
- /// ```
+ /// `orchestrator_run_part_suffix` is structured data from the orchestrator used to differentiate
+ /// uploads within the same run (e.g. `{"executor": "valgrind"}`).
async fn get_upload_metadata(
&self,
- config: &Config,
+ config: &ExecutorConfig,
system_info: &SystemInfo,
profile_archive: &ProfileArchive,
executor_name: ExecutorName,
+ orchestrator_run_part_suffix: BTreeMap<String, Value>,
) -> Result<UploadMetadata> {
let run_environment_metadata = self.get_run_environment_metadata()?;
let commit_hash = self.get_commit_hash(&run_environment_metadata.repository_root_path)?;
- // Apply run index suffix to run_part if applicable.
- // This differentiates multiple uploads within the same CI job execution
- // (e.g., running both simulation and memory benchmarks in the same job).
let run_part = self.get_run_provider_run_part().map(|run_part| {
- let run_index_state = RunIndexState::new(
+ let suffix = self.build_run_part_suffix(
+ &run_part,
&run_environment_metadata.repository_root_path,
- &run_part.run_id,
- &run_part.run_part_id,
+ orchestrator_run_part_suffix,
);
- match run_index_state.get_and_increment() {
- Ok(run_index) => run_part.with_run_index(run_index),
- Err(e) => {
- warn!("Failed to track run index: {e}. Continuing with index 0.");
- run_part.with_run_index(0)
- }
- }
+ run_part.with_suffix(suffix)
});
Ok(UploadMetadata {
diff --git a/src/upload/uploader.rs b/src/upload/uploader.rs
index 76fd9862..a4c22cff 100644
--- a/src/upload/uploader.rs
+++ b/src/upload/uploader.rs
@@ -1,5 +1,5 @@
-use crate::executor::Config;
use crate::executor::ExecutionContext;
+use crate::executor::ExecutorConfig;
use crate::executor::ExecutorName;
use crate::executor::Orchestrator;
use crate::run_environment::RunEnvironment;
@@ -11,6 +11,8 @@ use crate::{
use async_compression::tokio::write::GzipEncoder;
use console::style;
use reqwest::StatusCode;
+use serde_json::Value;
+use std::collections::BTreeMap;
use tokio::fs::File;
use tokio::io::AsyncWriteExt;
use tokio_tar::Builder;
@@ -120,11 +122,12 @@ async fn create_profile_archive(
}
async fn retrieve_upload_data(
- config: &Config,
+ orchestrator: &Orchestrator,
+ config: &ExecutorConfig,
upload_metadata: &UploadMetadata,
) -> Result {
let mut upload_request = REQUEST_CLIENT
- .post(config.upload_url.clone())
+ .post(orchestrator.config.upload_url.clone())
.json(&upload_metadata);
if !upload_metadata.tokenless {
upload_request = upload_request.header("Authorization", config.token.clone().unwrap());
@@ -249,6 +252,7 @@ pub async fn upload(
orchestrator: &Orchestrator,
execution_context: &ExecutionContext,
executor_name: ExecutorName,
+ run_part_suffix: BTreeMap<String, Value>,
) -> Result {
let profile_archive =
create_profile_archive(&execution_context.profile_folder, executor_name.clone()).await?;
@@ -265,6 +269,7 @@ pub async fn upload(
&orchestrator.system_info,
&profile_archive,
executor_name,
+ run_part_suffix,
)
.await?;
debug!("Upload metadata: {upload_metadata:#?}");
@@ -274,7 +279,8 @@ pub async fn upload(
}
debug!("Preparing upload...");
- let upload_data = retrieve_upload_data(&execution_context.config, &upload_metadata).await?;
+ let upload_data =
+ retrieve_upload_data(orchestrator, &execution_context.config, &upload_metadata).await?;
debug!("runId: {}", upload_data.run_id);
debug!(
@@ -304,15 +310,25 @@ mod tests {
#[ignore]
#[tokio::test]
async fn test_upload() {
- let mut config = Config {
- command: "pytest tests/ --codspeed".into(),
+ use crate::executor::OrchestratorConfig;
+
+ let orchestrator_config = OrchestratorConfig {
upload_url: Url::parse("change me").unwrap(),
token: Some("change me".into()),
profile_folder: Some(PathBuf::from(format!(
"{}/src/uploader/samples/adrien-python-test",
env!("CARGO_MANIFEST_DIR")
))),
- ..Config::test()
+ ..OrchestratorConfig::test()
+ };
+ let executor_config = ExecutorConfig {
+ command: "pytest tests/ --codspeed".into(),
+ token: Some("change me".into()),
+ profile_folder: Some(PathBuf::from(format!(
+ "{}/src/uploader/samples/adrien-python-test",
+ env!("CARGO_MANIFEST_DIR")
+ ))),
+ ..ExecutorConfig::test()
};
async_with_vars(
[
@@ -345,14 +361,22 @@ mod tests {
async {
let codspeed_config = CodSpeedConfig::default();
let api_client = CodSpeedAPIClient::create_test_client();
- let orchestrator = Orchestrator::new(&mut config, &codspeed_config, &api_client)
- .await
- .expect("Failed to create Orchestrator for test");
- let execution_context =
- ExecutionContext::new(config).expect("Failed to create ExecutionContext");
- upload(&orchestrator, &execution_context, ExecutorName::Valgrind)
- .await
- .unwrap();
+ let orchestrator =
+ Orchestrator::new(orchestrator_config, &codspeed_config, &api_client)
+ .await
+ .expect("Failed to create Orchestrator for test");
+ let execution_context = ExecutionContext::new(executor_config)
+ .expect("Failed to create ExecutionContext");
+ let run_part_suffix =
+ BTreeMap::from([("executor".to_string(), Value::from("valgrind"))]);
+ upload(
+ &orchestrator,
+ &execution_context,
+ ExecutorName::Valgrind,
+ run_part_suffix,
+ )
+ .await
+ .unwrap();
},
)
.await;