
Runner

provide.testkit.quality.runner

Quality runner for orchestrating multiple quality tools.

Classes

QualityGateResults dataclass

QualityGateResults(
    passed: bool,
    results: dict[str, QualityResult],
    failed_gates: list[str] = None,
)

Results from running quality gates.
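
In normal use this object comes back from the runner rather than being built by hand, but a minimal sketch of its shape, with illustrative values, looks like this:

from provide.testkit.quality.runner import QualityGateResults

# Illustrative values: a run in which a "coverage" gate failed.
gate_results = QualityGateResults(
    passed=False,
    results={},  # normally maps tool names to QualityResult objects
    failed_gates=["coverage"],
)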

QualityRunner

QualityRunner(
    artifact_root: Path | None = None,
    tools: list[str] | None = None,
    config: dict[str, Any] | None = None,
)

Orchestrates multiple quality analysis tools.

Manages the execution of quality tools, artifact collection, and result aggregation with configurable quality gates.

Initialize the quality runner.

Parameters:

| Name | Type | Description | Default |
|------|------|-------------|---------|
| `artifact_root` | `Path \| None` | Root directory for storing artifacts (defaults to `.quality-artifacts`) | `None` |
| `tools` | `list[str] \| None` | List of tool names to run (`None` for the default set) | `None` |
| `config` | `dict[str, Any] \| None` | Configuration for tools and the runner | `None` |
Source code in provide/testkit/quality/runner.py
def __init__(
    self,
    artifact_root: Path | None = None,
    tools: list[str] | None = None,
    config: dict[str, Any] | None = None,
) -> None:
    """Initialize the quality runner.

    Args:
        artifact_root: Root directory for storing artifacts (defaults to .quality-artifacts)
        tools: List of tool names to run (None for default set)
        config: Configuration for tools and runner
    """
    self.artifact_root = Path(artifact_root) if artifact_root else Path(".quality-artifacts")
    self.config = config or {}
    self.tools = tools or self._get_default_tools()
    self.tool_instances: dict[str, QualityTool] = {}
    self._initialize_tools()
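
A minimal construction sketch; the tool names below are hypothetical, since the available set depends on what is registered (see get_available_tools):

from pathlib import Path

from provide.testkit.quality.runner import QualityRunner

# "coverage" and "lint" are hypothetical tool names; call
# runner.get_available_tools() to see what is actually registered.
runner = QualityRunner(
    artifact_root=Path("build/quality"),
    tools=["coverage", "lint"],
)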
Functions
generate_summary_report
generate_summary_report(
    results: dict[str, QualityResult],
) -> str

Generate a summary report of all results.

Parameters:

| Name | Type | Description | Default |
|------|------|-------------|---------|
| `results` | `dict[str, QualityResult]` | Results to summarize | required |

Returns:

| Type | Description |
|------|-------------|
| `str` | Summary report string |

Source code in provide/testkit/quality/runner.py
def generate_summary_report(self, results: dict[str, QualityResult]) -> str:
    """Generate a summary report of all results.

    Args:
        results: Results to summarize

    Returns:
        Summary report string
    """
    lines = ["Quality Analysis Summary", "=" * 30, ""]

    total_tools = len(results)
    passed_tools = sum(1 for r in results.values() if r.passed)

    lines.append(f"Tools Run: {total_tools}")
    lines.append(f"Passed: {passed_tools}")
    lines.append(f"Failed: {total_tools - passed_tools}")
    lines.append("")

    for _tool_name, result in results.items():
        lines.append(result.summary)

    return "\n".join(lines)
get_available_tools
get_available_tools() -> list[str]

Get list of available tool names.

Source code in provide/testkit/quality/runner.py
def get_available_tools(self) -> list[str]:
    """Get list of available tool names."""
    return list(self.tool_instances.keys())
run_all
run_all(
    target: Path, **kwargs: Any
) -> dict[str, QualityResult]

Run all configured quality tools on the target.

Parameters:

| Name | Type | Description | Default |
|------|------|-------------|---------|
| `target` | `Path` | Path to analyze | required |
| `**kwargs` | `Any` | Additional arguments passed to tools | `{}` |

Returns:

| Type | Description |
|------|-------------|
| `dict[str, QualityResult]` | Dictionary mapping tool names to their results |

Source code in provide/testkit/quality/runner.py
def run_all(self, target: Path, **kwargs: Any) -> dict[str, QualityResult]:
    """Run all configured quality tools on the target.

    Args:
        target: Path to analyze
        **kwargs: Additional arguments passed to tools

    Returns:
        Dictionary mapping tool names to their results
    """
    results = {}
    target = Path(target)

    for tool_name, tool in self.tool_instances.items():
        artifact_dir = self.artifact_root / tool_name
        ensure_dir(artifact_dir)

        try:
            start_time = time.time()
            result = tool.analyze(target, artifact_dir=artifact_dir, **kwargs)
            result.execution_time = time.time() - start_time

            # Save artifacts
            self._save_tool_artifacts(result, artifact_dir)
            results[tool_name] = result

        except Exception as e:
            # Create failed result for tool
            results[tool_name] = QualityResult(
                tool=tool_name, passed=False, details={"error": str(e), "error_type": type(e).__name__}
            )

    return results
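
Note that a tool raising an exception does not abort the run; it is recorded as a failed QualityResult with the error in its details. A sketch of inspecting the results (the target path is illustrative):

from pathlib import Path

from provide.testkit.quality.runner import QualityRunner

runner = QualityRunner()
results = runner.run_all(Path("src/"))
for name, result in results.items():
    status = "passed" if result.passed else "failed"
    # Tools that raised store the message under details["error"]; the
    # `or {}` guard assumes details may be unset for passing tools.
    error = (result.details or {}).get("error", "")
    print(f"{name}: {status}", error)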
run_tools
run_tools(
    target: Path,
    tools: list[str] | None = None,
    artifact_dir: Path | None = None,
    tool_configs: dict[str, Any] | None = None,
) -> dict[str, QualityResult]

Run specific quality tools on the target.

Parameters:

| Name | Type | Description | Default |
|------|------|-------------|---------|
| `target` | `Path` | Path to analyze | required |
| `tools` | `list[str] \| None` | List of tool names to run (`None` for all available tools) | `None` |
| `artifact_dir` | `Path \| None` | Directory for artifacts (overrides the default artifact root) | `None` |
| `tool_configs` | `dict[str, Any] \| None` | Configuration for tools | `None` |

Returns:

| Type | Description |
|------|-------------|
| `dict[str, QualityResult]` | Dictionary mapping tool names to their results |

Source code in provide/testkit/quality/runner.py
def run_tools(
    self,
    target: Path,
    tools: list[str] | None = None,
    artifact_dir: Path | None = None,
    tool_configs: dict[str, Any] | None = None,
) -> dict[str, QualityResult]:
    """Run specific quality tools on the target.

    Args:
        target: Path to analyze
        tools: List of tool names to run (None for all available)
        artifact_dir: Directory for artifacts (overrides default)
        tool_configs: Configuration for tools

    Returns:
        Dictionary mapping tool names to their results
    """
    if artifact_dir:
        original_artifact_root = self.artifact_root
        self.artifact_root = artifact_dir

    if tool_configs:
        original_config = self.config
        self.config = tool_configs
        # Re-initialize tools with new config
        self._initialize_tools()

    # Filter tools if specified
    if tools:
        filtered_instances = {name: tool for name, tool in self.tool_instances.items() if name in tools}
        original_instances = self.tool_instances
        self.tool_instances = filtered_instances

    try:
        results = self.run_all(target)
        return results
    finally:
        # Restore original state
        if artifact_dir:
            self.artifact_root = original_artifact_root
        if tool_configs:
            self.config = original_config
            self._initialize_tools()
        if tools:
            self.tool_instances = original_instances
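
Because the method saves and restores the runner's state in a try/finally, the overrides below apply only for the duration of the call (the tool name and config keys are hypothetical):

from pathlib import Path

from provide.testkit.quality.runner import QualityRunner

runner = QualityRunner()

# "coverage" and its config keys are hypothetical; consult each tool's
# documentation for the options it actually accepts.
results = runner.run_tools(
    Path("src/"),
    tools=["coverage"],
    artifact_dir=Path("/tmp/quality-artifacts"),
    tool_configs={"coverage": {"fail_under": 80}},
)

One consequence of the mutate-and-restore approach is that a single runner instance should not be shared across threads while run_tools is executing.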
run_with_gates
run_with_gates(
    target: Path, gates: dict[str, Any], **kwargs: Any
) -> tuple[bool, dict[str, QualityResult]]

Run quality tools and check against quality gates.

Parameters:

| Name | Type | Description | Default |
|------|------|-------------|---------|
| `target` | `Path` | Path to analyze | required |
| `gates` | `dict[str, Any]` | Quality gate requirements | required |
| `**kwargs` | `Any` | Additional arguments passed to tools | `{}` |

Returns:

| Type | Description |
|------|-------------|
| `tuple[bool, dict[str, QualityResult]]` | Tuple of `(all_gates_passed, results)` |

Source code in provide/testkit/quality/runner.py
def run_with_gates(
    self, target: Path, gates: dict[str, Any], **kwargs: Any
) -> tuple[bool, dict[str, QualityResult]]:
    """Run quality tools and check against quality gates.

    Args:
        target: Path to analyze
        gates: Quality gate requirements
        **kwargs: Additional arguments passed to tools

    Returns:
        Tuple of (all_gates_passed, results)
    """
    results = self.run_all(target, **kwargs)
    passed = self._check_gates(results, gates)
    return passed, results
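
The gate schema is checked by the private _check_gates helper, which is not shown here, so the gate keys below are an assumption for illustration only:

from pathlib import Path

from provide.testkit.quality.runner import QualityRunner

runner = QualityRunner()

# ASSUMPTION: the shape of this gates dict is illustrative; see
# _check_gates in runner.py for the schema it actually enforces.
passed, results = runner.run_with_gates(
    Path("src/"),
    gates={"coverage": {"min_score": 80}},
)
if not passed:
    print(runner.generate_summary_report(results))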
