Profiling Tools

provide.testkit.quality.profiling

Performance profiling and analysis for provide-testkit.

Provides performance profiling and analysis using memray, cProfile, and other tools, and integrates with the quality framework for comprehensive performance analysis.

Features:

- Memory profiling with memray
- CPU profiling with cProfile
- Performance regression detection
- Integration with quality gates
- Configurable profiling options

Usage

Basic memory profiling

def test_with_profiling(profiling_fixture):
    result = profiling_fixture.profile_memory(function, *args)
    assert result["passed"]

CPU profiling with quality gates

runner = QualityRunner()
results = runner.run_with_gates(path, {"profiling": {"max_memory_mb": 100}})

Classes

ProfilingFixture

ProfilingFixture(
    config: dict[str, Any] | None = None,
    artifact_dir: Path | None = None,
)

Bases: BaseQualityFixture

Pytest fixture for performance profiling.

Provides easy access to performance profiling with automatic setup and teardown. Integrates with the quality framework fixtures.

Initialize profiling fixture.

Parameters:

config (dict[str, Any] | None): Profiler configuration. Default: None
artifact_dir (Path | None): Directory for artifacts. Default: None
Source code in provide/testkit/quality/profiling/fixture.py
def __init__(self, config: dict[str, Any] | None = None, artifact_dir: Path | None = None) -> None:
    """Initialize profiling fixture.

    Args:
        config: Profiler configuration
        artifact_dir: Directory for artifacts
    """
    super().__init__(config or {}, artifact_dir)
    self.profiler: PerformanceProfiler | None = None
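The testkit's profiling_fixture (see Usage above) normally constructs and tears this down for you. The following is a minimal sketch of direct construction, assuming the class is importable from the module path shown in the source listing; the config values are purely illustrative:

from pathlib import Path

from provide.testkit.quality.profiling.fixture import ProfilingFixture

# Illustrative options; see PerformanceProfiler below for the config keys
# the profiler actually reads.
fixture = ProfilingFixture(
    config={"profile_cpu": True, "max_memory_mb": 100},
    artifact_dir=Path(".quality/profiling"),
)
fixture.setup()  # creates the underlying PerformanceProfiler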
Functions
setup
setup() -> None

Set up performance profiler.

Source code in provide/testkit/quality/profiling/fixture.py
def setup(self) -> None:
    """Set up performance profiler."""
    self.profiler = PerformanceProfiler(self.config)
    self._setup_complete = True
profile_function
profile_function(
    func: Callable[..., Any], *args: Any, **kwargs: Any
) -> dict[str, Any]

Profile a function's performance.

Parameters:

func (Callable[..., Any]): Function to profile (required)
*args (Any): Function arguments
**kwargs (Any): Function keyword arguments

Returns:

dict[str, Any]: Profiling results as dict

Source code in provide/testkit/quality/profiling/fixture.py
def profile_function(self, func: Callable[..., Any], *args: Any, **kwargs: Any) -> dict[str, Any]:
    """Profile a function's performance.

    Args:
        func: Function to profile
        *args: Function arguments
        **kwargs: Function keyword arguments

    Returns:
        Profiling results as dict
    """
    if not self.profiler:
        return {"error": "Profiler not available"}

    result = self.profiler.profile_function(func, *args, **kwargs)
    self.add_result(result)

    return {
        "passed": result.passed,
        "score": result.score,
        "memory": result.details.get("memory", {}),
        "cpu": result.details.get("cpu", {}),
        "scores": result.details.get("scores", {}),
        "thresholds": result.details.get("thresholds", {}),
        "execution_time": result.execution_time,
        "function_result": (
            result.details.get("memory", {}).get("function_result")
            or result.details.get("cpu", {}).get("function_result")
        ),
    }
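A usage sketch inside a test; the key names mirror the dict built above, and the explicit setup() call covers the case where the pytest fixture has not already created the profiler:

def test_profile_result_shape(profiling_fixture):
    profiling_fixture.setup()  # ensure the profiler exists; the fixture may already have done this
    result = profiling_fixture.profile_function(sum, range(10_000))
    assert result["passed"]
    assert "memory" in result and "cpu" in result
    print(result["score"], result["execution_time"])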
profile_memory
profile_memory(
    func: Callable[..., Any], *args: Any, **kwargs: Any
) -> dict[str, Any]

Profile memory usage only.

Parameters:

func (Callable[..., Any]): Function to profile (required)
*args (Any): Function arguments
**kwargs (Any): Function keyword arguments

Returns:

dict[str, Any]: Memory profiling results

Source code in provide/testkit/quality/profiling/fixture.py
def profile_memory(self, func: Callable[..., Any], *args: Any, **kwargs: Any) -> dict[str, Any]:
    """Profile memory usage only.

    Args:
        func: Function to profile
        *args: Function arguments
        **kwargs: Function keyword arguments

    Returns:
        Memory profiling results
    """
    if not self._setup_complete:
        self.setup()

    # Configure for memory-only profiling
    original_config = self.config.copy()
    self.config.update({"profile_memory": True, "profile_cpu": False})

    # Recreate profiler with updated config
    self.profiler = PerformanceProfiler(self.config)

    try:
        return self.profile_function(func, *args, **kwargs)
    finally:
        # Restore original config
        self.config = original_config
        self.profiler = PerformanceProfiler(self.config)
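A memory-focused sketch; the peak_memory_mb key is the one read by benchmark_function and assert_performance below, and the allocating workload is purely illustrative:

def test_peak_memory(profiling_fixture):
    def allocate() -> list[int]:
        return [0] * 1_000_000  # a few MB of list storage

    result = profiling_fixture.profile_memory(allocate)
    peak = result.get("memory", {}).get("peak_memory_mb")
    if peak is not None:  # presence depends on the active memory backend
        assert peak < 100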
profile_cpu
profile_cpu(
    func: Callable[..., Any], *args: Any, **kwargs: Any
) -> dict[str, Any]

Profile CPU usage only.

Parameters:

func (Callable[..., Any]): Function to profile (required)
*args (Any): Function arguments
**kwargs (Any): Function keyword arguments

Returns:

dict[str, Any]: CPU profiling results

Source code in provide/testkit/quality/profiling/fixture.py
def profile_cpu(self, func: Callable[..., Any], *args: Any, **kwargs: Any) -> dict[str, Any]:
    """Profile CPU usage only.

    Args:
        func: Function to profile
        *args: Function arguments
        **kwargs: Function keyword arguments

    Returns:
        CPU profiling results
    """
    if not self._setup_complete:
        self.setup()

    # Configure for CPU-only profiling
    original_config = self.config.copy()
    self.config.update({"profile_memory": False, "profile_cpu": True})

    # Recreate profiler with updated config
    self.profiler = PerformanceProfiler(self.config)

    try:
        return self.profile_function(func, *args, **kwargs)
    finally:
        # Restore original config
        self.config = original_config
        self.profiler = PerformanceProfiler(self.config)
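Similarly, a CPU-only run can be checked against a wall-clock budget; execution_time is the key consumed by benchmark_function below, and the threshold is illustrative:

def test_cpu_budget(profiling_fixture):
    def busy() -> int:
        return sum(i * i for i in range(100_000))

    result = profiling_fixture.profile_cpu(busy)
    elapsed = result.get("cpu", {}).get("execution_time")
    if elapsed is not None:
        assert elapsed < 1.0  # illustrative limit in seconds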
benchmark_function
benchmark_function(
    func: Callable[..., Any],
    iterations: int = 100,
    *args: Any,
    **kwargs: Any
) -> dict[str, Any]

Benchmark a function over multiple iterations.

Parameters:

func (Callable[..., Any]): Function to benchmark (required)
iterations (int): Number of iterations to run. Default: 100
*args (Any): Function arguments
**kwargs (Any): Function keyword arguments

Returns:

dict[str, Any]: Benchmark results with statistics

Source code in provide/testkit/quality/profiling/fixture.py
def benchmark_function(
    self, func: Callable[..., Any], iterations: int = 100, *args: Any, **kwargs: Any
) -> dict[str, Any]:
    """Benchmark a function over multiple iterations.

    Args:
        func: Function to benchmark
        iterations: Number of iterations to run
        *args: Function arguments
        **kwargs: Function keyword arguments

    Returns:
        Benchmark results with statistics
    """
    if not self._setup_complete:
        self.setup()

    import statistics

    execution_times = []
    memory_peaks = []

    for _ in range(iterations):
        result = self.profile_function(func, *args, **kwargs)

        if result.get("cpu", {}).get("execution_time"):
            execution_times.append(result["cpu"]["execution_time"])

        if result.get("memory", {}).get("peak_memory_mb"):
            memory_peaks.append(result["memory"]["peak_memory_mb"])

    # Calculate statistics
    benchmark_stats = {}

    if execution_times:
        benchmark_stats["execution_time"] = {
            "mean": statistics.mean(execution_times),
            "median": statistics.median(execution_times),
            "min": min(execution_times),
            "max": max(execution_times),
            "stdev": statistics.stdev(execution_times) if len(execution_times) > 1 else 0,
            "iterations": len(execution_times),
        }

    if memory_peaks:
        benchmark_stats["memory_usage"] = {
            "mean_mb": statistics.mean(memory_peaks),
            "median_mb": statistics.median(memory_peaks),
            "min_mb": min(memory_peaks),
            "max_mb": max(memory_peaks),
            "stdev_mb": statistics.stdev(memory_peaks) if len(memory_peaks) > 1 else 0,
            "iterations": len(memory_peaks),
        }

    return {
        "benchmark_stats": benchmark_stats,
        "iterations": iterations,
        "total_profiling_runs": len(self.results),
    }
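A benchmarking sketch; the statistics keys match the dict assembled above, and the workload and budget are illustrative:

def test_benchmark_sort(profiling_fixture):
    bench = profiling_fixture.benchmark_function(sorted, 50, list(range(1_000, 0, -1)))
    stats = bench["benchmark_stats"].get("execution_time")
    if stats:  # present only when CPU timing was collected
        assert stats["iterations"] <= 50
        assert stats["mean"] < 0.05  # illustrative budget in seconds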
assert_performance
assert_performance(
    func: Callable[..., Any],
    max_memory_mb: float | None = None,
    max_execution_time: float | None = None,
    min_score: float | None = None,
    *args: Any,
    **kwargs: Any
) -> None

Assert performance requirements for a function.

Parameters:

func (Callable[..., Any]): Function to test (required)
max_memory_mb (float | None): Maximum memory usage in MB. Default: None
max_execution_time (float | None): Maximum execution time in seconds. Default: None
min_score (float | None): Minimum performance score. Default: None
*args (Any): Function arguments
**kwargs (Any): Function keyword arguments

Raises:

AssertionError: If performance requirements are not met

Source code in provide/testkit/quality/profiling/fixture.py
def assert_performance(
    self,
    func: Callable[..., Any],
    max_memory_mb: float | None = None,
    max_execution_time: float | None = None,
    min_score: float | None = None,
    *args: Any,
    **kwargs: Any,
) -> None:
    """Assert performance requirements for a function.

    Args:
        func: Function to test
        max_memory_mb: Maximum memory usage in MB
        max_execution_time: Maximum execution time in seconds
        min_score: Minimum performance score
        *args: Function arguments
        **kwargs: Function keyword arguments

    Raises:
        AssertionError: If performance requirements are not met
    """
    if not self._setup_complete:
        self.setup()

    # Update config with assertion requirements
    if max_memory_mb is not None:
        self.config["max_memory_mb"] = max_memory_mb
    if max_execution_time is not None:
        self.config["max_execution_time"] = max_execution_time
    if min_score is not None:
        self.config["min_score"] = min_score

    # Recreate profiler with updated config
    self.profiler = PerformanceProfiler(self.config)

    result = self.profile_function(func, *args, **kwargs)

    # Check assertions
    if not result["passed"]:
        failure_reasons = []

        if max_memory_mb and result.get("memory", {}).get("peak_memory_mb", 0) > max_memory_mb:
            actual_mb = result["memory"]["peak_memory_mb"]
            failure_reasons.append(f"Memory usage {actual_mb:.2f}MB exceeds limit {max_memory_mb}MB")

        if max_execution_time and result.get("cpu", {}).get("execution_time", 0) > max_execution_time:
            actual_time = result["cpu"]["execution_time"]
            failure_reasons.append(
                f"Execution time {actual_time:.4f}s exceeds limit {max_execution_time}s"
            )

        if min_score and result.get("score", 0) < min_score:
            actual_score = result["score"]
            failure_reasons.append(f"Performance score {actual_score:.1f}% below minimum {min_score}%")

        raise AssertionError(f"Performance requirements not met: {'; '.join(failure_reasons)}")
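In a test this reads as a one-line performance gate, and pytest.raises can exercise the failure path; the workloads and limits below are illustrative:

import pytest

def test_performance_gate(profiling_fixture):
    def fast() -> int:
        return sum(range(1_000))

    # Passes silently when all limits are met.
    profiling_fixture.assert_performance(fast, max_memory_mb=50, max_execution_time=1.0)

def test_performance_gate_failure(profiling_fixture):
    def slow() -> int:
        return sum(i * i for i in range(5_000_000))

    with pytest.raises(AssertionError):
        profiling_fixture.assert_performance(slow, max_execution_time=1e-6)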
generate_report
generate_report(format: str = 'terminal') -> str

Generate profiling report.

Parameters:

format (str): Report format (terminal, json). Default: 'terminal'

Returns:

str: Formatted report

Source code in provide/testkit/quality/profiling/fixture.py
def generate_report(self, format: str = "terminal") -> str:
    """Generate profiling report.

    Args:
        format: Report format (terminal, json)

    Returns:
        Formatted report
    """
    if not self.profiler:
        return "No performance profiler available"

    if not self.results:
        return "No profiling results available"

    # Use the most recent result
    latest_result = self.results[-1]
    return self.profiler.report(latest_result, format)
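For example, after at least one profiling call (format names as documented above):

def test_profiling_report(profiling_fixture):
    profiling_fixture.profile_memory(sorted, list(range(1_000, 0, -1)))
    text_report = profiling_fixture.generate_report()        # terminal format
    json_report = profiling_fixture.generate_report("json")  # machine-readable
    assert text_report and json_report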

PerformanceProfiler

PerformanceProfiler(config: dict[str, Any] | None = None)

Performance profiler using memray, cProfile, and tracemalloc.

Provides a high-level interface for performance analysis with automatic artifact management and integration with the quality framework.

Initialize performance profiler.

Parameters:

config (dict[str, Any] | None): Profiler configuration options. Default: None
Source code in provide/testkit/quality/profiling/profiler.py
def __init__(self, config: dict[str, Any] | None = None) -> None:
    """Initialize performance profiler.

    Args:
        config: Profiler configuration options
    """
    self.config = config or {}
    self.artifact_dir: Path | None = None
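A sketch of standalone use outside pytest, assuming the class is importable from the module shown in the source listing; the config keys are the ones read by profile_function below, and the values are illustrative:

from provide.testkit.quality.profiling.profiler import PerformanceProfiler

profiler = PerformanceProfiler(
    config={
        "profile_memory": True,  # enable memory profiling (memray or tracemalloc)
        "profile_cpu": True,     # enable cProfile-based timing
        "use_memray": False,     # force the tracemalloc fallback even if memray is installed
    }
)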
Functions
profile_function
profile_function(
    func: Callable[..., Any], *args: Any, **kwargs: Any
) -> QualityResult

Profile a function's performance.

Parameters:

func (Callable[..., Any]): Function to profile (required)
*args (Any): Function arguments
**kwargs (Any): Function keyword arguments

Returns:

QualityResult: QualityResult with profiling data

Source code in provide/testkit/quality/profiling/profiler.py
def profile_function(self, func: Callable[..., Any], *args: Any, **kwargs: Any) -> QualityResult:
    """Profile a function's performance.

    Args:
        func: Function to profile
        *args: Function arguments
        **kwargs: Function keyword arguments

    Returns:
        QualityResult with profiling data
    """
    start_time = time.time()

    try:
        # Configure profiling options
        profile_memory = self.config.get("profile_memory", True)
        profile_cpu = self.config.get("profile_cpu", True)
        use_memray = self.config.get("use_memray", MEMRAY_AVAILABLE)

        results = {}

        # Memory profiling
        if profile_memory:
            if use_memray and MEMRAY_AVAILABLE:
                memory_result = self._profile_memory_memray(func, *args, **kwargs)
            else:
                memory_result = self._profile_memory_tracemalloc(func, *args, **kwargs)
            results.update(memory_result)

        # CPU profiling
        if profile_cpu:
            cpu_result = self._profile_cpu(func, *args, **kwargs)
            results.update(cpu_result)

        # Analyze results
        return self._process_profiling_results(results, time.time() - start_time)

    except Exception as e:
        return QualityResult(
            tool="profiling",
            passed=False,
            details={"error": str(e), "error_type": type(e).__name__},
            execution_time=time.time() - start_time,
        )
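Continuing the standalone sketch above, the returned QualityResult can be inspected directly; the field names follow the construction above and the JSON output of generate_report:

result = profiler.profile_function(sorted, list(range(10_000, 0, -1)))
print(result.tool, result.passed, result.score)  # "profiling", pass/fail, numeric score
print(result.details.get("memory", {}))          # per-profiler detail dicts
print(f"profiled in {result.execution_time:.4f}s")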
generate_report
generate_report(
    result: QualityResult, format: str = "terminal"
) -> str

Generate profiling report.

Parameters:

result (QualityResult): Profiling result (required)
format (str): Report format. Default: 'terminal'

Returns:

str: Formatted report

Source code in provide/testkit/quality/profiling/profiler.py
def generate_report(self, result: QualityResult, format: str = "terminal") -> str:
    """Generate profiling report.

    Args:
        result: Profiling result
        format: Report format

    Returns:
        Formatted report
    """
    if format == "terminal":
        return self._generate_text_report(result)
    elif format == "json":
        return json.dumps(
            {
                "tool": result.tool,
                "passed": result.passed,
                "score": result.score,
                "details": result.details,
            },
            indent=2,
        )
    else:
        return str(result.details)
report
report(
    result: QualityResult, format: str = "terminal"
) -> str

Generate report from QualityResult (implements QualityTool protocol).

Parameters:

result (QualityResult): Profiling result (required)
format (str): Report format. Default: 'terminal'

Returns:

str: Formatted report

Source code in provide/testkit/quality/profiling/profiler.py
def report(self, result: QualityResult, format: str = "terminal") -> str:
    """Generate report from QualityResult (implements QualityTool protocol).

    Args:
        result: Profiling result
        format: Report format

    Returns:
        Formatted report
    """
    return self.generate_report(result, format)