Profiling

provide.foundation.profiling

Performance profiling for Foundation's logging infrastructure: thread-safe metrics collection, sampled structlog instrumentation, Hub integration, and profiling-specific error types.

Classes

ExporterError

ExporterError(
    message: str,
    *,
    exporter_name: str | None = None,
    endpoint: str | None = None,
    retry_count: int | None = None,
    **kwargs: Any
)

Bases: ProfilingError

Raised when metric export operations fail.

Parameters:

    message (str, required):
        Export error message.
    exporter_name (str | None, default None):
        Optional name of the exporter that failed.
    endpoint (str | None, default None):
        Optional endpoint URL that failed.
    retry_count (int | None, default None):
        Optional number of retries attempted.
    **kwargs (Any):
        Additional context passed to ProfilingError.

Examples:

>>> raise ExporterError("Failed to connect to Prometheus")
>>> raise ExporterError("Export timeout", exporter_name="datadog", retry_count=3)
Source code in provide/foundation/errors/profiling.py
def __init__(
    self,
    message: str,
    *,
    exporter_name: str | None = None,
    endpoint: str | None = None,
    retry_count: int | None = None,
    **kwargs: Any,
) -> None:
    if exporter_name:
        kwargs.setdefault("context", {})["exporter.name"] = exporter_name
    if endpoint:
        kwargs.setdefault("context", {})["exporter.endpoint"] = endpoint
    if retry_count is not None:
        kwargs.setdefault("context", {})["exporter.retry_count"] = retry_count
    super().__init__(message, **kwargs)
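
The keyword-only arguments are folded into the error's structured context under dotted keys ("exporter.name", "exporter.endpoint", "exporter.retry_count"). A minimal sketch of reading that context when handling the error; note the `context` attribute name on the error instance is an assumption about FoundationError's API, not confirmed by this page:

from provide.foundation.errors.profiling import ExporterError

try:
    raise ExporterError("Export timeout", exporter_name="datadog", retry_count=3)
except ExporterError as exc:
    # "context" attribute assumed; the keys are set by __init__ above
    print(exc.context.get("exporter.name"))         # datadog
    print(exc.context.get("exporter.retry_count"))  # 3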

MetricsError

MetricsError(
    message: str,
    *,
    metric_name: str | None = None,
    metric_value: Any = None,
    **kwargs: Any
)

Bases: ProfilingError

Raised when metrics collection operations fail.

Parameters:

    message (str, required):
        Metrics error message.
    metric_name (str | None, default None):
        Optional name of the metric that failed.
    metric_value (Any, default None):
        Optional value that caused the error.
    **kwargs (Any):
        Additional context passed to ProfilingError.

Examples:

>>> raise MetricsError("Invalid metric value")
>>> raise MetricsError("Metric overflow", metric_name="latency_ms")
Source code in provide/foundation/errors/profiling.py
def __init__(
    self,
    message: str,
    *,
    metric_name: str | None = None,
    metric_value: Any = None,
    **kwargs: Any,
) -> None:
    if metric_name:
        kwargs.setdefault("context", {})["metrics.name"] = metric_name
    if metric_value is not None:
        kwargs.setdefault("context", {})["metrics.value"] = metric_value
    super().__init__(message, **kwargs)

ProfileMetrics

ProfileMetrics()

Thread-safe metrics collection for profiling Foundation performance.

Tracks message processing performance, emoji overhead, and throughput metrics for Foundation's logging infrastructure.

Example

metrics = ProfileMetrics()
metrics.record_message(duration_ns=1500000, has_emoji=True, field_count=5)
print(f"Avg latency: {metrics.avg_latency_ms:.2f}ms")
print(f"Throughput: {metrics.messages_per_second:.0f} msg/sec")

Initialize metrics with zero values and current timestamp.

Source code in provide/foundation/profiling/metrics.py
def __init__(self) -> None:
    """Initialize metrics with zero values and current timestamp."""
    self._lock = threading.Lock()
    self.reset()
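
Because every mutation and read happens under the internal lock, concurrent recording from multiple threads is safe. A small sketch using only the methods documented on this page:

import threading

from provide.foundation.profiling.metrics import ProfileMetrics

metrics = ProfileMetrics()

def worker() -> None:
    for _ in range(1_000):
        # ~1.5 ms per message, all with emoji, five fields each
        metrics.record_message(duration_ns=1_500_000, has_emoji=True, field_count=5)

threads = [threading.Thread(target=worker) for _ in range(4)]
for t in threads:
    t.start()
for t in threads:
    t.join()

assert metrics.to_dict()["total_messages"] == 4_000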
Attributes
avg_fields_per_message property
avg_fields_per_message: float

Calculate average number of fields per message.

avg_latency_ms property
avg_latency_ms: float

Calculate average processing latency in milliseconds.

emoji_overhead_percent property
emoji_overhead_percent: float

Calculate percentage of messages with emoji processing.

messages_per_second property
messages_per_second: float

Calculate messages per second since start time.
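
Each property is a simple ratio over the raw counters; to_dict below computes the same formulas inline to avoid re-entering the lock. A worked example of the arithmetic, assuming the properties mirror those inline calculations:

metrics = ProfileMetrics()
metrics.record_message(duration_ns=2_000_000, has_emoji=True, field_count=4)
metrics.record_message(duration_ns=4_000_000, has_emoji=False, field_count=6)

# avg_latency_ms = (total_duration_ns / message_count) / 1_000_000
#                = (6_000_000 / 2) / 1_000_000 = 3.0
assert metrics.avg_latency_ms == 3.0

# emoji_overhead_percent = (emoji_message_count / message_count) * 100 = 50.0
assert metrics.emoji_overhead_percent == 50.0

# avg_fields_per_message = total_field_count / message_count = (4 + 6) / 2 = 5.0
assert metrics.avg_fields_per_message == 5.0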

Functions
record_message
record_message(
    duration_ns: int, has_emoji: bool, field_count: int
) -> None

Record a processed message with timing and metadata.

Parameters:

    duration_ns (int, required):
        Processing duration in nanoseconds.
    has_emoji (bool, required):
        Whether the message contained emoji processing.
    field_count (int, required):
        Number of fields in the log event.
Source code in provide/foundation/profiling/metrics.py
def record_message(
    self,
    duration_ns: int,
    has_emoji: bool,
    field_count: int,
) -> None:
    """Record a processed message with timing and metadata.

    Args:
        duration_ns: Processing duration in nanoseconds
        has_emoji: Whether the message contained emoji processing
        field_count: Number of fields in the log event

    """
    with self._lock:
        self.message_count += 1
        self.total_duration_ns += duration_ns

        if has_emoji:
            self.emoji_message_count += 1

        # Track field complexity (for future analysis)
        self._total_field_count += field_count
reset
reset() -> None

Reset all metrics to initial values with new start time.

Source code in provide/foundation/profiling/metrics.py
def reset(self) -> None:
    """Reset all metrics to initial values with new start time."""
    with self._lock:
        self.message_count = 0
        self.total_duration_ns = 0
        self.emoji_message_count = 0
        self.dropped_count = 0
        self.start_time = time.time()
        self._total_field_count = 0
to_dict
to_dict() -> dict[str, Any]

Serialize metrics to dictionary for JSON output.

Returns:

    dict[str, Any]:
        Dictionary containing all current metrics.

Source code in provide/foundation/profiling/metrics.py
def to_dict(self) -> dict[str, Any]:
    """Serialize metrics to dictionary for JSON output.

    Returns:
        Dictionary containing all current metrics

    """
    with self._lock:
        # Calculate metrics directly to avoid deadlock from property calls
        elapsed = time.time() - self.start_time
        messages_per_second = self.message_count / elapsed if elapsed > 0 else 0.0
        avg_latency_ms = (
            (self.total_duration_ns / self.message_count) / 1_000_000 if self.message_count > 0 else 0.0
        )
        emoji_overhead_percent = (
            (self.emoji_message_count / self.message_count) * 100 if self.message_count > 0 else 0.0
        )
        avg_fields_per_message = (
            self._total_field_count / self.message_count if self.message_count > 0 else 0.0
        )

        return {
            "messages_per_second": round(messages_per_second, 2),
            "avg_latency_ms": round(avg_latency_ms, 4),
            "emoji_overhead_percent": round(emoji_overhead_percent, 1),
            "total_messages": self.message_count,
            "emoji_messages": self.emoji_message_count,
            "dropped_messages": self.dropped_count,
            "avg_fields_per_message": round(avg_fields_per_message, 1),
            "uptime_seconds": round(elapsed, 1),
        }
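
All values in the returned dictionary are plain floats and ints, so the snapshot can be handed straight to json.dumps:

import json

metrics = ProfileMetrics()
metrics.record_message(duration_ns=1_500_000, has_emoji=True, field_count=5)
print(json.dumps(metrics.to_dict(), indent=2))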

ProfilingComponent

ProfilingComponent(
    sample_rate: float = DEFAULT_PROFILING_SAMPLE_RATE,
)

Hub component for managing Foundation performance profiling.

Integrates profiling functionality into Foundation's Hub architecture, providing centralized configuration and lifecycle management for performance monitoring.

Example

from provide.foundation.hub import Hub
from provide.foundation.profiling import register_profiling

hub = Hub()
register_profiling(hub)
profiler = hub.get_component("profiler")
profiler.enable()

# Metrics are automatically collected
metrics = profiler.get_metrics()
print(f"Throughput: {metrics.messages_per_second:.0f} msg/sec")

Initialize profiling component.

Parameters:

    sample_rate (float, default DEFAULT_PROFILING_SAMPLE_RATE):
        Fraction of messages to sample for metrics.
Source code in provide/foundation/profiling/component.py
def __init__(self, sample_rate: float = DEFAULT_PROFILING_SAMPLE_RATE) -> None:
    """Initialize profiling component.

    Args:
        sample_rate: Fraction of messages to sample for metrics

    """
    self.processor = ProfilingProcessor(sample_rate=sample_rate)
    self.enabled = False
Functions
__repr__
__repr__() -> str

String representation for debugging.

Source code in provide/foundation/profiling/component.py
def __repr__(self) -> str:
    """String representation for debugging."""
    status = "enabled" if self.enabled else "disabled"
    sample_rate = self.processor.sample_rate
    return f"ProfilingComponent(enabled={status}, sample_rate={sample_rate})"
disable
disable() -> None

Disable profiling metrics collection.

Source code in provide/foundation/profiling/component.py
@resilient(
    fallback=None,
    context_provider=lambda: {"component": "profiler"},
)
def disable(self) -> None:
    """Disable profiling metrics collection."""
    if not self.enabled:
        return

    # Disable the processor
    self.processor.disable()
    self.enabled = False

    # Log that profiling is disabled
    try:
        from provide.foundation import logger

        logger.info("Profiling disabled", emoji="📊", component="profiler")
    except (ImportError, AttributeError):
        # Don't fail if logging isn't available or not fully initialized
        pass
enable
enable() -> None

Enable profiling metrics collection.

This method is safe to call multiple times - it will not re-enable if already enabled.

Source code in provide/foundation/profiling/component.py
@resilient(
    fallback=None,
    context_provider=lambda: {"component": "profiler"},
)
def enable(self) -> None:
    """Enable profiling metrics collection.

    This method is safe to call multiple times - it will not
    re-enable if already enabled.

    """
    if self.enabled:
        return

    # Enable the processor
    self.processor.enable()
    self.enabled = True

    # Log that profiling is enabled using Foundation's logger
    try:
        from provide.foundation import logger

        logger.info("Profiling enabled", emoji="📊", component="profiler")
    except (ImportError, AttributeError):
        # Don't fail if logging isn't available or not fully initialized
        pass
get_metrics
get_metrics() -> ProfileMetrics

Get current profiling metrics.

Returns:

    ProfileMetrics:
        Current ProfileMetrics instance with collected data.

Source code in provide/foundation/profiling/component.py
def get_metrics(self) -> ProfileMetrics:
    """Get current profiling metrics.

    Returns:
        Current ProfileMetrics instance with collected data

    """
    return self.processor.get_metrics()
reset
reset() -> None

Reset profiling metrics to initial values.

Useful for testing and periodic metric collection.

Source code in provide/foundation/profiling/component.py
@resilient(
    fallback=None,
    context_provider=lambda: {"component": "profiler", "operation": "reset"},
)
def reset(self) -> None:
    """Reset profiling metrics to initial values.

    Useful for testing and periodic metric collection.

    """
    self.processor.reset()

    # Log the reset operation
    try:
        from provide.foundation import logger

        logger.debug("Profiling metrics reset", emoji="🔄", component="profiler")
    except (ImportError, AttributeError):
        # Don't fail if logging isn't available or not fully initialized
        pass
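
As the docstring notes, reset pairs naturally with periodic collection: snapshot a window, export it, then reset so the next window starts clean. A sketch under that pattern; the export step is a placeholder, not part of this API:

import time

def collect_forever(profiler: ProfilingComponent, interval_s: float = 60.0) -> None:
    while True:
        time.sleep(interval_s)
        snapshot = profiler.get_metrics().to_dict()
        print(snapshot)  # placeholder: replace with your exporter
        profiler.reset()  # start a fresh measurement window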

ProfilingError

ProfilingError(
    message: str,
    *,
    component: str | None = None,
    sample_rate: float | None = None,
    **kwargs: Any
)

Bases: FoundationError

Raised when profiling operations fail.

Parameters:

    message (str, required):
        Error message describing the profiling issue.
    component (str | None, default None):
        Optional profiling component that caused the error.
    sample_rate (float | None, default None):
        Optional sample rate when the error occurred.
    **kwargs (Any):
        Additional context passed to FoundationError.

Examples:

>>> raise ProfilingError("Profiling initialization failed")
>>> raise ProfilingError("Invalid sample rate", sample_rate=1.5)
Source code in provide/foundation/errors/profiling.py
def __init__(
    self,
    message: str,
    *,
    component: str | None = None,
    sample_rate: float | None = None,
    **kwargs: Any,
) -> None:
    if component:
        kwargs.setdefault("context", {})["profiling.component"] = component
    if sample_rate is not None:
        kwargs.setdefault("context", {})["profiling.sample_rate"] = sample_rate
    super().__init__(message, **kwargs)

ProfilingProcessor

ProfilingProcessor(
    sample_rate: float = DEFAULT_PROFILING_SAMPLE_RATE,
)

Structlog processor that collects performance metrics via sampling.

This processor integrates into Foundation's existing structlog pipeline to collect metrics about message processing performance, emoji overhead, and throughput with configurable sampling to minimize performance impact.

Example

processor = ProfilingProcessor(sample_rate=0.01)  # 1% sampling

# Add to structlog processor chain
processors.append(processor)

# Later, get metrics
metrics = processor.get_metrics()
print(f"Processing {metrics.messages_per_second:.0f} msg/sec")

Initialize profiling processor with sampling configuration.

Parameters:

    sample_rate (float, default DEFAULT_PROFILING_SAMPLE_RATE):
        Fraction of messages to sample (0.0 to 1.0); 0.01 = 1% sampling for minimal overhead.
Source code in provide/foundation/profiling/processor.py
def __init__(self, sample_rate: float = DEFAULT_PROFILING_SAMPLE_RATE) -> None:
    """Initialize profiling processor with sampling configuration.

    Args:
        sample_rate: Fraction of messages to sample (0.0 to 1.0)
                    0.01 = 1% sampling for minimal overhead

    """
    if not 0.0 <= sample_rate <= 1.0:
        raise SamplingError("Sample rate must be between 0.0 and 1.0", sample_rate=sample_rate)

    self.sample_rate = sample_rate
    self.metrics = ProfileMetrics()
    self._enabled = True
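
The range check runs eagerly in the constructor, so a bad sample rate fails at configuration time rather than mid-pipeline:

from provide.foundation.errors.profiling import SamplingError
from provide.foundation.profiling.processor import ProfilingProcessor

try:
    ProfilingProcessor(sample_rate=1.5)
except SamplingError as exc:
    print(exc)  # Sample rate must be between 0.0 and 1.0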
Functions
__call__
__call__(
    logger: Any, method_name: str, event_dict: EventDict
) -> structlog.types.EventDict

Process log event and optionally collect metrics.

This is the main entry point called by structlog for each log message. Uses sampling to minimize performance overhead.

Parameters:

    logger (Any, required):
        The logger instance (unused).
    method_name (str, required):
        The logging method name (unused).
    event_dict (EventDict, required):
        The event dictionary to process.

Returns:

    EventDict:
        The event_dict unchanged (pass-through processor).

Source code in provide/foundation/profiling/processor.py
def __call__(
    self,
    logger: Any,
    method_name: str,
    event_dict: structlog.types.EventDict,
) -> structlog.types.EventDict:
    """Process log event and optionally collect metrics.

    This is the main entry point called by structlog for each log message.
    Uses sampling to minimize performance overhead.

    Args:
        logger: The logger instance (unused)
        method_name: The logging method name (unused)
        event_dict: The event dictionary to process

    Returns:
        The event_dict unchanged (pass-through processor)

    """
    # Always return event_dict unchanged - we're just observing
    if not self._enabled:
        return event_dict

    # Use sampling to reduce overhead
    if random.random() > self.sample_rate:
        return event_dict

    # Measure processing time for this event
    start_time = time.perf_counter_ns()

    try:
        # Analyze event characteristics
        has_emoji = self._detect_emoji_processing(event_dict)
        field_count = len(event_dict)

        # Record metrics (very fast operation)
        processing_time = time.perf_counter_ns() - start_time
        self.metrics.record_message(
            duration_ns=processing_time,
            has_emoji=has_emoji,
            field_count=field_count,
        )

    except Exception:
        # Never let profiling break the logging pipeline
        # Silently ignore any profiling errors
        pass

    return event_dict
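
Since __call__ has the standard structlog processor signature and passes the event dict through untouched, the processor can sit anywhere in a chain. A minimal sketch of manual wiring with plain structlog, outside Foundation's own pipeline setup:

import structlog

from provide.foundation.profiling.processor import ProfilingProcessor

profiling = ProfilingProcessor(sample_rate=0.05)  # sample 5% of messages

structlog.configure(
    processors=[
        profiling,  # observe-only; returns event_dict unchanged
        structlog.processors.JSONRenderer(),
    ],
)

log = structlog.get_logger()
for _ in range(100):
    log.info("request handled", path="/health", status=200)

print(profiling.get_metrics().to_dict())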
disable
disable() -> None

Disable metrics collection.

Source code in provide/foundation/profiling/processor.py
def disable(self) -> None:
    """Disable metrics collection."""
    self._enabled = False
enable
enable() -> None

Enable metrics collection.

Source code in provide/foundation/profiling/processor.py
def enable(self) -> None:
    """Enable metrics collection."""
    self._enabled = True
get_metrics
get_metrics() -> ProfileMetrics

Get current metrics.

Returns:

    ProfileMetrics:
        Current ProfileMetrics instance.

Source code in provide/foundation/profiling/processor.py
def get_metrics(self) -> ProfileMetrics:
    """Get current metrics.

    Returns:
        Current ProfileMetrics instance

    """
    return self.metrics
reset
reset() -> None

Reset collected metrics.

Source code in provide/foundation/profiling/processor.py
def reset(self) -> None:
    """Reset collected metrics."""
    self.metrics.reset()

SamplingError

SamplingError(
    message: str,
    *,
    sample_rate: float | None = None,
    samples_processed: int | None = None,
    **kwargs: Any
)

Bases: ProfilingError

Raised when sampling operations fail.

Parameters:

    message (str, required):
        Sampling error message.
    sample_rate (float | None, default None):
        The sample rate that caused the error.
    samples_processed (int | None, default None):
        Optional number of samples processed.
    **kwargs (Any):
        Additional context passed to ProfilingError.

Examples:

>>> raise SamplingError("Invalid sample rate", sample_rate=1.5)
>>> raise SamplingError("Sampling buffer overflow", samples_processed=1000)
Source code in provide/foundation/errors/profiling.py
def __init__(
    self,
    message: str,
    *,
    sample_rate: float | None = None,
    samples_processed: int | None = None,
    **kwargs: Any,
) -> None:
    if sample_rate is not None:
        kwargs.setdefault("context", {})["sampling.rate"] = sample_rate
    if samples_processed is not None:
        kwargs.setdefault("context", {})["sampling.processed"] = samples_processed
    super().__init__(message, **kwargs)

Functions

register_profiling

register_profiling(
    hub: Hub,
    sample_rate: float = DEFAULT_PROFILING_SAMPLE_RATE,
) -> None

Register profiling component with Hub and add CLI command.

Parameters:

    hub (Hub, required):
        The Hub instance to register with.
    sample_rate (float, default DEFAULT_PROFILING_SAMPLE_RATE):
        Sampling rate for metrics collection.
Example

from provide.foundation.hub import Hub
from provide.foundation.profiling.component import register_profiling

hub = Hub()
register_profiling(hub)

Source code in provide/foundation/profiling/component.py
@resilient(
    fallback=None,
    context_provider=lambda: {"operation": "register_profiling"},
)
def register_profiling(hub: Hub, sample_rate: float = DEFAULT_PROFILING_SAMPLE_RATE) -> None:
    """Register profiling component with Hub and add CLI command.

    Args:
        hub: The Hub instance to register with
        sample_rate: Sampling rate for metrics collection

    Example:
        >>> from provide.foundation.hub import Hub
        >>> from provide.foundation.profiling.component import register_profiling
        >>>
        >>> hub = Hub()
        >>> register_profiling(hub)

    """
    # Create and register the profiling component instance
    profiler = ProfilingComponent(sample_rate=sample_rate)

    # Register directly with the hub's component registry
    hub._component_registry.register(
        name="profiler",
        value=profiler,
        dimension=ComponentCategory.COMPONENT.value,
        metadata={"type": "profiling", "sample_rate": sample_rate},
    )

    # Register CLI command
    try:
        from provide.foundation.profiling.cli import register_profile_command

        register_profile_command(hub)

    except ImportError:
        # CLI components may not be available in all environments
        pass
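
Putting the pieces together, a typical end-to-end flow combines the examples above; nothing here is additional API:

from provide.foundation.hub import Hub
from provide.foundation.profiling import register_profiling

hub = Hub()
register_profiling(hub)

profiler = hub.get_component("profiler")
profiler.enable()

# ... application logging flows through Foundation as usual ...

print(profiler.get_metrics().to_dict())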