Testmode

provide.foundation.testmode

Utilities for detecting test environments, guarding test-unsafe features, and resetting Foundation state for test isolation.

Functions

configure_structlog_for_test_safety

configure_structlog_for_test_safety() -> None

Configure structlog to use stdout for multiprocessing safety.

When running tests with parallel execution (pytest-xdist, mutmut with --max-children, etc.), file handles don't survive process forking. This causes "I/O operation on closed file" errors when structlog's PrintLogger tries to write to file handles from forked processes.

This function configures structlog to use sys.stdout, which is safe for multiprocessing and is handled properly by pytest.

Should be called automatically when is_in_test_mode() returns True.

Source code in provide/foundation/testmode/detection.py
def configure_structlog_for_test_safety() -> None:
    """Configure structlog to use stdout for multiprocessing safety.

    When running tests with parallel execution (pytest-xdist, mutmut with
    --max-children, etc.), file handles don't survive process forking.
    This causes "I/O operation on closed file" errors when structlog's
    PrintLogger tries to write to file handles from forked processes.

    This function configures structlog to use sys.stdout which is safe
    for multiprocessing and properly handled by pytest.

    Should be called automatically when is_in_test_mode() returns True.
    """
    import logging as stdlib_logging
    import sys

    import structlog

    # Configure structlog to use stdout (safe for multiprocessing)
    structlog.configure(
        processors=[
            structlog.processors.TimeStamper(fmt="iso"),
            structlog.dev.ConsoleRenderer(),
        ],
        wrapper_class=structlog.make_filtering_bound_logger(stdlib_logging.INFO),
        context_class=dict,
        logger_factory=structlog.PrintLoggerFactory(file=sys.stdout),
        cache_logger_on_first_use=False,  # Disable caching for test isolation
    )
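
For example, a test suite could opt in from its conftest.py before any test logs a line (a minimal sketch; the file placement is illustrative, and the detection submodule import matches the source path shown above):

# conftest.py (illustrative): switch structlog to stdout before forked
# workers inherit file handles they would later find closed.
from provide.foundation.testmode.detection import (
    configure_structlog_for_test_safety,
    is_in_test_mode,
)

if is_in_test_mode():
    configure_structlog_for_test_safety()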

get_test_unsafe_features

get_test_unsafe_features() -> dict[str, dict[str, Any]]

Get the registry of all test-unsafe features.

This is primarily used by validation tests to ensure all test-unsafe features are properly decorated.

Returns:

dict[str, dict[str, Any]]: Dictionary mapping function IDs to their metadata.

Example:

>>> features = get_test_unsafe_features()
>>> assert "process.title.set_process_title" in features

Source code in provide/foundation/testmode/decorators.py
def get_test_unsafe_features() -> dict[str, dict[str, Any]]:
    """Get the registry of all test-unsafe features.

    This is primarily used by validation tests to ensure all test-unsafe
    features are properly decorated.

    Returns:
        Dictionary mapping function IDs to their metadata

    Example:
        >>> features = get_test_unsafe_features()
        >>> assert "process.title.set_process_title" in features

    """
    return _TEST_UNSAFE_FEATURES.copy()

is_in_click_testing

is_in_click_testing() -> bool

Check if we're running inside Click's testing framework.

This detects Click's CliRunner testing context to prevent stream manipulation that could interfere with Click's output capture.

Returns:

bool: True if running in Click testing context, False otherwise.

Source code in provide/foundation/testmode/detection.py
def is_in_click_testing() -> bool:
    """Check if we're running inside Click's testing framework.

    This detects Click's CliRunner testing context to prevent stream
    manipulation that could interfere with Click's output capture.

    Returns:
        True if running in Click testing context, False otherwise
    """
    from provide.foundation.streams.config import get_stream_config

    config = get_stream_config()

    # Check environment variables for Click testing
    if config.click_testing:
        return True

    # Check the call stack for Click's testing module or CLI integration tests
    for frame_info in inspect.stack():
        module = frame_info.frame.f_globals.get("__name__", "")
        filename = frame_info.filename or ""

        if "click.testing" in module or "test_cli_integration" in filename:
            return True

        # Also check for common Click testing patterns
        locals_self = frame_info.frame.f_locals.get("self")
        if locals_self is not None and hasattr(locals_self, "runner"):
            runner = locals_self.runner
            if hasattr(runner, "invoke") and "CliRunner" in str(type(runner)):
                return True

    return False
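
A typical use is guarding stream manipulation so it never runs under CliRunner's output capture (a hedged sketch; redirect_stderr_to is a hypothetical helper, not part of this module):

import sys
from typing import TextIO

from provide.foundation.testmode.detection import is_in_click_testing

def redirect_stderr_to(stream: TextIO) -> None:
    # Hypothetical helper: swapping streams while CliRunner is capturing
    # output would break Click's capture, so bail out in that context.
    if is_in_click_testing():
        return
    sys.stderr = stream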

is_in_test_mode

is_in_test_mode() -> bool

Detect if we're running in a test environment.

This function checks for common test environment indicators to determine whether Foundation components should adjust their behavior for test compatibility.

Performance: Results are cached after first detection since test mode doesn't change during process lifetime. Use _clear_test_mode_cache() in tests for proper isolation.

Returns:

bool: True if running in test mode, False otherwise.

Source code in provide/foundation/testmode/detection.py
def is_in_test_mode() -> bool:
    """Detect if we're running in a test environment.

    This function checks for common test environment indicators to determine
    if Foundation components should adjust their behavior for test compatibility.

    Performance: Results are cached after first detection since test mode
    doesn't change during process lifetime. Use _clear_test_mode_cache()
    in tests for proper isolation.

    Returns:
        True if running in test mode, False otherwise
    """
    global _test_mode_cache

    # Return cached result if available
    if _test_mode_cache is not None:
        return _test_mode_cache

    # Primary indicator: pytest current test environment variable (FAST)
    if "PYTEST_CURRENT_TEST" in os.environ:
        _test_mode_cache = True
        return True

    # Check if pytest is currently imported and active
    if "pytest" in sys.modules:
        # Additional check: make sure we're actually running in a test context (FAST)
        if any("pytest" in arg for arg in sys.argv):
            _test_mode_cache = True
            return True

        # Last resort: Check if pytest is actively running by looking for test-related
        # stack frames. This is EXPENSIVE so we only do it after fast checks fail.
        for frame_info in inspect.stack():
            filename = frame_info.filename or ""
            if "pytest" in filename or "/test_" in filename or "conftest.py" in filename:
                _test_mode_cache = True
                return True

    # Check for unittest runner in active execution (FAST)
    if "unittest" in sys.modules and any("unittest" in arg for arg in sys.argv):
        _test_mode_cache = True
        return True

    # Not in test mode - cache the negative result too
    _test_mode_cache = False
    return False
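
Because the result is cached, callers can use the check liberally as a guard around test-unsafe side effects (an illustrative sketch; write_pidfile is a hypothetical function):

import os

from provide.foundation.testmode.detection import is_in_test_mode

def write_pidfile(path: str) -> None:
    # Hypothetical side effect, skipped entirely under a test runner.
    # Repeated calls are cheap since detection runs once per process.
    if is_in_test_mode():
        return
    with open(path, "w", encoding="ascii") as f:
        f.write(str(os.getpid()))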

is_test_unsafe

is_test_unsafe(func: Callable[..., Any]) -> bool

Check if a function is registered as test-unsafe.

Parameters:

func (Callable[..., Any], required): The function to check.

Returns:

bool: True if the function is decorated with @skip_in_test_mode.

Example:

>>> @skip_in_test_mode()
... def my_function():
...     pass
>>> is_test_unsafe(my_function)
True

Source code in provide/foundation/testmode/decorators.py
def is_test_unsafe(func: Callable[..., Any]) -> bool:
    """Check if a function is registered as test-unsafe.

    Args:
        func: The function to check

    Returns:
        True if the function is decorated with @skip_in_test_mode

    Example:
        >>> @skip_in_test_mode()
        ... def my_function():
        ...     pass
        >>> is_test_unsafe(my_function)
        True

    """
    func_id = f"{func.__module__}.{func.__name__}"
    return func_id in _TEST_UNSAFE_FEATURES

reset_circuit_breaker_state

reset_circuit_breaker_state() -> None

Reset all circuit breaker instances to ensure test isolation.

This function resets all circuit breaker instances, whether created by the @circuit_breaker decorator or by direct instantiation, so their state doesn't leak between tests.

Source code in provide/foundation/testmode/internal.py
def reset_circuit_breaker_state() -> None:
    """Reset all circuit breaker instances to ensure test isolation.

    This function resets all circuit breaker instances that were created
    by the @circuit_breaker decorator and direct instantiation to ensure
    their state doesn't leak between tests.
    """
    # Reset all CircuitBreaker instances created directly (not via decorator)
    # Do this FIRST to catch all instances before decorator reset
    _reset_direct_circuit_breaker_instances()

    try:
        import asyncio

        from provide.foundation.resilience.decorators import (
            reset_circuit_breakers_for_testing,
            reset_test_circuit_breakers,
        )

        # Reset both production and test circuit breakers
        # These are now async functions, so we need to run them in an event loop

        # Check if we're in an async context (running event loop)
        try:
            asyncio.get_running_loop()
            # We're in an async context - skip reset to avoid blocking
            # This shouldn't happen in practice since reset is called from sync fixtures
            return
        except RuntimeError:
            # No running loop - we're in sync context, safe to proceed
            pass

        # Use asyncio.run() to create fresh event loop for each call
        # This is more reliable than trying to reuse get_event_loop()
        asyncio.run(reset_circuit_breakers_for_testing())
        asyncio.run(reset_test_circuit_breakers())
    except ImportError:
        # Resilience decorators module not available, skip
        pass
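
Since the source above notes that reset is expected to run from sync fixtures, a per-test fixture is the natural call site (an illustrative pytest sketch, not shipped with this module):

import pytest

from provide.foundation.testmode.internal import reset_circuit_breaker_state

@pytest.fixture(autouse=True)
def isolated_circuit_breakers():
    # Illustrative: clear breaker state after each test so a circuit opened
    # by one test cannot short-circuit calls in the next.
    yield
    reset_circuit_breaker_state()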

reset_foundation_for_testing

reset_foundation_for_testing() -> None

Complete Foundation reset for testing with transport re-registration.

This is the full reset function that testing frameworks should call. It performs the complete state reset and handles test-specific concerns like transport re-registration and test stream preservation.

Source code in provide/foundation/testmode/orchestration.py
def reset_foundation_for_testing() -> None:
    """Complete Foundation reset for testing with transport re-registration.

    This is the full reset function that testing frameworks should call.
    It performs the complete state reset and handles test-specific concerns
    like transport re-registration and test stream preservation.
    """
    global _reset_for_testing_in_progress

    # Prevent recursive resets during test cleanup
    if _reset_for_testing_in_progress:
        return

    _reset_for_testing_in_progress = True
    try:
        # Save current stream if it's a test stream (not stderr/stdout)
        import sys

        preserve_stream = None
        try:
            from provide.foundation.streams.core import get_log_stream

            current_stream = get_log_stream()
            # Only preserve if it's not stderr/stdout (i.e., it's a test stream)
            if current_stream not in (sys.stderr, sys.stdout):
                preserve_stream = current_stream
        except Exception:
            # Error getting current stream, skip preservation
            pass

        # Full reset with Hub-based state management
        reset_foundation_state()

        # Reset transport registration flags so transports can be re-registered
        try:
            from provide.foundation.testmode.internal import reset_transport_registration_flags

            reset_transport_registration_flags()
        except ImportError:
            # Testmode module not available
            pass

        # Re-register HTTP transport for tests that need it
        try:
            from provide.foundation.transport.http import _register_http_transport

            _register_http_transport()
        except ImportError:
            # Transport module not available
            pass

        # Final reset of lazy setup state (after transport registration)
        try:
            from provide.foundation.logger.core import _LAZY_SETUP_STATE

            _LAZY_SETUP_STATE.update({"done": False, "error": None, "in_progress": False})
        except ImportError:
            pass

        # Restore test stream if there was one
        if preserve_stream:
            try:
                from provide.foundation.streams.core import set_log_stream_for_testing

                set_log_stream_for_testing(preserve_stream)
            except Exception:
                # Error restoring stream, continue without it
                pass
    finally:
        # Always clear the in-progress flag
        _reset_for_testing_in_progress = False
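
A testing framework would typically invoke this once per test, for example from an autouse fixture (an illustrative sketch; the fixture name is made up):

import pytest

from provide.foundation.testmode.orchestration import reset_foundation_for_testing

@pytest.fixture(autouse=True)
def fresh_foundation():
    # Illustrative: start each test from a clean Foundation, with transports
    # re-registered and any injected test stream preserved across the reset.
    reset_foundation_for_testing()
    yield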

reset_foundation_state

reset_foundation_state() -> None

Reset Foundation's complete internal state using proper orchestration.

This is the master reset function that knows the proper order and handles Foundation-specific concerns. It resets:

- structlog configuration to defaults
- Foundation Hub state (which manages all Foundation components)
- stream state back to defaults
- lazy setup state tracking (if available)
- OpenTelemetry provider state (if available)
- Foundation environment variables to defaults

This function encapsulates Foundation-internal knowledge about proper reset ordering and component dependencies.

Source code in provide/foundation/testmode/orchestration.py
def reset_foundation_state() -> None:
    """Reset Foundation's complete internal state using proper orchestration.

    This is the master reset function that knows the proper order and handles
    Foundation-specific concerns. It resets:
    - structlog configuration to defaults
    - Foundation Hub state (which manages all Foundation components)
    - Stream state back to defaults
    - Lazy setup state tracking (if available)
    - OpenTelemetry provider state (if available)
    - Foundation environment variables to defaults

    This function encapsulates Foundation-internal knowledge about proper
    reset ordering and component dependencies.
    """
    global _reset_in_progress

    # Prevent recursive resets that can cause infinite loops
    if _reset_in_progress:
        return

    _reset_in_progress = True
    try:
        # Import all the individual reset functions from internal module
        from provide.foundation.testmode.internal import (
            reset_circuit_breaker_state,
            reset_configuration_state,
            reset_coordinator_state,
            reset_event_loops,
            reset_eventsets_state,
            reset_hub_state,
            reset_logger_state,
            reset_state_managers,
            reset_streams_state,
            reset_structlog_state,
            reset_test_mode_cache,
            reset_time_machine_state,
            reset_version_cache,
        )

        # Signal that reset is in progress to prevent event enrichment and Hub event logging
        try:
            from provide.foundation.logger.processors.main import (
                set_reset_in_progress as set_processor_reset,
            )

            set_processor_reset(True)
        except ImportError:
            pass

        try:
            from provide.foundation.hub.event_handlers import (
                set_reset_in_progress as set_hub_reset,
            )

            set_hub_reset(True)
        except ImportError:
            pass

        # Reset Foundation environment variables first to avoid affecting other resets
        _reset_foundation_environment_variables()

        # Reset test mode cache early so subsequent detection is fresh
        reset_test_mode_cache()

        # Reset in the proper order to avoid triggering reinitialization
        reset_structlog_state()
        reset_streams_state()
        reset_version_cache()

        # Reset event enrichment processor state to prevent re-initialization during cleanup
        try:
            from provide.foundation.logger.processors.main import (
                reset_event_enrichment_state,
            )

            reset_event_enrichment_state()
        except ImportError:
            # Processor module not available, skip
            pass

        # Reset OpenTelemetry providers to avoid "Overriding" warnings and stream closure
        # Note: OpenTelemetry providers are designed to prevent override for safety.
        # In parallel test environments (pytest-xdist), skip this reset to avoid deadlocks.
        # The OTel provider reset manipulates internal _ONCE flags which can deadlock
        # across multiple worker processes. The warnings are harmless in test context.
        import os

        if not os.environ.get("PYTEST_XDIST_WORKER"):
            _reset_opentelemetry_providers()

        # Reset lazy setup state FIRST to prevent hub operations from triggering setup
        reset_logger_state()

        # Clear Hub (this handles all Foundation state including logger instances)
        reset_hub_state()

        # Reset coordinator and event set state
        reset_coordinator_state()
        reset_eventsets_state()

        # Reset circuit breaker state to prevent test isolation issues
        reset_circuit_breaker_state()

        # Reset new state management systems
        reset_state_managers()
        reset_configuration_state()

        # Final reset of logger state (after all operations that might trigger setup)
        reset_logger_state()

        # Reset time_machine patches FIRST to unfreeze time
        # This must happen BEFORE creating a new event loop so the loop doesn't cache frozen time
        reset_time_machine_state()

        # Then clean up event loops to get a fresh loop with unfrozen time
        # The new loop will have correct time.monotonic references
        reset_event_loops()
    finally:
        # Always clear the reset-in-progress flags
        _reset_in_progress = False
        try:
            from provide.foundation.logger.processors.main import (
                set_reset_in_progress as set_processor_reset,
            )

            set_processor_reset(False)
        except ImportError:
            pass

        try:
            from provide.foundation.hub.event_handlers import (
                set_reset_in_progress as set_hub_reset,
            )

            set_hub_reset(False)
        except ImportError:
            pass
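
Callers that only need the state reset, without transport re-registration or stream preservation, can call this directly (an illustrative sketch):

from provide.foundation.testmode.orchestration import reset_foundation_state

def teardown_module(module: object) -> None:
    # Illustrative pytest module-level teardown: run the full, ordered state
    # reset; most suites should prefer reset_foundation_for_testing().
    reset_foundation_state()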

reset_global_coordinator

reset_global_coordinator() -> None

Reset the global initialization coordinator state for testing.

This function resets the singleton InitializationCoordinator state to ensure proper test isolation between test runs.

WARNING: This should only be called from test code or test fixtures. Production code should not reset the global coordinator state.

Source code in provide/foundation/testmode/internal.py
def reset_global_coordinator() -> None:
    """Reset the global initialization coordinator state for testing.

    This function resets the singleton InitializationCoordinator state
    to ensure proper test isolation between test runs.

    WARNING: This should only be called from test code or test fixtures.
    Production code should not reset the global coordinator state.
    """
    try:
        from provide.foundation.hub.initialization import _coordinator

        _coordinator.reset_state()
    except ImportError:
        # Initialization module not available, skip
        pass

reset_hub_state

reset_hub_state() -> None

Reset Hub state to defaults.

This clears the Hub registry and resets all Hub components to their initial state.

Source code in provide/foundation/testmode/internal.py
def reset_hub_state() -> None:
    """Reset Hub state to defaults.

    This clears the Hub registry and resets all Hub components
    to their initial state.
    """
    global _hub_reset_in_progress

    # Prevent recursive resets that can trigger re-initialization
    if _hub_reset_in_progress:
        return

    _hub_reset_in_progress = True
    try:
        try:
            from provide.foundation.hub.manager import clear_hub

            clear_hub()
        except ImportError:
            # Hub module not available, skip
            pass

        try:
            # Also reset the initialized components cache
            from provide.foundation.hub.components import _initialized_components

            _initialized_components.clear()
        except ImportError:
            # Components module not available, skip
            pass

        try:
            # Clear the global component registry (where bootstrap_foundation registers components)
            from provide.foundation.hub.components import _component_registry

            _component_registry.clear()
        except ImportError:
            # Components module not available, skip
            pass

        try:
            # Clear the global command registry (where @register_command decorator registers commands)
            from provide.foundation.hub.registry import _command_registry

            _command_registry.clear()
        except ImportError:
            # Registry module not available, skip
            pass
    finally:
        _hub_reset_in_progress = False

reset_logger_state

reset_logger_state() -> None

Reset Foundation logger state to defaults.

This resets the lazy setup state and logger configuration flags without importing the full logger module to avoid circular dependencies.

Source code in provide/foundation/testmode/internal.py
def reset_logger_state() -> None:
    """Reset Foundation logger state to defaults.

    This resets the lazy setup state and logger configuration flags
    without importing the full logger module to avoid circular dependencies.
    """
    try:
        from provide.foundation.logger.core import _LAZY_SETUP_STATE

        _LAZY_SETUP_STATE.update({"done": False, "error": None, "in_progress": False})
    except ImportError:
        # Logger state not available, skip
        pass

    try:
        from provide.foundation.logger.core import logger as foundation_logger

        # Reset foundation logger state by bypassing the proxy to avoid circular initialization
        # Access the proxy's __dict__ directly to avoid triggering __setattr__
        foundation_logger.__dict__["_is_configured_by_setup"] = False
        foundation_logger.__dict__["_active_config"] = None
        foundation_logger.__dict__["_active_resolved_emoji_config"] = None
    except (ImportError, AttributeError, TypeError):
        # Skip if foundation_logger is a proxy without direct attribute access
        pass

reset_streams_state

reset_streams_state() -> None

Reset stream state to defaults.

This resets file streams and other stream-related state managed by the streams module.

Source code in provide/foundation/testmode/internal.py
def reset_streams_state() -> None:
    """Reset stream state to defaults.

    This resets file streams and other stream-related state
    managed by the streams module.
    """
    try:
        from provide.foundation.streams.file import reset_streams

        reset_streams()
    except ImportError:
        # Streams module not available, skip
        pass

reset_structlog_state

reset_structlog_state() -> None

Reset structlog configuration to defaults.

This is the most fundamental reset - it clears all structlog configuration and returns it to an unconfigured state.

Source code in provide/foundation/testmode/internal.py
def reset_structlog_state() -> None:
    """Reset structlog configuration to defaults.

    This is the most fundamental reset - it clears all structlog
    configuration and returns it to an unconfigured state.
    """
    structlog.reset_defaults()

reset_test_mode_cache

reset_test_mode_cache() -> None

Reset test mode detection cache.

This clears the cached test mode detection result, allowing fresh detection on the next call. This is important for test isolation when tests manipulate environment variables or sys.modules.

Source code in provide/foundation/testmode/internal.py
def reset_test_mode_cache() -> None:
    """Reset test mode detection cache.

    This clears the cached test mode detection result, allowing fresh
    detection on the next call. This is important for test isolation
    when tests manipulate environment variables or sys.modules.
    """
    from provide.foundation.testmode.detection import _clear_test_mode_cache

    _clear_test_mode_cache()
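
This matters whenever a test changes the environment that detection reads (an illustrative sketch using pytest's monkeypatch fixture):

from provide.foundation.testmode.detection import is_in_test_mode
from provide.foundation.testmode.internal import reset_test_mode_cache

def test_detection_sees_env_change(monkeypatch):
    # Illustrative: without the cache reset, is_in_test_mode() would keep
    # returning whatever it detected on its first call in this process.
    monkeypatch.setenv("PYTEST_CURRENT_TEST", "test_detection_sees_env_change")
    reset_test_mode_cache()
    assert is_in_test_mode()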

reset_version_cache

reset_version_cache() -> None

Reset version cache to defaults.

This clears the cached version to ensure clean state between tests, allowing each test to verify different version resolution scenarios.

Source code in provide/foundation/testmode/internal.py
def reset_version_cache() -> None:
    """Reset version cache to defaults.

    This clears the cached version to ensure clean state
    between tests, allowing each test to verify different
    version resolution scenarios.
    """
    try:
        from provide.foundation._version import (  # type: ignore[import-untyped]
            reset_version_cache as _reset_cache,
        )

        _reset_cache()
    except ImportError:
        # Version module not available, skip
        pass

should_use_shared_registries

should_use_shared_registries(
    use_shared_registries: bool,
    component_registry: object | None,
    command_registry: object | None,
) -> bool

Determine if Hub should use shared registries based on explicit parameters.

Parameters:

use_shared_registries (bool, required): Explicit user preference.
component_registry (object | None, required): Custom component registry if provided.
command_registry (object | None, required): Custom command registry if provided.

Returns:

bool: True if shared registries should be used.

Source code in provide/foundation/testmode/detection.py
def should_use_shared_registries(
    use_shared_registries: bool,
    component_registry: object | None,
    command_registry: object | None,
) -> bool:
    """Determine if Hub should use shared registries based on explicit parameters.

    Args:
        use_shared_registries: Explicit user preference
        component_registry: Custom component registry if provided
        command_registry: Custom command registry if provided

    Returns:
        True if shared registries should be used
    """
    # Return explicit preference - no auto-detection magic
    return use_shared_registries

skip_in_test_mode

skip_in_test_mode(
    return_value: Any = None,
    log_level: str = "debug",
    reason: str | None = None,
) -> Callable[[F], F]

Decorator to skip function execution in test mode.

Marks a function as test-unsafe and automatically skips execution when running in test mode. The function is registered in a global registry for validation purposes.

This decorator is reusable for any scenario where you want to conditionally skip function execution based on runtime detection.

Parameters:

return_value (Any, default None): Value to return when skipped.
log_level (str, default "debug"): Log level for the skip message.
reason (str | None, default None): Optional custom reason for skipping (used in logging).

Returns:

Callable[[F], F]: Decorated function that checks test mode before execution.

Example:

>>> @skip_in_test_mode(return_value=True)
... def set_system_state(value: str) -> bool:
...     # This won't run in tests
...     os.system(f"something {value}")
...     return True

>>> @skip_in_test_mode(return_value=None, reason="systemd not available in tests")
... def notify_systemd(status: str) -> None:
...     systemd.notify(status)

Source code in provide/foundation/testmode/decorators.py
def skip_in_test_mode(
    return_value: Any = None,
    log_level: str = "debug",
    reason: str | None = None,
) -> Callable[[F], F]:
    """Decorator to skip function execution in test mode.

    Marks a function as test-unsafe and automatically skips execution when
    running in test mode. The function is registered in a global registry
    for validation purposes.

    This decorator is reusable for any scenario where you want to conditionally
    skip function execution based on runtime detection.

    Args:
        return_value: Value to return when skipped (default: None)
        log_level: Log level for skip message (default: "debug")
        reason: Optional custom reason for skipping (for logging)

    Returns:
        Decorated function that checks test mode before execution

    Example:
        >>> @skip_in_test_mode(return_value=True)
        ... def set_system_state(value: str) -> bool:
        ...     # This won't run in tests
        ...     os.system(f"something {value}")
        ...     return True

        >>> @skip_in_test_mode(return_value=None, reason="systemd not available in tests")
        ... def notify_systemd(status: str) -> None:
        ...     systemd.notify(status)

    """

    def decorator(func: F) -> F:
        # Register this function as test-unsafe
        func_id = f"{func.__module__}.{func.__name__}"
        _TEST_UNSAFE_FEATURES[func_id] = {
            "function": func,
            "return_value": return_value,
            "reason": reason or "Test mode detected - preventing test interference",
        }

        @functools.wraps(func)
        def wrapper(*args: Any, **kwargs: Any) -> Any:
            if is_in_test_mode():
                # Determine log message
                skip_reason = reason or "Test mode detected - preventing test interference"

                # Get the logger method (debug, info, warning, etc.)
                log_method = getattr(log, log_level, log.debug)

                # Log the skip
                log_method(
                    f"Skipping {func.__name__} in test mode",
                    function=func.__name__,
                    reason=skip_reason,
                    return_value=return_value,
                )

                return return_value

            # Not in test mode - execute normally
            return func(*args, **kwargs)

        return wrapper  # type: ignore[return-value]

    return decorator
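
Putting the pieces together, a decorated function is both skipped in test mode and discoverable by the validation helpers above (an illustrative sketch; restart_service is a made-up example):

import os

from provide.foundation.testmode.decorators import (
    get_test_unsafe_features,
    is_test_unsafe,
    skip_in_test_mode,
)

@skip_in_test_mode(return_value=0, reason="touches the real system")
def restart_service() -> int:
    # Real side effect outside tests; in test mode the wrapper logs a skip
    # message and returns 0 without executing this body.
    return os.system("true")

# functools.wraps preserves __module__/__name__, so the registry lookup works:
assert is_test_unsafe(restart_service)
assert f"{restart_service.__module__}.{restart_service.__name__}" in get_test_unsafe_features()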