Parsers

provide.foundation.parsers

Parsing utilities for converting string configuration values (environment variables, attrs fields, and similar inputs) into typed Python values: booleans, numbers, collections, JSON structures, and telemetry settings.

Functions

auto_parse

auto_parse(attr: Any, value: str) -> Any

Automatically parse value based on an attrs field's type and metadata.

This function first checks for a converter in the field's metadata, then falls back to type-based parsing.

Parameters:

    attr (Any): attrs field (from fields(Class)). Required.
    value (str): String value to parse. Required.

Returns:

    Any: Parsed value based on field type or converter.

Examples:

>>> from attrs import define, field, fields
>>> @define
... class Config:
...     count: int = field()
...     enabled: bool = field()
...     custom: str = field(converter=lambda x: x.upper())
>>> c = Config(count=0, enabled=False, custom="")
>>> auto_parse(fields(Config).count, "42")
42
>>> auto_parse(fields(Config).enabled, "true")
True
>>> auto_parse(fields(Config).custom, "hello")
'HELLO'
Source code in provide/foundation/parsers/attrs_integration.py
def auto_parse(attr: Any, value: str) -> Any:
    """Automatically parse value based on an attrs field's type and metadata.

    This function first checks for a converter in the field's metadata,
    then falls back to type-based parsing.

    Args:
        attr: attrs field (from fields(Class))
        value: String value to parse

    Returns:
        Parsed value based on field type or converter

    Examples:
        >>> from attrs import define, field, fields
        >>> @define
        ... class Config:
        ...     count: int = field()
        ...     enabled: bool = field()
        ...     custom: str = field(converter=lambda x: x.upper())
        >>> c = Config(count=0, enabled=False, custom="")
        >>> auto_parse(fields(Config).count, "42")
        42
        >>> auto_parse(fields(Config).enabled, "true")
        True
        >>> auto_parse(fields(Config).custom, "hello")
        'HELLO'

    """
    # Check for attrs field converter first
    if hasattr(attr, "converter"):
        success, result = _try_converter(attr.converter, value)
        if success:
            return result

    # Check for converter in metadata as fallback
    if hasattr(attr, "metadata") and attr.metadata:
        converter = attr.metadata.get("converter")
        success, result = _try_converter(converter, value)
        if success:
            return result

    # Get type hint from attrs field and try type-based parsing
    field_type = _extract_field_type(attr)
    if field_type is not None:
        return parse_typed_value(value, field_type)

    # No type info, return as string
    return value

extract_concrete_type

extract_concrete_type(annotation: Any) -> type

Extract concrete type from type annotation, handling unions, optionals, and string annotations.

This function handles:

- Union types (str | None, Union[str, None])
- Optional types (str | None)
- Regular types (str, int, bool)
- String annotations (from __future__ import annotations)
- Generic types (list[int], dict[str, str])

Parameters:

    annotation (Any): Type annotation from function signature or attrs field. Required.

Returns:

    type: Concrete type that can be used for parsing.

Examples:

>>> extract_concrete_type(str | None)
<class 'str'>
>>> extract_concrete_type('str | None')
<class 'str'>
>>> extract_concrete_type(list[int])
list[int]
Source code in provide/foundation/parsers/typed.py
def extract_concrete_type(annotation: Any) -> type:
    """Extract concrete type from type annotation, handling unions, optionals, and string annotations.

    This function handles:
    - Union types (str | None, Union[str, None])
    - Optional types (str | None)
    - Regular types (str, int, bool)
    - String annotations (from __future__ import annotations)
    - Generic types (list[int], dict[str, str])

    Args:
        annotation: Type annotation from function signature or attrs field

    Returns:
        Concrete type that can be used for parsing

    Examples:
        >>> extract_concrete_type(str | None)
        <class 'str'>
        >>> extract_concrete_type('str | None')
        <class 'str'>
        >>> extract_concrete_type(list[int])
        list[int]
    """
    # Handle string annotations (from __future__ import annotations)
    if isinstance(annotation, str):
        annotation = annotation.strip()

        # Handle Union types as strings (e.g., "str | None")
        if " | " in annotation:
            parts = [part.strip() for part in annotation.split(" | ")]
            non_none_parts = [part for part in parts if part != "None"]
            if non_none_parts:
                annotation = non_none_parts[0]
            else:
                return str  # Default to str if only None

        # Map string type names to actual types
        type_mapping = {
            "str": str,
            "int": int,
            "bool": bool,
            "float": float,
            "list": list,
            "dict": dict,
            "tuple": tuple,
            "set": set,
            "Path": str,  # Path objects are handled as strings
            "pathlib.Path": str,
        }

        return type_mapping.get(annotation, str)

    # Handle None type
    if annotation is type(None):
        return str  # Default to str

    # Get origin and args for generic types
    origin = get_origin(annotation)
    args = get_args(annotation)

    # Handle Union types (including Optional which is Union[T, None])
    if origin is typing.Union or (hasattr(types, "UnionType") and isinstance(annotation, types.UnionType)):
        # For Python 3.10+ union syntax (str | None)
        if hasattr(annotation, "__args__"):
            args = annotation.__args__

        # Filter out None type to get the actual type
        non_none_types = [t for t in args if t is not type(None)]

        if non_none_types:
            # Return the first non-None type
            return non_none_types[0]

        # If only None, default to str
        return str

    # For generic types, return as-is (e.g., list[int])
    if origin is not None:
        return annotation

    # For non-generic types, return as-is
    return annotation

parse_bool

parse_bool(value: Any, strict: bool = False) -> bool

Parse a boolean value from string or other types.

Accepts: true/false, yes/no, 1/0, on/off (case-insensitive)

Parameters:

    value (Any): Value to parse as boolean. Required.
    strict (bool): If True, only accept bool or string types (raise TypeError otherwise). Default: False.

Returns:

    bool: Boolean value.

Raises:

    TypeError: If strict=True and value is not bool or string, or if value is not bool/str.
    ValueError: If value cannot be parsed as boolean.
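
Example usage (a sketch, not from the source docstring; it assumes parse_bool is importable from provide.foundation.parsers.primitives, the module shown under "Source code" below):

>>> from provide.foundation.parsers.primitives import parse_bool
>>> parse_bool("yes")
True
>>> parse_bool("OFF")
False
>>> parse_bool(0)
False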

Source code in provide/foundation/parsers/primitives.py
def parse_bool(value: Any, strict: bool = False) -> bool:
    """Parse a boolean value from string or other types.

    Accepts: true/false, yes/no, 1/0, on/off (case-insensitive)

    Args:
        value: Value to parse as boolean
        strict: If True, only accept bool or string types (raise TypeError otherwise)

    Returns:
        Boolean value

    Raises:
        TypeError: If strict=True and value is not bool or string, or if value is not bool/str
        ValueError: If value cannot be parsed as boolean

    """
    if strict and not isinstance(value, (bool, str)):
        raise TypeError(f"Cannot convert {type(value).__name__} to bool: {value!r}")

    return parse_bool_strict(value)

parse_bool_extended

parse_bool_extended(value: str | bool) -> bool

Parse boolean from string with lenient/forgiving interpretation.

This is the lenient boolean parser - designed for user-facing configuration where we want to be forgiving of various inputs. Any unrecognized string defaults to False rather than raising an error.

Use Cases:

- Environment variables set by end users
- Feature flags that should default to "off" if misconfigured
- Optional telemetry settings where failure should not break the system

Recognized True Values: true, yes, 1, on (case-insensitive)
Recognized False Values: false, no, 0, off (case-insensitive)
Default Behavior: Any other string → False (no error)

Parameters:

    value (str | bool): Boolean string representation or actual bool. Required.

Returns:

    bool: Boolean value (defaults to False for unrecognized strings).

Examples:

>>> parse_bool_extended("yes")  # True
>>> parse_bool_extended("FALSE")  # False
>>> parse_bool_extended("invalid")  # False (no error)
>>> parse_bool_extended(True)  # True
Source code in provide/foundation/parsers/primitives.py
def parse_bool_extended(value: str | bool) -> bool:
    """Parse boolean from string with lenient/forgiving interpretation.

    This is the **lenient** boolean parser - designed for user-facing configuration
    where we want to be forgiving of various inputs. Any unrecognized string
    defaults to False rather than raising an error.

    **Use Cases:**
    - Environment variables set by end users
    - Feature flags that should default to "off" if misconfigured
    - Optional telemetry settings where failure should not break the system

    **Recognized True Values:** true, yes, 1, on (case-insensitive)
    **Recognized False Values:** false, no, 0, off (case-insensitive)
    **Default Behavior:** Any other string → False (no error)

    Args:
        value: Boolean string representation or actual bool

    Returns:
        Boolean value (defaults to False for unrecognized strings)

    Examples:
        >>> parse_bool_extended("yes")  # True
        >>> parse_bool_extended("FALSE")  # False
        >>> parse_bool_extended("invalid")  # False (no error)
        >>> parse_bool_extended(True)  # True

    """
    # If already a bool, return as-is
    if isinstance(value, bool):
        return value

    # Convert to string and parse
    value_lower = str(value).lower().strip()
    # Only return True for explicit true values, everything else is False
    return value_lower in ("true", "yes", "1", "on")

parse_bool_strict

parse_bool_strict(value: str | bool | int | float) -> bool

Parse boolean from string with strict validation and clear error messages.

This is the strict boolean parser - designed for internal APIs and critical configuration where invalid values should cause immediate failure with helpful error messages.

Use Cases:

- Internal API parameters where precision matters
- Critical system configurations where misconfiguration is dangerous
- Programmatic configuration where clear validation errors help developers

Recognized True Values: true, yes, 1, on, enabled (case-insensitive), 1.0
Recognized False Values: false, no, 0, off, disabled (case-insensitive), 0.0
Error Behavior: Raises ValueError with helpful message for invalid values

Parameters:

    value (str | bool | int | float): Boolean value as string, bool, int, or float. Required.

Returns:

    bool: Boolean value (never defaults - raises on invalid input).

Raises:

    TypeError: If value is not a string, bool, int, or float.
    ValueError: If value cannot be parsed as boolean.

Examples:

>>> parse_bool_strict("yes")  # True
>>> parse_bool_strict("FALSE")  # False
>>> parse_bool_strict(1)  # True
>>> parse_bool_strict(0.0)  # False
>>> parse_bool_strict("invalid")  # ValueError with helpful message
>>> parse_bool_strict(42)  # ValueError - only 0/1 valid for numbers
Source code in provide/foundation/parsers/primitives.py
def parse_bool_strict(value: str | bool | int | float) -> bool:
    """Parse boolean from string with strict validation and clear error messages.

    This is the **strict** boolean parser - designed for internal APIs and critical
    configuration where invalid values should cause immediate failure with helpful
    error messages.

    **Use Cases:**
    - Internal API parameters where precision matters
    - Critical system configurations where misconfiguration is dangerous
    - Programmatic configuration where clear validation errors help developers

    **Recognized True Values:** true, yes, 1, on, enabled (case-insensitive), 1.0
    **Recognized False Values:** false, no, 0, off, disabled (case-insensitive), 0.0
    **Error Behavior:** Raises ValueError with helpful message for invalid values

    Args:
        value: Boolean value as string, bool, int, or float

    Returns:
        Boolean value (never defaults - raises on invalid input)

    Raises:
        TypeError: If value is not a string, bool, int, or float
        ValueError: If value cannot be parsed as boolean

    Examples:
        >>> parse_bool_strict("yes")  # True
        >>> parse_bool_strict("FALSE")  # False
        >>> parse_bool_strict(1)  # True
        >>> parse_bool_strict(0.0)  # False
        >>> parse_bool_strict("invalid")  # ValueError with helpful message
        >>> parse_bool_strict(42)  # ValueError - only 0/1 valid for numbers

    """
    # Check type first for clear error messages
    if not isinstance(value, str | bool | int | float):
        raise TypeError(
            f"Boolean field requires str, bool, int, or float, got {type(value).__name__}. "
            f"Received value: {value!r}",
        )

    # If already a bool, return as-is
    if isinstance(value, bool):
        return value

    # Handle numeric types - only 0 and 1 are valid
    if isinstance(value, int | float):
        if value == 1 or value == 1.0:
            return True
        if value == 0 or value == 0.0:
            return False
        raise ValueError(
            f"Numeric boolean must be 0 or 1, got {value}. "
            f"Use parse_bool_extended() for lenient parsing that defaults to False",
        )

    # Convert to string and parse
    value_lower = value.lower().strip()

    if value_lower in ("true", "yes", "1", "on", "enabled"):
        return True
    if value_lower in ("false", "no", "0", "off", "disabled"):
        return False
    raise ValueError(
        _format_invalid_value_error(
            "boolean",
            value,
            valid_options=["true", "false", "yes", "no", "1", "0", "on", "off", "enabled", "disabled"],
            additional_info="Use parse_bool_extended() for lenient parsing that defaults to False",
        ),
    )

parse_comma_list

parse_comma_list(value: str) -> list[str]

Parse comma-separated list of strings.

Parameters:

    value (str): Comma-separated string. Required.

Returns:

    list[str]: List of trimmed non-empty strings.
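
Example usage (a sketch, not part of the original docstring; the import path follows the "Source code" note below):

>>> from provide.foundation.parsers.collections import parse_comma_list
>>> parse_comma_list("a, b, ,c")
['a', 'b', 'c']
>>> parse_comma_list("")
[]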

Source code in provide/foundation/parsers/collections.py
def parse_comma_list(value: str) -> list[str]:
    """Parse comma-separated list of strings.

    Args:
        value: Comma-separated string

    Returns:
        List of trimmed non-empty strings

    """
    if not value or not value.strip():
        return []

    return [item.strip() for item in value.split(",") if item.strip()]

parse_console_formatter

parse_console_formatter(value: str) -> ConsoleFormatterStr

Parse and validate console formatter string.

Parameters:

    value (str): Formatter string (case-insensitive). Required.

Returns:

    ConsoleFormatterStr: Valid formatter string in lowercase.

Raises:

    ValueError: If the formatter is invalid.

Source code in provide/foundation/parsers/telemetry.py
def parse_console_formatter(value: str) -> ConsoleFormatterStr:
    """Parse and validate console formatter string.

    Args:
        value: Formatter string (case-insensitive)

    Returns:
        Valid formatter string in lowercase

    Raises:
        ValueError: If the formatter is invalid

    """
    formatter = value.lower()
    if formatter not in _VALID_FORMATTER_TUPLE:
        raise ValueError(
            _format_invalid_value_error(
                "console_formatter",
                value,
                valid_options=list(_VALID_FORMATTER_TUPLE),
            ),
        )
    return cast("ConsoleFormatterStr", formatter)

parse_dict

parse_dict(
    value: str | dict[str, str],
    item_separator: str = ",",
    key_separator: str = "=",
    strip: bool = True,
) -> dict[str, str]

Parse a dictionary from a string.

Format: "key1=value1,key2=value2"

Parameters:

    value (str | dict[str, str]): String or dict to parse. Required.
    item_separator (str): Separator between items. Default: ','.
    key_separator (str): Separator between key and value. Default: '='.
    strip (bool): Whether to strip whitespace. Default: True.

Returns:

    dict[str, str]: Dictionary of string keys and values.

Raises:

    ValueError: If format is invalid.
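
Example usage (a sketch, not from the source docstring; the import path is assumed from the "Source code" note below):

>>> from provide.foundation.parsers.collections import parse_dict
>>> parse_dict("host=localhost, port=8080")
{'host': 'localhost', 'port': '8080'}
>>> parse_dict("a:1;b:2", item_separator=";", key_separator=":")
{'a': '1', 'b': '2'}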

Source code in provide/foundation/parsers/collections.py
def parse_dict(
    value: str | dict[str, str],
    item_separator: str = ",",
    key_separator: str = "=",
    strip: bool = True,
) -> dict[str, str]:
    """Parse a dictionary from a string.

    Format: "key1=value1,key2=value2"

    Args:
        value: String or dict to parse
        item_separator: Separator between items
        key_separator: Separator between key and value
        strip: Whether to strip whitespace

    Returns:
        Dictionary of string keys and values

    Raises:
        ValueError: If format is invalid

    """
    if isinstance(value, dict):
        return value

    if not value:
        return {}

    result = {}
    items = value.split(item_separator)

    for item in items:
        if not item:
            continue

        if key_separator not in item:
            raise ValueError(f"Invalid dict format: '{item}' missing '{key_separator}'")

        key, val = item.split(key_separator, 1)

        if strip:
            key = key.strip()
            val = val.strip()

        result[key] = val

    return result

parse_float_with_validation

parse_float_with_validation(
    value: str,
    min_val: float | None = None,
    max_val: float | None = None,
) -> float

Parse float with optional range validation.

Parameters:

    value (str): String representation of float. Required.
    min_val (float | None): Minimum allowed value (inclusive). Default: None.
    max_val (float | None): Maximum allowed value (inclusive). Default: None.

Returns:

    float: Parsed float value.

Raises:

    ValueError: If value is not a valid float or out of range.
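
Example usage (a sketch, not from the source docstring; the import path follows the "Source code" note below):

>>> from provide.foundation.parsers.primitives import parse_float_with_validation
>>> parse_float_with_validation("0.75", min_val=0.0, max_val=1.0)
0.75
>>> parse_float_with_validation("2.5", max_val=1.0)  # ValueError: must be <= 1.0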

Source code in provide/foundation/parsers/primitives.py
def parse_float_with_validation(
    value: str,
    min_val: float | None = None,
    max_val: float | None = None,
) -> float:
    """Parse float with optional range validation.

    Args:
        value: String representation of float
        min_val: Minimum allowed value (inclusive)
        max_val: Maximum allowed value (inclusive)

    Returns:
        Parsed float value

    Raises:
        ValueError: If value is not a valid float or out of range

    """
    try:
        result = float(value)
    except (ValueError, TypeError) as e:
        raise ValueError(
            _format_invalid_value_error("float", value, expected_type="float"),
        ) from e

    if min_val is not None and result < min_val:
        raise ValueError(
            _format_validation_error("float", result, f"must be >= {min_val}"),
        )

    if max_val is not None and result > max_val:
        raise ValueError(
            _format_validation_error("float", result, f"must be <= {max_val}"),
        )

    return result

parse_foundation_log_output

parse_foundation_log_output(value: str) -> str

Parse and validate foundation log output destination.

Parameters:

    value (str): Output destination string. Required.

Returns:

    str: Valid output destination (stderr, stdout, main).

Raises:

    ValueError: If the value is invalid.
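
Example usage (a sketch based on the behavior shown in the source below; the import path is assumed from the "Source code" note):

>>> from provide.foundation.parsers.telemetry import parse_foundation_log_output
>>> parse_foundation_log_output("STDOUT")
'stdout'
>>> parse_foundation_log_output("")
'stderr'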

Source code in provide/foundation/parsers/telemetry.py
def parse_foundation_log_output(value: str) -> str:
    """Parse and validate foundation log output destination.

    Args:
        value: Output destination string

    Returns:
        Valid output destination (stderr, stdout, main)

    Raises:
        ValueError: If the value is invalid

    """
    if not value:
        return "stderr"

    normalized = value.lower().strip()
    valid_options = ("stderr", "stdout", "main")

    if normalized in valid_options:
        return normalized
    raise ValueError(
        _format_invalid_value_error(
            "foundation_log_output",
            value,
            valid_options=list(valid_options),
        ),
    )

parse_headers

parse_headers(
    value: str | dict[str, str],
) -> dict[str, str]

Parse HTTP headers from string format.

Format Requirements:

- Comma-separated key=value pairs: "key1=value1,key2=value2"
- Header names and values are trimmed of whitespace
- Empty header names are ignored
- Each pair must contain exactly one '=' separator
- Invalid pairs are silently skipped

Examples:

>>> parse_headers("Authorization=Bearer token,Content-Type=application/json")
{'Authorization': 'Bearer token', 'Content-Type': 'application/json'}

>>> parse_headers("X-API-Key=secret123")  # Single header
{'X-API-Key': 'secret123'}

>>> parse_headers("valid=ok,invalid-no-equals,another=good")  # Partial success
{'valid': 'ok', 'another': 'good'}

>>> parse_headers("empty-value=")  # Empty values allowed
{'empty-value': ''}

Parameters:

    value (str | dict[str, str]): Comma-separated key=value pairs for HTTP headers, or dict if already parsed. Required.

Returns:

    dict[str, str]: Dictionary of header name-value pairs. Invalid entries are silently ignored.

Note

This parser is lenient by design - invalid header pairs are skipped rather than raising errors to allow partial configuration success in production environments.

Source code in provide/foundation/parsers/structured.py
def parse_headers(value: str | dict[str, str]) -> dict[str, str]:
    """Parse HTTP headers from string format.

    **Format Requirements:**
    - Comma-separated key=value pairs: "key1=value1,key2=value2"
    - Header names and values are trimmed of whitespace
    - Empty header names are ignored
    - Each pair must contain exactly one '=' separator
    - Invalid pairs are silently skipped

    **Examples:**
        >>> parse_headers("Authorization=Bearer token,Content-Type=application/json")
        {'Authorization': 'Bearer token', 'Content-Type': 'application/json'}

        >>> parse_headers("X-API-Key=secret123")  # Single header
        {'X-API-Key': 'secret123'}

        >>> parse_headers("valid=ok,invalid-no-equals,another=good")  # Partial success
        {'valid': 'ok', 'another': 'good'}

        >>> parse_headers("empty-value=")  # Empty values allowed
        {'empty-value': ''}

    Args:
        value: Comma-separated key=value pairs for HTTP headers, or dict if already parsed

    Returns:
        Dictionary of header name-value pairs.
        Invalid entries are silently ignored.

    Note:
        This parser is lenient by design - invalid header pairs are skipped rather than
        raising errors to allow partial configuration success in production environments.

    """
    # If already a dict (from factory default), return as-is
    if isinstance(value, dict):
        return value

    if not value or not value.strip():
        return {}

    result = {}
    for pair in value.split(","):
        pair = pair.strip()
        if not pair:
            continue

        if "=" not in pair:
            # Skip invalid entries
            continue

        key, val = pair.split("=", 1)
        key = key.strip()
        val = val.strip()

        if key:
            result[key] = val

    return result

parse_json_dict

parse_json_dict(value: str) -> dict[str, Any]

Parse JSON string into dictionary.

Parameters:

    value (str): JSON string. Required.

Returns:

    dict[str, Any]: Parsed dictionary.

Raises:

    ValueError: If JSON is invalid.
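
Example usage (a sketch, not part of the source docstring; the import path is assumed from the "Source code" note below):

>>> from provide.foundation.parsers.primitives import parse_json_dict
>>> parse_json_dict('{"region": "us-east-1", "retries": 3}')
{'region': 'us-east-1', 'retries': 3}
>>> parse_json_dict("")
{}
>>> parse_json_dict("[1, 2]")  # ValueError: expected a JSON object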

Source code in provide/foundation/parsers/primitives.py
def parse_json_dict(value: str) -> dict[str, Any]:
    """Parse JSON string into dictionary.

    Args:
        value: JSON string

    Returns:
        Parsed dictionary

    Raises:
        ValueError: If JSON is invalid

    """
    if not value or not value.strip():
        return {}

    try:
        result = json_loads(value)
        if not isinstance(result, dict):
            raise ValueError(
                _format_invalid_value_error(
                    "json_dict",
                    type(result).__name__,
                    expected_type="JSON object",
                ),
            )
        return result
    except Exception as e:
        raise ValueError(
            _format_invalid_value_error("json_dict", value, expected_type="valid JSON"),
        ) from e

parse_json_list

parse_json_list(value: str) -> list[Any]

Parse JSON string into list.

Parameters:

    value (str): JSON string. Required.

Returns:

    list[Any]: Parsed list.

Raises:

    ValueError: If JSON is invalid.
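
Example usage (a sketch, not in the original docstring; the import path is assumed from the "Source code" note below):

>>> from provide.foundation.parsers.primitives import parse_json_list
>>> parse_json_list('["alpha", "beta"]')
['alpha', 'beta']
>>> parse_json_list("")
[]
>>> parse_json_list('{"a": 1}')  # ValueError: expected a JSON array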

Source code in provide/foundation/parsers/primitives.py
def parse_json_list(value: str) -> list[Any]:
    """Parse JSON string into list.

    Args:
        value: JSON string

    Returns:
        Parsed list

    Raises:
        ValueError: If JSON is invalid

    """
    if not value or not value.strip():
        return []

    try:
        result = json_loads(value)
        if not isinstance(result, list):
            raise ValueError(
                _format_invalid_value_error(
                    "json_list",
                    type(result).__name__,
                    expected_type="JSON array",
                ),
            )
        return result
    except Exception as e:
        raise ValueError(
            _format_invalid_value_error("json_list", value, expected_type="valid JSON"),
        ) from e

parse_list

parse_list(
    value: str | list[str],
    separator: str = ",",
    strip: bool = True,
) -> list[str]

Parse a list from a string.

Parameters:

    value (str | list[str]): String or list to parse. Required.
    separator (str): Separator character. Default: ','.
    strip (bool): Whether to strip whitespace from items. Default: True.

Returns:

    list[str]: List of strings.
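
Example usage (a sketch, not from the source docstring; the import path follows the "Source code" note below):

>>> from provide.foundation.parsers.collections import parse_list
>>> parse_list("a, b, c")
['a', 'b', 'c']
>>> parse_list("x|y|z", separator="|")
['x', 'y', 'z']
>>> parse_list(["already", "parsed"])
['already', 'parsed']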

Source code in provide/foundation/parsers/collections.py
def parse_list(
    value: str | list[str],
    separator: str = ",",
    strip: bool = True,
) -> list[str]:
    """Parse a list from a string.

    Args:
        value: String or list to parse
        separator: Separator character
        strip: Whether to strip whitespace from items

    Returns:
        List of strings

    """
    if isinstance(value, list):
        return value

    if not value:
        return []

    items = value.split(separator)

    if strip:
        items = [item.strip() for item in items]

    return items

parse_log_level

parse_log_level(value: str) -> LogLevelStr

Parse and validate log level string.

Parameters:

    value (str): Log level string (case-insensitive). Required.

Returns:

    LogLevelStr: Valid log level string in uppercase.

Raises:

    ValueError: If the log level is invalid.
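
Example usage (a sketch, not from the original docstring; the import path is assumed from the "Source code" note below, and the accepted level names are those listed under parse_module_levels):

>>> from provide.foundation.parsers.telemetry import parse_log_level
>>> parse_log_level("debug")
'DEBUG'
>>> parse_log_level("verbose")  # ValueError: not a valid log level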

Source code in provide/foundation/parsers/telemetry.py
def parse_log_level(value: str) -> LogLevelStr:
    """Parse and validate log level string.

    Args:
        value: Log level string (case-insensitive)

    Returns:
        Valid log level string in uppercase

    Raises:
        ValueError: If the log level is invalid

    """
    level = value.upper()
    if level not in _VALID_LOG_LEVEL_TUPLE:
        raise ValueError(
            _format_invalid_value_error(
                "log_level",
                value,
                valid_options=list(_VALID_LOG_LEVEL_TUPLE),
            ),
        )
    return cast("LogLevelStr", level)

parse_module_levels

parse_module_levels(
    value: str | dict[str, str],
) -> dict[str, LogLevelStr]

Parse module-specific log levels from string format.

Format Requirements:

- String format: "module1:LEVEL,module2:LEVEL" (comma-separated pairs)
- Dict format: Already parsed dictionary (validated and returned)
- Log levels must be valid: TRACE, DEBUG, INFO, WARNING, ERROR, CRITICAL
- Module names are trimmed of whitespace
- Invalid log levels are silently ignored

Examples:

>>> parse_module_levels("auth.service:DEBUG,database:ERROR")
{'auth.service': 'DEBUG', 'database': 'ERROR'}

>>> parse_module_levels("api:INFO")  # Single module
{'api': 'INFO'}

>>> parse_module_levels({"web": "warning"})  # Dict input (case normalized)
{'web': 'WARNING'}

>>> parse_module_levels("api:INFO,bad:INVALID,db:ERROR")  # Partial success
{'api': 'INFO', 'db': 'ERROR'}

Parameters:

    value (str | dict[str, str]): Comma-separated module:level pairs or pre-parsed dict. Required.

Returns:

    dict[str, LogLevelStr]: Dictionary mapping module names to validated log level strings. Invalid entries are silently ignored.

Note

This parser is lenient by design - invalid log levels are skipped rather than raising errors to allow partial configuration success in production environments.

Source code in provide/foundation/parsers/structured.py
def parse_module_levels(value: str | dict[str, str]) -> dict[str, LogLevelStr]:
    """Parse module-specific log levels from string format.

    **Format Requirements:**
    - String format: "module1:LEVEL,module2:LEVEL" (comma-separated pairs)
    - Dict format: Already parsed dictionary (validated and returned)
    - Log levels must be valid: TRACE, DEBUG, INFO, WARNING, ERROR, CRITICAL
    - Module names are trimmed of whitespace
    - Invalid log levels are silently ignored

    **Examples:**
        >>> parse_module_levels("auth.service:DEBUG,database:ERROR")
        {'auth.service': 'DEBUG', 'database': 'ERROR'}

        >>> parse_module_levels("api:INFO")  # Single module
        {'api': 'INFO'}

        >>> parse_module_levels({"web": "warning"})  # Dict input (case normalized)
        {'web': 'WARNING'}

        >>> parse_module_levels("api:INFO,bad:INVALID,db:ERROR")  # Partial success
        {'api': 'INFO', 'db': 'ERROR'}

    Args:
        value: Comma-separated module:level pairs or pre-parsed dict

    Returns:
        Dictionary mapping module names to validated log level strings.
        Invalid entries are silently ignored.

    Note:
        This parser is lenient by design - invalid log levels are skipped rather than
        raising errors to allow partial configuration success in production environments.

    """
    # If already a dict, validate and return
    if isinstance(value, dict):
        result = {}
        for module, level in value.items():
            try:
                result[module] = parse_log_level(level)
            except ValueError:
                # Skip invalid levels silently
                continue
        return result

    if not value or not value.strip():
        return {}

    result = {}
    for pair in value.split(","):
        pair = pair.strip()
        if not pair:
            continue

        if ":" not in pair:
            # Skip invalid entries silently
            continue

        module, level = pair.split(":", 1)
        module = module.strip()
        level = level.strip()

        if module:
            try:
                result[module] = parse_log_level(level)
            except ValueError:
                # Skip invalid log levels silently
                continue

    return result

parse_rate_limits

parse_rate_limits(
    value: str,
) -> dict[str, tuple[float, float]]

Parse per-logger rate limits from string format.

Format Requirements:

- Comma-separated triplets: "logger1:rate:capacity,logger2:rate:capacity"
- Rate and capacity must be valid float numbers
- Logger names are trimmed of whitespace
- Empty logger names are ignored
- Invalid entries are silently skipped to allow partial success

Examples:

>>> parse_rate_limits("api:10.0:100.0,worker:5.0:50.0")
{'api': (10.0, 100.0), 'worker': (5.0, 50.0)}

>>> parse_rate_limits("db:1.5:25.0")  # Single entry
{'db': (1.5, 25.0)}

>>> parse_rate_limits("api:10:100,invalid:bad,worker:5:50")  # Partial success
{'api': (10.0, 100.0), 'worker': (5.0, 50.0)}

Parameters:

    value (str): Comma-separated logger:rate:capacity triplets. Required.

Returns:

    dict[str, tuple[float, float]]: Dictionary mapping logger names to (rate, capacity) tuples. Invalid entries are silently ignored.

Note

This parser is lenient by design - invalid entries are skipped rather than raising errors to allow partial configuration success in production environments.

Source code in provide/foundation/parsers/structured.py
def parse_rate_limits(value: str) -> dict[str, tuple[float, float]]:
    """Parse per-logger rate limits from string format.

    **Format Requirements:**
    - Comma-separated triplets: "logger1:rate:capacity,logger2:rate:capacity"
    - Rate and capacity must be valid float numbers
    - Logger names are trimmed of whitespace
    - Empty logger names are ignored
    - Invalid entries are silently skipped to allow partial success

    **Examples:**
        >>> parse_rate_limits("api:10.0:100.0,worker:5.0:50.0")
        {'api': (10.0, 100.0), 'worker': (5.0, 50.0)}

        >>> parse_rate_limits("db:1.5:25.0")  # Single entry
        {'db': (1.5, 25.0)}

        >>> parse_rate_limits("api:10:100,invalid:bad,worker:5:50")  # Partial success
        {'api': (10.0, 100.0), 'worker': (5.0, 50.0)}

    Args:
        value: Comma-separated logger:rate:capacity triplets

    Returns:
        Dictionary mapping logger names to (rate, capacity) tuples.
        Invalid entries are silently ignored.

    Note:
        This parser is lenient by design - invalid entries are skipped rather than
        raising errors to allow partial configuration success in production environments.

    """
    if not value or not value.strip():
        return {}

    result = {}
    for triplet in value.split(","):
        triplet = triplet.strip()
        if not triplet:
            continue

        parts = triplet.split(":")
        if len(parts) != 3:
            # Skip invalid entries silently
            continue

        logger_name, rate_str, capacity_str = parts
        logger_name = logger_name.strip()

        if not logger_name:
            continue

        try:
            rate = float(rate_str.strip())
            capacity = float(capacity_str.strip())
            result[logger_name] = (rate, capacity)
        except (ValueError, TypeError):
            # Skip invalid numeric values silently
            continue

    return result

parse_sample_rate

parse_sample_rate(value: str) -> float

Parse sampling rate (0.0 to 1.0).

Parameters:

    value (str): String representation of sampling rate. Required.

Returns:

    float: Float between 0.0 and 1.0.

Raises:

    ValueError: If value is not valid or out of range.
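
Example usage (a sketch, not from the source docstring; the import path follows the "Source code" note below):

>>> from provide.foundation.parsers.primitives import parse_sample_rate
>>> parse_sample_rate("0.25")
0.25
>>> parse_sample_rate("1.5")  # ValueError: must be <= 1.0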

Source code in provide/foundation/parsers/primitives.py
def parse_sample_rate(value: str) -> float:
    """Parse sampling rate (0.0 to 1.0).

    Args:
        value: String representation of sampling rate

    Returns:
        Float between 0.0 and 1.0

    Raises:
        ValueError: If value is not valid or out of range

    """
    return parse_float_with_validation(value, min_val=0.0, max_val=1.0)

parse_set

parse_set(
    value: str | set[str],
    separator: str = ",",
    strip: bool = True,
) -> set[str]

Parse a set from a string.

Parameters:

    value (str | set[str]): String or set to parse. Required.
    separator (str): Separator character. Default: ','.
    strip (bool): Whether to strip whitespace from items. Default: True.

Returns:

    set[str]: Set of strings (duplicates removed).
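
Example usage (a sketch, not in the original docstring; sorted() is used only to make the set output deterministic, and the import path is assumed from the "Source code" note below):

>>> from provide.foundation.parsers.collections import parse_set
>>> sorted(parse_set("a, b, a"))
['a', 'b']
>>> parse_set("")
set()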

Source code in provide/foundation/parsers/collections.py
def parse_set(
    value: str | set[str],
    separator: str = ",",
    strip: bool = True,
) -> set[str]:
    """Parse a set from a string.

    Args:
        value: String or set to parse
        separator: Separator character
        strip: Whether to strip whitespace from items

    Returns:
        Set of strings (duplicates removed)

    """
    if isinstance(value, set):
        return value

    # Reuse list parsing logic, then convert to set
    items = parse_list(value, separator=separator, strip=strip)
    return set(items)

parse_tuple

parse_tuple(
    value: str | tuple[str, ...],
    separator: str = ",",
    strip: bool = True,
) -> tuple[str, ...]

Parse a tuple from a string.

Parameters:

    value (str | tuple[str, ...]): String or tuple to parse. Required.
    separator (str): Separator character. Default: ','.
    strip (bool): Whether to strip whitespace from items. Default: True.

Returns:

    tuple[str, ...]: Tuple of strings.
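
Example usage (a sketch, not from the source docstring; the import path is assumed from the "Source code" note below):

>>> from provide.foundation.parsers.collections import parse_tuple
>>> parse_tuple("a, b, c")
('a', 'b', 'c')
>>> parse_tuple("")
()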

Source code in provide/foundation/parsers/collections.py
def parse_tuple(
    value: str | tuple[str, ...],
    separator: str = ",",
    strip: bool = True,
) -> tuple[str, ...]:
    """Parse a tuple from a string.

    Args:
        value: String or tuple to parse
        separator: Separator character
        strip: Whether to strip whitespace from items

    Returns:
        Tuple of strings

    """
    if isinstance(value, tuple):
        return value

    # Reuse list parsing logic
    items = parse_list(value, separator=separator, strip=strip)
    return tuple(items)

parse_typed_value

parse_typed_value(value: str, target_type: type) -> Any

Parse a string value to a specific type.

Handles basic types (int, float, bool, str) and generic types (list, dict). For attrs fields, pass field.type as target_type.

Parameters:

    value (str): String value to parse. Required.
    target_type (type): Target type to convert to. Required.

Returns:

    Any: Parsed value of the target type.

Examples:

>>> parse_typed_value("42", int)
42
>>> parse_typed_value("true", bool)
True
>>> parse_typed_value("a,b,c", list)
['a', 'b', 'c']
Source code in provide/foundation/parsers/typed.py
def parse_typed_value(value: str, target_type: type) -> Any:
    """Parse a string value to a specific type.

    Handles basic types (int, float, bool, str) and generic types (list, dict).
    For attrs fields, pass field.type as target_type.

    Args:
        value: String value to parse
        target_type: Target type to convert to

    Returns:
        Parsed value of the target type

    Examples:
        >>> parse_typed_value("42", int)
        42
        >>> parse_typed_value("true", bool)
        True
        >>> parse_typed_value("a,b,c", list)
        ['a', 'b', 'c']

    """
    if value is None:
        return None

    # Try basic types first
    result = _parse_basic_type(value, target_type)
    if result is not None or target_type in (bool, int, float, str):
        return result

    # Try generic types
    result = _parse_generic_type(value, target_type)
    if result is not None:
        return result

    # Default to string
    return value