Skip to content

llm

Classes:

Functions:

LLMSettings dataclass

LLMSettings(
    provider: str = 'openai',
    model: str = '',
    api_key: str = '',
    engine: str = '',
    temperature: float = 0.0,
    max_tokens: int = 4096,
    persist_raw: bool = True,
    api_params: dict[str, Any] = dict(),
)

Methods:

Attributes:

normalized_provider property

normalized_provider: str

to_litellm_kwargs

to_litellm_kwargs() -> dict[str, Any]
Source code in src/hiperhealth/llm.py
def to_litellm_kwargs(self) -> dict[str, Any]:
    """
    title: Assemble the keyword arguments for a LiteLLM completion call.
    returns:
      type: dict[str, Any]
      description: Copy of api_params with base_url translated to
        api_base, plus model, temperature, max_tokens, and (when set)
        api_key layered on top.
    """
    # Work on a copy so the settings object is never mutated.
    call_kwargs: dict[str, Any] = {**self.api_params}
    endpoint = call_kwargs.pop('base_url', '')
    # LiteLLM expects `api_base`; an explicit api_base in api_params wins.
    if endpoint and 'api_base' not in call_kwargs:
        call_kwargs['api_base'] = endpoint
    call_kwargs.update(
        model=self.to_litellm_model(),
        temperature=self.temperature,
        max_tokens=self.max_tokens,
    )
    # Only forward the key when one is configured, so ambient env-var
    # credentials still apply otherwise.
    if self.api_key:
        call_kwargs['api_key'] = self.api_key
    return call_kwargs

to_litellm_model

to_litellm_model() -> str
Source code in src/hiperhealth/llm.py
def to_litellm_model(self) -> str:
    """
    title: Return the fully-qualified LiteLLM model identifier.
    returns:
      type: str
      description: The model name as ``provider/model``; a name that
        already contains a slash is treated as fully qualified and
        returned untouched.
    raises:
      ValueError: When neither ``model`` nor ``engine`` is configured.
    """
    # `engine` acts as a fallback alias for `model`.
    candidate = self.model or self.engine
    if not candidate:
        raise ValueError(
            'LLM model is required. Set HIPERHEALTH_*_LLM_MODEL or '
            'pass LLMSettings(model=...).'
        )
    return (
        candidate
        if '/' in candidate
        else f'{self.normalized_provider}/{candidate}'
    )

with_overrides

with_overrides(
    *,
    provider: str | None = None,
    model: str | None = None,
    api_key: str | None = None,
    engine: str | None = None,
    temperature: float | None = None,
    max_tokens: int | None = None,
    persist_raw: bool | None = None,
    api_params: dict[str, Any] | None = None,
) -> LLMSettings
Source code in src/hiperhealth/llm.py
def with_overrides(
    self,
    *,
    provider: str | None = None,
    model: str | None = None,
    api_key: str | None = None,
    engine: str | None = None,
    temperature: float | None = None,
    max_tokens: int | None = None,
    persist_raw: bool | None = None,
    api_params: dict[str, Any] | None = None,
) -> LLMSettings:
    """
    title: Return a copy with selective overrides applied.
    parameters:
      provider:
        type: str | None
        description: Replacement provider; falsy values (None or '')
          keep the current one.
      model:
        type: str | None
        description: Replacement model; None keeps the current one.
      api_key:
        type: str | None
        description: Replacement API key; None keeps the current one.
      engine:
        type: str | None
        description: Replacement engine; None keeps the current one.
      temperature:
        type: float | None
        description: Replacement temperature; None keeps the current one.
      max_tokens:
        type: int | None
        description: Replacement token limit; None keeps the current one.
      persist_raw:
        type: bool | None
        description: Replacement raw-persistence flag; None keeps the
          current one.
      api_params:
        type: dict[str, Any] | None
        description: Extra parameters merged over (not replacing) the
          current api_params; override keys win.
    returns:
      type: LLMSettings
      description: A new settings instance; self is left unmodified.
    """
    # api_params is the only merged field; everything else is replaced
    # wholesale when an override is given.
    merged_params = dict(self.api_params)
    if api_params:
        merged_params.update(api_params)

    return replace(
        self,
        # NOTE(review): provider uses truthiness, so provider='' keeps
        # the current provider — unlike the other fields, which test
        # `is not None`. Kept as-is for backward compatibility.
        provider=provider or self.provider,
        model=model if model is not None else self.model,
        api_key=api_key if api_key is not None else self.api_key,
        engine=engine if engine is not None else self.engine,
        temperature=(
            temperature if temperature is not None else self.temperature
        ),
        max_tokens=max_tokens
        if max_tokens is not None
        else self.max_tokens,
        persist_raw=persist_raw
        if persist_raw is not None
        else self.persist_raw,
        api_params=merged_params,
    )

LiteLLMStructuredLLM

LiteLLMStructuredLLM(
    settings: LLMSettings,
    completion_fn: _CompletionFn | None = None,
)

Methods:

Source code in src/hiperhealth/llm.py
def __init__(
    self,
    settings: LLMSettings,
    completion_fn: _CompletionFn | None = None,
) -> None:
    """
    title: Initialize the LiteLLM-backed structured adapter.
    parameters:
      settings:
        type: LLMSettings
        description: Resolved LLM configuration used for every call.
      completion_fn:
        type: _CompletionFn | None
        description: Optional completion callable (e.g. a test double);
          when None, presumably resolved later by _get_completion_fn —
          confirm against that helper.
    """
    self.settings = settings
    self._completion_fn = completion_fn

generate

generate(
    system: str, user: str, output_type: type[TModel]
) -> TModel
Source code in src/hiperhealth/llm.py
def generate(
    self,
    system: str,
    user: str,
    output_type: type[TModel],
) -> TModel:
    """
    title: Generate a structured response using the configured backend.
    parameters:
      system:
        type: str
        description: System-role prompt content.
      user:
        type: str
        description: User-role prompt content.
      output_type:
        type: type[TModel]
        description: Model class the raw output is coerced into.
    returns:
      type: TModel
      description: The coerced, validated model instance.
    """
    backend = self._get_completion_fn()
    prompt_messages = _build_messages(system, user, output_type)
    # Settings supply model/temperature/max_tokens/api_key kwargs.
    raw_response = backend(
        messages=prompt_messages,
        **self.settings.to_litellm_kwargs(),
    )
    content = _extract_message_content(raw_response)
    return _coerce_model_output(content, output_type)

StructuredLLM

Bases: Protocol

Methods:

generate

generate(
    system: str, user: str, output_type: type[TModel]
) -> TModel
Source code in src/hiperhealth/llm.py
def generate(
    self,
    system: str,
    user: str,
    output_type: type[TModel],
) -> TModel:
    """
    title: Generate and validate a structured response.
    parameters:
      system:
        type: str
        description: System-role prompt content.
      user:
        type: str
        description: User-role prompt content.
      output_type:
        type: type[TModel]
        description: Model class the response must validate against.
    returns:
      type: TModel
      description: A validated instance of ``output_type``.
    """

build_structured_llm

build_structured_llm(
    settings: LLMSettings | None = None,
    *,
    completion_fn: _CompletionFn | None = None,
) -> StructuredLLM
Source code in src/hiperhealth/llm.py
def build_structured_llm(
    settings: LLMSettings | None = None,
    *,
    completion_fn: _CompletionFn | None = None,
) -> StructuredLLM:
    """
    title: Build the default structured LLM adapter for hiperhealth workflows.
    parameters:
      settings:
        type: LLMSettings | None
        description: Explicit settings; when not given, diagnostics
          settings are loaded from the environment.
      completion_fn:
        type: _CompletionFn | None
        description: Optional completion callable override.
    returns:
      type: StructuredLLM
      description: A LiteLLM-backed adapter wired with the settings.
    """
    resolved = settings or load_diagnostics_llm_settings()
    return LiteLLMStructuredLLM(
        settings=resolved,
        completion_fn=completion_fn,
    )

load_diagnostics_llm_settings

load_diagnostics_llm_settings() -> LLMSettings
Source code in src/hiperhealth/llm.py
def load_diagnostics_llm_settings() -> LLMSettings:
    """
    title: Load diagnostics-generation settings from env variables.
    returns:
      type: LLMSettings
      description: Settings resolved with the diagnostics prefix taking
        priority over the generic prefix, with legacy OPENAI_* variables
        as an openai-only fallback.
    """
    # Diagnostics-specific variables win over the generic ones.
    prefix_order = (_DIAGNOSTICS_PREFIX, _GENERIC_PREFIX)
    return load_llm_settings(
        prefixes=prefix_order,
        default_provider='openai',
        legacy_model_envs=('OPENAI_MODEL',),
        legacy_api_key_envs=('OPENAI_API_KEY',),
    )

load_llm_settings

load_llm_settings(
    *,
    prefixes: tuple[str, ...] = (_GENERIC_PREFIX,),
    default_provider: str = 'openai',
    legacy_model_envs: tuple[str, ...] = (),
    legacy_api_key_envs: tuple[str, ...] = (),
) -> LLMSettings
Source code in src/hiperhealth/llm.py
def load_llm_settings(
    *,
    prefixes: tuple[str, ...] = (_GENERIC_PREFIX,),
    default_provider: str = 'openai',
    legacy_model_envs: tuple[str, ...] = (),
    legacy_api_key_envs: tuple[str, ...] = (),
) -> LLMSettings:
    """
    title: Load LLM settings from env vars, with task-specific prefixes first.
    parameters:
      prefixes:
        type: tuple[str, ...]
        description: Env-var prefixes, highest priority first.
      default_provider:
        type: str
        description: Provider used when no PROVIDER variable is set.
      legacy_model_envs:
        type: tuple[str, ...]
        description: Fallback model variables, honoured for openai only.
      legacy_api_key_envs:
        type: tuple[str, ...]
        description: Fallback API-key variables, honoured for openai only.
    returns:
      type: LLMSettings
      description: Settings assembled from the environment.
    """
    # Provider: env value (or default), normalized and alias-resolved.
    raw = _first_nonempty_env(_prefixed_names(prefixes, 'PROVIDER'))
    normalized = (raw or default_provider).strip().lower()
    provider = _PROVIDER_ALIASES.get(normalized, normalized)

    # Model: prefixed vars first, legacy names (openai only), then the
    # provider's built-in default.
    model_envs = _prefixed_names(prefixes, 'MODEL')
    if provider == 'openai':
        model_envs += legacy_model_envs
    model = (
        _first_nonempty_env(model_envs)
        or _DEFAULT_PROVIDER_MODEL.get(provider, '')
    )

    # API key: prefixed vars, provider-specific vars, legacy (openai only).
    key_envs = (
        _prefixed_names(prefixes, 'API_KEY')
        + _PROVIDER_API_KEY_ENV.get(provider, ())
    )
    if provider == 'openai':
        key_envs += legacy_api_key_envs
    api_key = _first_nonempty_env(key_envs)

    engine = _first_nonempty_env(_prefixed_names(prefixes, 'ENGINE'))
    temperature = _read_float_env(
        _prefixed_names(prefixes, 'TEMPERATURE'),
        default=0.0,
    )
    max_tokens = _read_int_env(
        _prefixed_names(prefixes, 'MAX_TOKENS'),
        default=4096,
    )

    api_params = _load_api_params(prefixes)
    base_url = _first_nonempty_env(_prefixed_names(prefixes, 'BASE_URL'))
    if base_url:
        # A base_url supplied directly via api_params takes precedence.
        api_params.setdefault('base_url', base_url)

    return LLMSettings(
        provider=provider,
        model=model,
        api_key=api_key,
        engine=engine,
        temperature=temperature,
        max_tokens=max_tokens,
        api_params=api_params,
    )