Re-exports from `hiperhealth.skills.diagnostics.core`, provided for backward compatibility.
Functions:
differential
differential(
patient: dict[str, Any],
language: str = 'en',
session_id: str | None = None,
llm: StructuredLLM | None = None,
llm_settings: LLMSettings | None = None,
) -> LLMDiagnosis
Source code in src/hiperhealth/skills/diagnostics/core.py
def differential(
    patient: dict[str, Any],
    language: str = 'en',
    session_id: str | None = None,
    llm: StructuredLLM | None = None,
    llm_settings: LLMSettings | None = None,
) -> LLMDiagnosis:
    """
    title: Return summary + list of differential diagnoses.
    parameters:
      patient:
        type: dict[str, Any]
        description: Value for patient.
      language:
        type: str
        description: Value for language.
      session_id:
        type: str | None
        description: Value for session_id.
      llm:
        type: StructuredLLM | None
        description: Value for llm.
      llm_settings:
        type: LLMSettings | None
        description: Value for llm_settings.
    returns:
      type: LLMDiagnosis
      description: Return value.
    """
    # session_id is always forwarded (even when None); the optional LLM
    # overrides are forwarded only when the caller supplied them.
    extra: dict[str, Any] = {'session_id': session_id}
    extra.update(
        (name, value)
        for name, value in (('llm', llm), ('llm_settings', llm_settings))
        if value is not None
    )
    # Serialize the patient record as non-ASCII-safe JSON for the user turn.
    payload = json.dumps(patient, ensure_ascii=False)
    return chat(_diagnosis_prompt(language), payload, **extra)
exams — Return summary + list of suggested examinations.
Source code in src/hiperhealth/skills/diagnostics/core.py
def exams(
    selected_dx: list[str],
    language: str = 'en',
    session_id: str | None = None,
    llm: StructuredLLM | None = None,
    llm_settings: LLMSettings | None = None,
) -> LLMDiagnosis:
    """
    title: Return summary + list of suggested examinations.
    parameters:
      selected_dx:
        type: list[str]
        description: Value for selected_dx.
      language:
        type: str
        description: Value for language.
      session_id:
        type: str | None
        description: Value for session_id.
      llm:
        type: StructuredLLM | None
        description: Value for llm.
      llm_settings:
        type: LLMSettings | None
        description: Value for llm_settings.
    returns:
      type: LLMDiagnosis
      description: Return value.
    """
    # session_id is always forwarded (even when None); the optional LLM
    # overrides are forwarded only when the caller supplied them.
    extra: dict[str, Any] = {'session_id': session_id}
    extra.update(
        (name, value)
        for name, value in (('llm', llm), ('llm_settings', llm_settings))
        if value is not None
    )
    # The chosen diagnoses list is serialized as non-ASCII-safe JSON.
    payload = json.dumps(selected_dx, ensure_ascii=False)
    return chat(_exam_prompt(language), payload, **extra)