Refactor configuration and assessment modules; add S3 integration and update LLM client usage

This commit is contained in:
Santiago Martinez-Avial
2025-12-21 03:38:50 +01:00
parent 5ef0fc0ccc
commit 4a340a9661
14 changed files with 587 additions and 240 deletions

39
example.run_config.yaml Normal file
View File

@@ -0,0 +1,39 @@
---
# Helia Run Configuration
# This file defines the "providers" (LLM connections) and the "runs" (experiments).
# Environment variables like ${OPENAI_API_KEY} are expanded at runtime.

# providers: named LLM endpoints. Each entry supplies the credentials, base URL,
# and wire format ("openai" / "anthropic" / "ollama") used by the client.
providers:
  openai:
    api_key: "${OPENAI_API_KEY}"
    api_base: "https://api.openai.com/v1"
    api_format: "openai"
  anthropic:
    api_key: "${ANTHROPIC_API_KEY}"
    api_base: "https://api.anthropic.com/v1"
    api_format: "anthropic"
  openrouter:
    api_key: "${OPENROUTER_API_KEY}"
    api_base: "https://openrouter.ai/api/v1"
    api_format: "openai"
  local_ollama:
    # Local Ollama needs no real key; "none" is a placeholder the client accepts.
    api_key: "none"
    api_base: "http://localhost:11434/v1"
    api_format: "ollama"

# runs: the list of experiments to execute. Each run references a provider by
# name and pins the model and sampling settings for reproducibility.
runs:
  - run_name: "baseline_gpt4"
    model:
      provider: openai
      model_name: "gpt-4o"
      temperature: 0.0
    # NOTE(review): original indentation was lost in extraction — prompt_id is
    # placed at run level (sibling of `model`); confirm against the loader schema.
    prompt_id: "default"
  - run_name: "test_llama3"
    model:
      provider: local_ollama
      model_name: "llama3"
      temperature: 0.7
    prompt_id: "default"