Refactor configuration and assessment modules; add S3 integration and update LLM client usage
This commit is contained in:
39
example.run_config.yaml
Normal file
@@ -0,0 +1,39 @@
# Helia Run Configuration
# This file defines the "providers" (LLM connections) and the "runs" (experiments).
# Environment variables like ${OPENAI_API_KEY} are expanded at runtime.

providers:
  openai:
    api_key: "${OPENAI_API_KEY}"
    api_base: "https://api.openai.com/v1"
    api_format: "openai"

  anthropic:
    api_key: "${ANTHROPIC_API_KEY}"
    api_base: "https://api.anthropic.com/v1"
    api_format: "anthropic"

  openrouter:
    api_key: "${OPENROUTER_API_KEY}"
    api_base: "https://openrouter.ai/api/v1"
    api_format: "openai"

  local_ollama:
    api_key: "none"
    api_base: "http://localhost:11434/v1"
    api_format: "ollama"

runs:
  - run_name: "baseline_gpt4"
    model:
      provider: openai
      model_name: "gpt-4o"
      temperature: 0.0
    prompt_id: "default"

  - run_name: "test_llama3"
    model:
      provider: local_ollama
      model_name: "llama3"
      temperature: 0.7
    prompt_id: "default"
Reference in New Issue
Block a user