# Helia Application Configuration
# Copy this file to config.yaml and adjust values as needed.
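# Secret values below ("sk-xxx", empty strings) are placeholders and must be
# replaced with real credentials before use.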
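# Global settings. patient_limit and concurrency_limit are assumed to cap,
# respectively, how many patients are processed per run and how many are
# processed in parallel.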
log_level: "INFO"
patient_limit: 5
concurrency_limit: 1
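# MongoDB connection (assumed to store run metadata and results).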
mongo:
  uri: "mongodb://localhost:27017"
  db_name: "helia"

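# S3-compatible object storage; the localhost:9000 endpoint suggests a local
# MinIO instance. prefix scopes objects to the DAIC-WOZ dataset.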
s3:
  endpoint: "http://localhost:9000"
  access_key_id: ""
  secret_access_key: ""
  bucket: "helia"
  prefix: "daic-woz"
  region: "eu-west-1"

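# LLM provider endpoints. api_spec presumably selects the client protocol;
# note OpenRouter exposes an OpenAI-compatible API, hence api_spec: "openai".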
providers:
  openai:
    api_base: "https://api.openai.com/v1"
    api_key: "sk-xxx"
    api_spec: "openai"

  anthropic:
    api_base: "https://api.anthropic.com/v1"
    api_key: "sk-xxx"
    api_spec: "anthropic"

  openrouter:
    api_base: "https://openrouter.ai/api/v1"
    api_key: "sk-xxx"
    api_spec: "openai"

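  # Local Ollama server; no api_key entry, presumably because a local
  # instance needs none.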
  local_ollama:
    api_base: "http://localhost:11434/v1"
    api_spec: "ollama"

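# Named run configurations: each run pairs a model with sampling settings and
# a prompt template id. provider references a key under providers above.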
runs:
  baseline_gpt4:
    model:
      provider: "openai"
      model_name: "gpt-4o"
      temperature: 1.0
    prompt_id: "default"

  test_llama3:
    model:
      provider: "local_ollama"
      model_name: "llama3"
      temperature: 0.7
    prompt_id: "default"

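  # A hypothetical third run, sketched for illustration only. The run name,
  # model_name, and temperature are assumptions, not values from this file:
  # claude_baseline:
  #   model:
  #     provider: "anthropic"
  #     model_name: "claude-3-5-sonnet-latest"
  #     temperature: 0.3
  #   prompt_id: "default"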