Refactor configuration and assessment modules; add S3 integration and update LLM client usage
.env.example
@@ -1,16 +1,17 @@
-# LLM Configuration
-# Defaults to OpenRouter if not specified
+# Database Configuration
+HELIA_MONGO_URI=mongodb://localhost:27017
+HELIA_DATABASE_NAME=helia
 
-# Base URL for the LLM provider (default: https://openrouter.ai/api/v1)
-HELIA_LLM_BASE_URL=https://openrouter.ai/api/v1
+# S3 Configuration (MinIO or AWS)
+# Required for finding and downloading transcripts
+HELIA_S3_ENDPOINT=https://s3.amazonaws.com
+HELIA_S3_ACCESS_KEY=your_access_key
+HELIA_S3_SECRET_KEY=your_secret_key
+HELIA_S3_BUCKET=your-bucket-name
+HELIA_S3_REGION=us-east-1
 
-# API Key. Checked in order: HELIA_LLM_API_KEY, OPENROUTER_API_KEY, OPENAI_API_KEY
-HELIA_LLM_API_KEY=sk-or-your-api-key-here
-# Model identifier (default: google/gemini-3.0-pro-preview)
-HELIA_LLM_MODEL=google/gemini-3.0-pro-preview
+# LLM API Keys
+# These are used by the run configuration YAML via ${VAR} substitution
+OPENAI_API_KEY=sk-...
+ANTHROPIC_API_KEY=sk-ant-...
+OPENROUTER_API_KEY=sk-or-...
 
-# Neo4j Configuration
-NEO4J_URI=bolt://localhost:7687
-NEO4J_USER=neo4j
-NEO4J_PASSWORD=password
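For context: the HELIA_-prefixed values above are consumed by the new SystemConfig settings class (see src/helia/configuration.py below). A minimal sketch of that mapping, assuming a .env filled in as in this example:

# Illustrative sketch: pydantic-settings strips the HELIA_ prefix, so
# HELIA_MONGO_URI populates SystemConfig.mongo_uri, HELIA_S3_BUCKET
# populates SystemConfig.s3_bucket, and so on.
from helia.configuration import load_system_config

config = load_system_config()               # reads .env via SettingsConfigDict
print(config.mongo_uri)                     # mongodb://localhost:27017
print(config.get_s3_config().bucket_name)   # your-bucket-name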
(deleted file)
@@ -1,12 +0,0 @@
-# Helia Configuration Example
-
-command: assess
-
-input_file: "path/to/transcript.txt"
-model: "gpt-4o"
-prompt_id: "default"
-temperature: 0.0
-
-database:
-  uri: "mongodb://localhost:27017"
-  database_name: "helia"
example.run_config.yaml (new file)
@@ -0,0 +1,39 @@
+# Helia Run Configuration
+# This file defines the "providers" (LLM connections) and the "runs" (experiments).
+# Environment variables like ${OPENAI_API_KEY} are expanded at runtime.
+
+providers:
+  openai:
+    api_key: "${OPENAI_API_KEY}"
+    api_base: "https://api.openai.com/v1"
+    api_format: "openai"
+
+  anthropic:
+    api_key: "${ANTHROPIC_API_KEY}"
+    api_base: "https://api.anthropic.com/v1"
+    api_format: "anthropic"
+
+  openrouter:
+    api_key: "${OPENROUTER_API_KEY}"
+    api_base: "https://openrouter.ai/api/v1"
+    api_format: "openai"
+
+  local_ollama:
+    api_key: "none"
+    api_base: "http://localhost:11434/v1"
+    api_format: "ollama"
+
+runs:
+  - run_name: "baseline_gpt4"
+    model:
+      provider: openai
+      model_name: "gpt-4o"
+      temperature: 0.0
+    prompt_id: "default"
+
+  - run_name: "test_llama3"
+    model:
+      provider: local_ollama
+      model_name: "llama3"
+      temperature: 0.7
+    prompt_id: "default"
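This file is consumed by load_config() in src/helia/configuration.py, which expands ${VAR} references before YAML parsing and validates the result as AssessBatchConfig. A usage sketch (key value is a placeholder):

# Sketch: loading and inspecting the run configuration shown above.
import os

from helia.configuration import load_config

os.environ["OPENAI_API_KEY"] = "sk-..."  # placeholder, substituted into the YAML

config = load_config("example.run_config.yaml")
print(list(config.providers))           # ['openai', 'anthropic', 'openrouter', 'local_ollama']
print(config.runs[0].model.model_name)  # gpt-4o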
pyproject.toml
@@ -9,24 +9,22 @@ description = "Agentic Interview Analysis Framework"
 readme = "README.md"
 requires-python = ">=3.13"
 dependencies = [
-    "langchain>=0.1.0",
-    "langchain-openai>=0.1.0",
+    "langchain[openai]>=1.2.0",
     "qdrant-client",
     "pydantic-settings>=2.12.0",
     "PyYAML>=6.0.1",
     "langgraph>=1.0.5",
-    "openai>=2.14.0",
     "pydantic>=2.12.5",
     "beanie>=2.0.1",
     "motor>=3.7.1",
-    "neo4j>=5.19.0",
+    "boto3>=1.42.14",
 ]
 
 [tool.hatch.build.targets.wheel]
 packages = ["src/helia"]
 
 [dependency-groups]
-dev = ["ruff>=0.14.10", "pyrefly>=0.46.0"]
+dev = ["ruff>=0.14.10", "ty>=0.0.5"]
 
 [tool.ruff]
 line-length = 100
@@ -39,7 +37,3 @@ ignore = ["D", "BLE", "EM101", "EM102", "E501", "COM812", "TD003", "TRY003"]
 [tool.ruff.lint.pydocstyle]
 # https://github.com/google/styleguide/blob/gh-pages/pyguide.md#38-comments-and-docstrings
 convention = "google"
-
-[tool.pyrefly]
-search-path = ["src"]
-project-includes = ["**/*.py*", "**/*.ipynb"]
src/helia/agent/workflow.py
@@ -3,47 +3,61 @@ from __future__ import annotations
 from typing import Any
 
 from langgraph.graph import END, StateGraph
-from typing_extensions import TypedDict
+from pydantic import BaseModel
 
-from helia.llm.client import get_openai_client
+from helia.llm.client import get_chat_model
 from helia.llm.settings import settings
 
 
-class AgentState(TypedDict):
+class AgentState(BaseModel):
+    """State for the agent workflow."""
+
     question: str
-    plan: list[str]
-    context: list[str]
-    answer: str
-    critique: str | None
+    plan: list[str] = []
+    context: list[str] = []
+    answer: str = ""
+    critique: str | None = None
 
 
-def planner_node(_state: AgentState) -> dict[str, Any]:
+# Note: Node functions omit explicit return types intentionally.
+# The ty type checker cannot unify `-> dict[str, Any]` with LangGraph's
+# _Node protocol which expects `-> Any`. Omitting the annotation allows
+# proper protocol matching while maintaining runtime correctness.
+
+
+def planner_node(state: AgentState):  # noqa: ANN201
+    """Plan the steps to answer the question."""
+    _ = state
     plan: list[str] = ["Understand question", "Retrieve info", "Synthesize answer"]
     return {"plan": plan}
 
 
 def router_node(state: AgentState) -> str:
-    question = state["question"].lower()
+    """Route to the appropriate tool based on question content."""
+    question = state.question.lower()
    if "how many" in question or "when" in question:
         return "graph_tool"
     return "vector_tool"
 
 
-def graph_tool_node(state: AgentState) -> dict[str, Any]:
-    context = [*state["context"]]
+def graph_tool_node(state: AgentState):  # noqa: ANN201
+    """Retrieve data from graph database."""
+    context = [*state.context]
     context.append("Graph data: Interruption count = 5")
     return {"context": context}
 
 
-def vector_tool_node(state: AgentState) -> dict[str, Any]:
-    context = [*state["context"]]
+def vector_tool_node(state: AgentState):  # noqa: ANN201
+    """Retrieve data from vector store."""
+    context = [*state.context]
     context.append("Vector data: Discussed salary at 10:00")
     return {"context": context}
 
 
-def synthesizer_node(state: AgentState) -> dict[str, Any]:
-    context_text = "\n".join(state["context"])
-    question = state["question"]
+def synthesizer_node(state: AgentState):  # noqa: ANN201
+    """Synthesize an answer from the gathered context."""
+    context_text = "\n".join(state.context)
+    question = state.question
 
     prompt = f"""
 Answer the user's question based on the provided context.
@@ -57,32 +71,38 @@ def synthesizer_node(state: AgentState) -> dict[str, Any]:
 """
 
     try:
-        client = get_openai_client()
-        response = client.chat.completions.create(
-            model=settings.model,
-            messages=[
-                {"role": "system", "content": "You are a helpful assistant."},
-                {"role": "user", "content": prompt},
-            ],
+        llm = get_chat_model(
+            model_name=settings.model,
+            api_key=settings.resolve_api_key(),
+            base_url=settings.base_url,
         )
-        answer = response.choices[0].message.content or "No answer generated."
+        messages = [
+            ("system", "You are a helpful assistant."),
+            ("user", prompt),
+        ]
+        response = llm.invoke(messages)
+        answer = str(response.content)
     except Exception as e:
         answer = f"Error generating answer: {e}. Fallback: Based on context: {context_text}, here is the answer."
 
     return {"answer": answer}
 
 
-def reflector_node(_state: AgentState) -> dict[str, Any]:
+def reflector_node(state: AgentState):  # noqa: ANN201
+    """Reflect on the quality of the answer."""
+    _ = state
     return {"critique": "Answer appears sufficient."}
 
 
-workflow: Any = StateGraph(AgentState)
-workflow.add_node("planner", planner_node)
-workflow.add_node("graph_tool", graph_tool_node)
-workflow.add_node("vector_tool", vector_tool_node)
-workflow.add_node("synthesizer", synthesizer_node)
-workflow.add_node("reflector", reflector_node)
+# Build the workflow using fluent pattern
+workflow = (
+    StateGraph(AgentState)
+    .add_node("planner", planner_node)
+    .add_node("graph_tool", graph_tool_node)
+    .add_node("vector_tool", vector_tool_node)
+    .add_node("synthesizer", synthesizer_node)
+    .add_node("reflector", reflector_node)
+)
 
 workflow.set_entry_point("planner")
 
@@ -97,12 +117,7 @@ workflow.add_edge("reflector", END)
 
 
 def run_agent(question: str) -> dict[str, Any]:
+    """Run the agent workflow with the given question."""
     app = workflow.compile()
-    inputs: AgentState = {
-        "question": question,
-        "plan": [],
-        "context": [],
-        "answer": "",
-        "critique": None,
-    }
+    inputs = AgentState(question=question)
     return app.invoke(inputs)
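A minimal invocation of the reworked workflow; the question string is a placeholder:

# Sketch: run_agent compiles the graph and returns the final state as a
# dict (the result of LangGraph's invoke()), so the answer is under "answer".
from helia.agent.workflow import run_agent

result = run_agent("How many times did the interviewer interrupt?")
print(result["answer"])
print(result["critique"])  # "Answer appears sufficient."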
src/helia/assessment/core.py
@@ -1,11 +1,10 @@
 from __future__ import annotations
 
-import json
-from typing import TYPE_CHECKING
+from typing import TYPE_CHECKING, cast
 
-from helia.assessment.schema import AssessmentResult, Evidence, PHQ8Item, RunConfig
+from helia.assessment.schema import AssessmentResponse, AssessmentResult, RunConfig
 from helia.ingestion.parser import TranscriptParser
-from helia.llm.client import get_openai_client
+from helia.llm.client import get_chat_model
 
 if TYPE_CHECKING:
     from pathlib import Path
@@ -48,36 +47,31 @@ INSTRUCTIONS:
 
 TRANSCRIPT:
 {transcript_text}
 
-OUTPUT FORMAT:
-Return a JSON object with a key "items" which is a list of 8 objects.
-Each object must have:
-- "question_id": (int) 1-8
-- "question_text": (str) The text of the criterion
-- "score": (int) 0-3
-- "evidence": (list) List of objects with "quote" and "reasoning".
 """
 
 
-# PHQ-8 Scoring Constants
-DIAGNOSIS_THRESHOLD = 10
-SYMPTOM_SEVERITY_THRESHOLD = 2
-MAJOR_DEPRESSION_MIN_SEVERE = 5
-OTHER_DEPRESSION_MIN_SEVERE = 2
-
-
 class PHQ8Evaluator:
     def __init__(self, config: RunConfig) -> None:
         self.config = config
-        self.client = get_openai_client()  # Client config is global, but model is per-request
         self.parser = TranscriptParser()
 
+        # Initialize LangChain Chat Model
+        self.llm = get_chat_model(
+            model_name=self.config.model_name,
+            api_key=self.config.api_key,
+            base_url=self.config.api_base,
+            temperature=self.config.temperature,
+        )
+
     def _load_prompt(self, prompt_id: str) -> str:
         if prompt_id == "default":
             return DEFAULT_PROMPT
         raise ValueError(f"Unknown prompt_id: {prompt_id}")
 
-    def evaluate(self, file_path: Path) -> AssessmentResult:
+    async def evaluate(self, file_path: Path) -> AssessmentResult:
+        """
+        Asynchronously evaluate a transcript using the configured LLM.
+        """
         # 1. Parse Transcript
         utterances = self.parser.parse(file_path)
         transcript_text = "\n".join([f"{u.speaker}: {u.text}" for u in utterances])
@@ -86,44 +80,18 @@ class PHQ8Evaluator:
         base_prompt = self._load_prompt(self.config.prompt_id)
         final_prompt = base_prompt.format(transcript_text=transcript_text)
 
-        # 3. Call LLM
-        response = self.client.chat.completions.create(
-            model=self.config.model_name,
-            messages=[
-                {
-                    "role": "system",
-                    "content": "You are a clinical assessment system. Output valid JSON.",
-                },
-                {"role": "user", "content": final_prompt},
-            ],
-            temperature=self.config.temperature,
-            response_format={"type": "json_object"},
-        )
-
-        content = response.choices[0].message.content
-        if not content:
-            raise ValueError("LLM returned empty response")
-
-        data = json.loads(content)
-
-        # 4. Parse Response into Schema
-        items = []
-        for item_data in data.get("items", []):
-            evidence_list = [
-                Evidence(quote=ev.get("quote", ""), reasoning=ev.get("reasoning", ""))
-                for ev in item_data.get("evidence", [])
-            ]
-
-            items.append(
-                PHQ8Item(
-                    question_id=item_data["question_id"],
-                    question_text=item_data["question_text"],
-                    score=item_data["score"],
-                    evidence=evidence_list,
-                )
-            )
-
-        # 5. Calculate Diagnostics
+        # 3. Call LLM (Async with Structured Output)
+        structured_llm = self.llm.with_structured_output(AssessmentResponse)
+
+        messages = [
+            ("system", "You are a clinical assessment system."),
+            ("user", final_prompt),
+        ]
+
+        response_obj = cast("AssessmentResponse", await structured_llm.ainvoke(messages))
+        items = response_obj.items
+
+        # 4. Calculate Diagnostics
         total_score = sum(item.score for item in items)
         diagnosis_cutpoint = total_score >= DIAGNOSIS_THRESHOLD
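A worked example of the cutpoint arithmetic, using the scoring constants from the previous version of this module (DIAGNOSIS_THRESHOLD = 10; the per-item scores are hypothetical):

# Sketch of the PHQ-8 cutpoint logic: eight items scored 0-3 give a
# 0-24 total, and totals of 10 or more cross the diagnosis cutpoint.
scores = [2, 1, 0, 3, 1, 2, 1, 1]        # hypothetical per-item scores
total_score = sum(scores)                 # 11
diagnosis_cutpoint = total_score >= 10
print(total_score, diagnosis_cutpoint)    # 11 True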
src/helia/assessment/schema.py
@@ -4,6 +4,10 @@ from pydantic import BaseModel, Field
 
 class RunConfig(BaseModel):
     model_name: str
+    api_base: str | None = None
+    api_format: str = "openai"
+    # API Key is needed for runtime but excluded from DB persistence for security
+    api_key: str | None = Field(default=None, exclude=True)
     prompt_id: str
     temperature: float
     timestamp: str
@@ -22,6 +26,14 @@ class PHQ8Item(BaseModel):
     evidence: list[Evidence]
 
 
+class AssessmentResponse(BaseModel):
+    """
+    Wrapper for structured LLM output to match the expected JSON schema.
+    """
+
+    items: list[PHQ8Item]
+
+
 class AssessmentResult(Document):
     transcript_id: str
     config: RunConfig
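AssessmentResponse exists so with_structured_output() has a single top-level object to target. A sketch of the payload shape it validates (quote/reasoning strings are invented placeholders):

# Sketch: the JSON shape AssessmentResponse accepts.
from helia.assessment.schema import AssessmentResponse

payload = {
    "items": [
        {
            "question_id": 1,
            "question_text": "Little interest or pleasure in doing things",
            "score": 2,
            "evidence": [{"quote": "placeholder quote", "reasoning": "placeholder reasoning"}],
        },
        # ... seven more items in a full response
    ]
}
response = AssessmentResponse.model_validate(payload)
print(response.items[0].score)  # 2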
src/helia/configuration.py
@@ -1,37 +1,129 @@
 from __future__ import annotations
 
+import os
+import re
 from pathlib import Path
-from typing import Annotated, Literal
+from typing import Literal, NamedTuple
 
 import yaml
 from pydantic import BaseModel, Field, TypeAdapter
+from pydantic_settings import BaseSettings, SettingsConfigDict
 
 
-class MongoConfig(BaseModel):
-    uri: str = "mongodb://localhost:27017"
-    database_name: str = "helia"
+class S3Config(NamedTuple):
+    """S3 bucket configuration."""
+
+    bucket_name: str
+    endpoint_url: str
+    aws_access_key_id: str
+    aws_secret_access_key: str
+    prefix: str = ""
+    region_name: str | None = None
 
 
-class AssessConfig(BaseModel):
-    command: Literal["assess"] = "assess"
-    input_file: str
-    model: str
+class SystemConfig(BaseSettings):
+    """
+    System-level configuration loaded from environment variables.
+    Includes Database and AWS/S3 settings.
+    """
+
+    model_config = SettingsConfigDict(env_prefix="HELIA_", env_file=".env", extra="ignore")
+
+    mongo_uri: str = Field(..., description="MongoDB connection string")
+    database_name: str = Field("helia", description="MongoDB database name")
+
+    s3_endpoint: str = Field(..., description="S3 endpoint URL")
+    s3_access_key: str = Field(..., description="S3 access key")
+    s3_secret_key: str = Field(..., description="S3 secret key")
+    s3_bucket: str = Field(..., description="S3 bucket containing the dataset")
+    s3_prefix: str = Field("", description="S3 key prefix for dataset files")
+    s3_region: str | None = Field(None, description="S3 region name (optional)")
+
+    def get_s3_config(self) -> S3Config:
+        """Create an S3Config from the system configuration."""
+        return S3Config(
+            bucket_name=self.s3_bucket,
+            endpoint_url=self.s3_endpoint,
+            aws_access_key_id=self.s3_access_key,
+            aws_secret_access_key=self.s3_secret_key,
+            prefix=self.s3_prefix,
+            region_name=self.s3_region,
+        )
+
+
+class ProviderConfig(BaseModel):
+    """
+    Configuration for an LLM provider.
+    """
+
+    api_key: str
+    api_base: str
+    api_format: Literal["openai", "anthropic", "ollama"] = "openai"
+
+
+class ModelSpec(BaseModel):
+    """
+    Specific model configuration for a run.
+    """
+
+    provider: str
+    model_name: str
+    temperature: float = 0.0
+
+
+class RunSpec(BaseModel):
+    """
+    Configuration for a single experiment run.
+    """
+
+    run_name: str
+    model: ModelSpec
     prompt_id: str = "default"
-    temperature: float = 1.0
-    database: MongoConfig = Field(default_factory=MongoConfig)
+
+
+class AssessBatchConfig(BaseModel):
+    """
+    Configuration file structure for batch assessment.
+    """
+
+    providers: dict[str, ProviderConfig]
+    runs: list[RunSpec]
 
 
 class AgentConfig(BaseModel):
+    # Placeholder for future agent config
     command: Literal["agent"] = "agent"
-    question: str = "How many times did the interviewer interrupt?"
 
 
-ConfigType = Annotated[AssessConfig | AgentConfig, Field(discriminator="command")]
+ConfigType = AssessBatchConfig
+
+
+def _expand_env_vars(yaml_content: str) -> str:
+    """
+    Expand environment variables in the format ${VAR} or ${VAR:default}.
+    """
+    pattern = re.compile(r"\$\{([^}^{]+)\}")
+
+    def replace(match: re.Match) -> str:
+        env_var = match.group(1)
+        default_value = ""
+        if ":" in env_var:
+            env_var, default_value = env_var.split(":", 1)
+        return os.environ.get(env_var, default_value)
+
+    return pattern.sub(replace, yaml_content)
 
 
 def load_config(path: str | Path) -> ConfigType:
     with Path(path).open() as f:
-        data = yaml.safe_load(f)
+        content = f.read()
+
+    content = _expand_env_vars(content)
+    data = yaml.safe_load(content)
+
     adapter = TypeAdapter(ConfigType)
     return adapter.validate_python(data)
+
+
+def load_system_config() -> SystemConfig:
+    return SystemConfig()
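_expand_env_vars supports both bare and defaulted references. A behavior sketch (it is a private helper, imported here only for illustration; MISSING is an intentionally unset variable):

# Sketch of the ${VAR} / ${VAR:default} expansion implemented above.
import os

from helia.configuration import _expand_env_vars

os.environ["OPENAI_API_KEY"] = "sk-test"
print(_expand_env_vars('api_key: "${OPENAI_API_KEY}"'))             # api_key: "sk-test"
print(_expand_env_vars('api_base: "${MISSING:http://localhost}"'))  # api_base: "http://localhost"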
src/helia/db.py
@@ -8,11 +8,11 @@ from motor.motor_asyncio import AsyncIOMotorClient
 from helia.assessment.schema import AssessmentResult
 
 if TYPE_CHECKING:
-    from helia.configuration import MongoConfig
+    from helia.configuration import SystemConfig
 
 
-async def init_db(config: MongoConfig) -> None:
-    client = AsyncIOMotorClient(config.uri)
+async def init_db(config: SystemConfig) -> None:
+    client = AsyncIOMotorClient(config.mongo_uri)
     await init_beanie(
         database=client[config.database_name],  # type: ignore[arg-type]
         document_models=[AssessmentResult],
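init_db now takes the env-driven SystemConfig directly. A startup sketch, assuming the HELIA_ environment is populated:

# Sketch: initializing Beanie from environment-driven settings.
import asyncio

from helia.configuration import load_system_config
from helia.db import init_db

asyncio.run(init_db(load_system_config()))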
src/helia/ingestion/parser.py
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
 import csv
 from typing import TYPE_CHECKING
 
@@ -28,39 +30,29 @@ class Utterance(BaseModel):
 
 class TranscriptParser:
     def parse(self, file_path: Path) -> list[Utterance]:
+        """
+        Parse a DAIC-WOZ transcript file (tab-separated values).
+        Expected format: start_time stop_time speaker value
+        """
         with file_path.open(encoding="utf-8") as f:
-            lines = f.readlines()
-
-        if not lines:
-            return []
-
-        header = lines[0].strip()
-        if header == "start_time\tstop_time\tspeaker\tvalue":
-            return self._parse_tsv(lines[1:])
-
-        return self._parse_simple(lines)
-
-    def _parse_tsv(self, lines: list[str]) -> list[Utterance]:
-        reader = csv.DictReader(
-            lines, fieldnames=["start_time", "stop_time", "speaker", "value"], delimiter="\t"
-        )
-        return [
-            Utterance(
-                id=f"u_{i}",
-                speaker=row["speaker"],
-                text=row["value"].strip(),
-                start_time=float(row["start_time"]),
-                end_time=float(row["stop_time"]),
-            )
-            for i, row in enumerate(reader)
-        ]
-
-    def _parse_simple(self, lines: list[str]) -> list[Utterance]:
-        utterances = []
-        for i, line in enumerate(lines):
-            if ":" in line:
-                speaker, text = line.split(":", 1)
-                utterances.append(
-                    Utterance(id=f"u_{i}", speaker=speaker.strip(), text=text.strip())
-                )
-        return utterances
+            # Skip the header line
+            header = f.readline().strip()
+            if header != "start_time\tstop_time\tspeaker\tvalue":
+                # Fallback/Check for potential malformed files or notify user
+                # For now, we strictly expect the standard header.
+                pass
+
+            # Read the rest using DictReader
+            reader = csv.DictReader(
+                f, fieldnames=["start_time", "stop_time", "speaker", "value"], delimiter="\t"
+            )
+            return [
+                Utterance(
+                    id=f"u_{i}",
+                    speaker=row["speaker"],
+                    text=row["value"].strip(),
+                    start_time=float(row["start_time"]),
+                    end_time=float(row["stop_time"]),
+                )
+                for i, row in enumerate(reader)
+            ]
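The strict header expectation means a valid input starts with the four tab-separated column names. An illustrative file and parse call (filename, speakers, and values are placeholders):

# Sketch: a minimal DAIC-WOZ style transcript and the parse() call.
from pathlib import Path

from helia.ingestion.parser import TranscriptParser

sample = (
    "start_time\tstop_time\tspeaker\tvalue\n"
    "0.0\t2.5\tEllie\thow are you doing today\n"
    "2.5\t4.0\tParticipant\tpretty good\n"
)
path = Path("300_TRANSCRIPT.csv")  # hypothetical filename
path.write_text(sample, encoding="utf-8")

utterances = TranscriptParser().parse(path)
print(utterances[0].speaker, utterances[0].start_time)  # Ellie 0.0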
src/helia/ingestion/s3.py (new file)
@@ -0,0 +1,66 @@
+from __future__ import annotations
+
+import asyncio
+import logging
+from typing import TYPE_CHECKING
+
+import boto3
+
+if TYPE_CHECKING:
+    from pathlib import Path
+
+    from helia.configuration import S3Config
+
+logger = logging.getLogger(__name__)
+
+
+class S3DatasetLoader:
+    def __init__(
+        self,
+        s3_config: S3Config,
+    ) -> None:
+        self.bucket_name = s3_config.bucket_name
+        self.prefix = s3_config.prefix
+        self.s3 = boto3.client(
+            "s3",
+            endpoint_url=s3_config.endpoint_url,
+            aws_access_key_id=s3_config.aws_access_key_id,
+            aws_secret_access_key=s3_config.aws_secret_access_key,
+            region_name=s3_config.region_name,
+        )
+
+    def list_transcripts(self) -> list[str]:
+        """
+        List all transcript CSV files in the bucket matching the DAIC-WOZ pattern.
+        Pattern: {participant_id}_P/{participant_id}_TRANSCRIPT.csv
+        """
+        logger.info("Listing files in s3://%s/%s", self.bucket_name, self.prefix)
+        paginator = self.s3.get_paginator("list_objects_v2")
+        transcripts = []
+
+        for page in paginator.paginate(Bucket=self.bucket_name, Prefix=self.prefix):
+            if "Contents" not in page:
+                continue
+
+            for obj in page["Contents"]:
+                key = obj["Key"]
+                if key.endswith("_TRANSCRIPT.csv"):
+                    transcripts.append(key)
+
+        logger.info("Found %d transcripts", len(transcripts))
+        return transcripts
+
+    def download_file(self, key: str, local_path: Path) -> Path:
+        """
+        Download a file from S3 to a local path (Blocking).
+        """
+        local_path.parent.mkdir(parents=True, exist_ok=True)
+        logger.info("Downloading s3://%s/%s to %s", self.bucket_name, key, local_path)
+        self.s3.download_file(self.bucket_name, key, str(local_path))
+        return local_path
+
+    async def download_file_async(self, key: str, local_path: Path) -> Path:
+        """
+        Download a file from S3 to a local path (Async wrapper around blocking call).
+        """
+        return await asyncio.to_thread(self.download_file, key, local_path)
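A usage sketch for the loader, assuming the HELIA_ S3 settings are configured (the key shown is an example of the documented bucket layout):

# Sketch: list DAIC-WOZ transcripts and download one of them.
import asyncio
from pathlib import Path

from helia.configuration import load_system_config
from helia.ingestion.s3 import S3DatasetLoader

loader = S3DatasetLoader(load_system_config().get_s3_config())
keys = loader.list_transcripts()  # e.g. ['300_P/300_TRANSCRIPT.csv', ...]
asyncio.run(loader.download_file_async(keys[0], Path("data/downloads") / keys[0]))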
src/helia/llm/client.py
@@ -1,7 +1,16 @@
-from openai import OpenAI
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
+
+from langchain_openai import ChatOpenAI
+from openai import AsyncOpenAI, OpenAI
+from pydantic import SecretStr
 
 from helia.llm.settings import settings
 
+if TYPE_CHECKING:
+    from langchain_core.language_models.chat_models import BaseChatModel
+
 
 def get_openai_client() -> OpenAI:
     """
@@ -16,3 +25,37 @@ def get_openai_client() -> OpenAI:
         timeout=settings.timeout,
         max_retries=settings.max_retries,
     )
+
+
+def get_async_openai_client() -> AsyncOpenAI:
+    """
+    Returns a configured AsyncOpenAI client based on global settings.
+    """
+    api_key = settings.resolve_api_key()
+
+    return AsyncOpenAI(
+        base_url=settings.base_url,
+        api_key=api_key,
+        timeout=settings.timeout,
+        max_retries=settings.max_retries,
+    )
+
+
+def get_chat_model(
+    model_name: str,
+    api_key: str | None = None,
+    base_url: str | None = None,
+    temperature: float = 0.0,
+    max_retries: int = 3,
+) -> BaseChatModel:
+    """
+    Returns a configured LangChain ChatOpenAI instance.
+    Supports OpenRouter, Ollama, and OpenAI via base_url.
+    """
+    return ChatOpenAI(
+        model_name=model_name,
+        openai_api_key=SecretStr(api_key or ""),
+        openai_api_base=base_url,
+        temperature=temperature,
+        max_retries=max_retries,
+    )
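get_chat_model keeps one code path for every OpenAI-compatible endpoint; only base_url and key change per provider. A sketch pointing it at a local Ollama instance (values mirror the local_ollama provider in example.run_config.yaml and assume Ollama is running):

# Sketch: the same factory serves OpenAI, OpenRouter, and Ollama.
from helia.llm.client import get_chat_model

llm = get_chat_model(
    model_name="llama3",
    api_key="none",                        # Ollama ignores the key
    base_url="http://localhost:11434/v1",  # OpenAI-compatible endpoint
    temperature=0.7,
)
print(llm.invoke("Say hi").content)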
(application entry point)
@@ -7,12 +7,80 @@ from pathlib import Path
 from helia.agent.workflow import run_agent
 from helia.assessment.core import PHQ8Evaluator
 from helia.assessment.schema import RunConfig
-from helia.configuration import load_config
+from helia.configuration import (
+    AssessBatchConfig,
+    RunSpec,
+    S3Config,
+    load_config,
+    load_system_config,
+)
 from helia.db import init_db
+from helia.ingestion.s3 import S3DatasetLoader
 
 logger = logging.getLogger(__name__)
 
 
+async def process_run(
+    run_spec: RunSpec,
+    input_source: str,
+    run_config_data: AssessBatchConfig,
+    s3_config: S3Config,
+    semaphore: asyncio.Semaphore,
+) -> None:
+    """
+    Process a single run for a single transcript, bounded by a semaphore.
+    """
+    async with semaphore:
+        # Resolve Provider
+        provider_name = run_spec.model.provider
+        if provider_name not in run_config_data.providers:
+            logger.error("Run %s refers to unknown provider %s", run_spec.run_name, provider_name)
+            return
+
+        provider_config = run_config_data.providers[provider_name]
+
+        # Download from S3 (Async)
+        loader = S3DatasetLoader(s3_config)
+        local_file = Path("data/downloads") / input_source
+        if not local_file.exists():
+            await loader.download_file_async(input_source, local_file)
+
+        input_path = local_file
+        item_id = Path(input_source).stem.split("_")[0]  # Extract 300 from 300_TRANSCRIPT
+        run_name = f"{run_spec.run_name}_{item_id}"
+
+        logger.info("--- Processing: %s ---", run_name)
+
+        run_config = RunConfig(
+            model_name=run_spec.model.model_name,
+            api_base=provider_config.api_base,
+            api_key=provider_config.api_key,
+            api_format=provider_config.api_format,
+            prompt_id=run_spec.prompt_id,
+            temperature=run_spec.model.temperature,
+            timestamp=datetime.now(tz=UTC).isoformat(),
+        )
+
+        try:
+            evaluator = PHQ8Evaluator(run_config)
+            # Await the async evaluation
+            result = await evaluator.evaluate(input_path)
+
+            # Save to DB (Async)
+            await result.insert()
+
+            logger.info("Assessment complete for %s.", run_name)
+            logger.info(
+                "ID: %s | Score: %s | Diagnosis: %s",
+                result.id,
+                result.total_score,
+                result.diagnosis_algorithm,
+            )
+
+        except Exception:
+            logger.exception("Failed to process %s", run_name)
+
+
 async def main() -> None:
     logging.basicConfig(level=logging.INFO, format="%(message)s")
 
@@ -33,34 +101,44 @@ async def main() -> None:
         return
 
     try:
-        config = load_config(config_path)
+        run_config_data = load_config(config_path)
+        system_config = load_system_config()
     except Exception:
         logger.exception("Error loading configuration")
         return
 
-    if config.command == "assess":
-        await init_db(config.database)
-
-        logger.info("Running assessment on %s...", config.input_file)
-
-        run_config = RunConfig(
-            model_name=config.model,
-            prompt_id=config.prompt_id,
-            temperature=config.temperature,
-            timestamp=datetime.now(tz=UTC).isoformat(),
-        )
-
-        evaluator = PHQ8Evaluator(run_config)
-        result = evaluator.evaluate(Path(config.input_file))
-
-        await result.insert()
-
-        logger.info("Assessment complete. Saved to MongoDB with ID: %s", result.id)
-        logger.info("Total Score: %s", result.total_score)
-        logger.info("Diagnosis (Alg): %s", result.diagnosis_algorithm)
-
-    elif config.command == "agent":
-        question = config.question
+    # Check the type of configuration
+    if isinstance(run_config_data, AssessBatchConfig):
+        await init_db(system_config)
+
+        # Create S3 config once and reuse
+        s3_config = system_config.get_s3_config()
+
+        # Discover transcripts (can remain sync or be made async, sync is fine for listing)
+        logger.info("Discovering transcripts in S3 bucket: %s", s3_config.bucket_name)
+        loader = S3DatasetLoader(s3_config)
+        keys = loader.list_transcripts()
+
+        # Create task list
+        tasks_data = [(run_spec, key) for run_spec in run_config_data.runs for key in keys]
+        logger.info("Starting batch assessment with %d total items...", len(tasks_data))
+
+        # Limit concurrency to 10 parallel requests
+        semaphore = asyncio.Semaphore(10)
+
+        tasks = [
+            process_run(run_spec, key, run_config_data, s3_config, semaphore)
+            for run_spec, key in tasks_data
+        ]
+
+        # Run all tasks concurrently
+        await asyncio.gather(*tasks)
+
+        logger.info("Batch assessment complete.")
+
+    else:
+        # Agent command (Placeholder)
+        question = run_config_data.question
         logger.info("\nRunning Re-Agent with question: '%s'\n", question)
         result = run_agent(question)
         logger.info(result["answer"])
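The fan-out above is the standard bounded-concurrency idiom: every task is created up front, but the semaphore caps in-flight work at 10. A stripped-down sketch of the same pattern in isolation:

# Sketch of the bounded-concurrency pattern used by process_run/main:
# all coroutines are scheduled, but at most 10 run concurrently.
import asyncio

async def worker(i: int, sem: asyncio.Semaphore) -> None:
    async with sem:
        await asyncio.sleep(0.1)  # stands in for download + LLM call
        print(f"done {i}")

async def demo() -> None:
    sem = asyncio.Semaphore(10)
    await asyncio.gather(*(worker(i, sem) for i in range(50)))

asyncio.run(demo())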
uv.lock (generated)
@@ -43,6 +43,34 @@ wheels = [
     { url = "https://files.pythonhosted.org/packages/29/54/8c9a4ab2d82242074671cc35b1dd2a906c3c36b3a5c80e914c76fa9f45b7/beanie-2.0.1-py3-none-any.whl", hash = "sha256:3aad6cc0e40fb8d256a0a3fdeca92a7b3d3c1f9f47ff377c9ecd2221285e1009", size = 87693, upload-time = "2025-11-20T18:45:50.321Z" },
 ]
 
+[[package]]
+name = "boto3"
+version = "1.42.14"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+    { name = "botocore" },
+    { name = "jmespath" },
+    { name = "s3transfer" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/09/72/e236ca627bc0461710685f5b7438f759ef3b4106e0e08dda08513a6539ab/boto3-1.42.14.tar.gz", hash = "sha256:a5d005667b480c844ed3f814a59f199ce249d0f5669532a17d06200c0a93119c", size = 112825, upload-time = "2025-12-19T20:27:15.325Z" }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/bb/ba/c657ea6f6d63563cc46748202fccd097b51755d17add00ebe4ea27580d06/boto3-1.42.14-py3-none-any.whl", hash = "sha256:bfcc665227bb4432a235cb4adb47719438d6472e5ccbf7f09512046c3f749670", size = 140571, upload-time = "2025-12-19T20:27:13.316Z" },
+]
+
+[[package]]
+name = "botocore"
+version = "1.42.14"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+    { name = "jmespath" },
+    { name = "python-dateutil" },
+    { name = "urllib3" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/35/3f/50c56f093c2c6ce6de1f579726598db1cf9a9cccd3bf8693f73b1cf5e319/botocore-1.42.14.tar.gz", hash = "sha256:cf5bebb580803c6cfd9886902ca24834b42ecaa808da14fb8cd35ad523c9f621", size = 14910547, upload-time = "2025-12-19T20:27:04.431Z" }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/ad/94/67a78a8d08359e779894d4b1672658a3c7fcce216b48f06dfbe1de45521d/botocore-1.42.14-py3-none-any.whl", hash = "sha256:efe89adfafa00101390ec2c371d453b3359d5f9690261bc3bd70131e0d453e8e", size = 14583247, upload-time = "2025-12-19T20:27:00.54Z" },
+]
+
 [[package]]
 name = "certifi"
 version = "2025.11.12"
@@ -191,12 +219,10 @@ version = "0.1.0"
 source = { editable = "." }
 dependencies = [
     { name = "beanie" },
-    { name = "langchain" },
-    { name = "langchain-openai" },
+    { name = "boto3" },
+    { name = "langchain", extra = ["openai"] },
     { name = "langgraph" },
     { name = "motor" },
-    { name = "neo4j" },
-    { name = "openai" },
     { name = "pydantic" },
     { name = "pydantic-settings" },
     { name = "pyyaml" },
@@ -205,19 +231,17 @@ dependencies = [
 
 [package.dev-dependencies]
 dev = [
-    { name = "pyrefly" },
     { name = "ruff" },
+    { name = "ty" },
 ]
 
 [package.metadata]
 requires-dist = [
     { name = "beanie", specifier = ">=2.0.1" },
-    { name = "langchain", specifier = ">=0.1.0" },
-    { name = "langchain-openai", specifier = ">=0.1.0" },
+    { name = "boto3", specifier = ">=1.42.14" },
+    { name = "langchain", extras = ["openai"], specifier = ">=1.2.0" },
     { name = "langgraph", specifier = ">=1.0.5" },
     { name = "motor", specifier = ">=3.7.1" },
-    { name = "neo4j", specifier = ">=5.19.0" },
-    { name = "openai", specifier = ">=2.14.0" },
     { name = "pydantic", specifier = ">=2.12.5" },
     { name = "pydantic-settings", specifier = ">=2.12.0" },
     { name = "pyyaml", specifier = ">=6.0.1" },
@@ -226,8 +250,8 @@ requires-dist = [
 
 [package.metadata.requires-dev]
 dev = [
-    { name = "pyrefly", specifier = ">=0.46.0" },
     { name = "ruff", specifier = ">=0.14.10" },
+    { name = "ty", specifier = ">=0.0.5" },
 ]
 
 [[package]]
@@ -341,6 +365,15 @@ wheels = [
     { url = "https://files.pythonhosted.org/packages/97/9a/3c5391907277f0e55195550cf3fa8e293ae9ee0c00fb402fec1e38c0c82f/jiter-0.12.0-cp314-cp314t-win_arm64.whl", hash = "sha256:506c9708dd29b27288f9f8f1140c3cb0e3d8ddb045956d7757b1fa0e0f39a473", size = 185564, upload-time = "2025-11-09T20:48:50.376Z" },
 ]
 
+[[package]]
+name = "jmespath"
+version = "1.0.1"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/00/2a/e867e8531cf3e36b41201936b7fa7ba7b5702dbef42922193f05c8976cd6/jmespath-1.0.1.tar.gz", hash = "sha256:90261b206d6defd58fdd5e85f478bf633a2901798906be2ad389150c5c60edbe", size = 25843, upload-time = "2022-06-17T18:00:12.224Z" }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/31/b4/b9b800c45527aadd64d5b442f9b932b00648617eb5d63d2c7a6587b7cafc/jmespath-1.0.1-py3-none-any.whl", hash = "sha256:02e2e4cc71b5bcab88332eebf907519190dd9e6e82107fa7f83b1003a6252980", size = 20256, upload-time = "2022-06-17T18:00:10.251Z" },
+]
+
 [[package]]
 name = "jsonpatch"
 version = "1.33"
@@ -376,6 +409,11 @@ wheels = [
     { url = "https://files.pythonhosted.org/packages/23/00/4e3fa0d90f5a5c376ccb8ca983d0f0f7287783dfac48702e18f01d24673b/langchain-1.2.0-py3-none-any.whl", hash = "sha256:82f0d17aa4fbb11560b30e1e7d4aeb75e3ad71ce09b85c90ab208b181a24ffac", size = 102828, upload-time = "2025-12-15T14:51:40.802Z" },
 ]
 
+[package.optional-dependencies]
+openai = [
+    { name = "langchain-openai" },
+]
+
 [[package]]
 name = "langchain-core"
 version = "1.2.4"
@@ -508,18 +546,6 @@ wheels = [
     { url = "https://files.pythonhosted.org/packages/01/9a/35e053d4f442addf751ed20e0e922476508ee580786546d699b0567c4c67/motor-3.7.1-py3-none-any.whl", hash = "sha256:8a63b9049e38eeeb56b4fdd57c3312a6d1f25d01db717fe7d82222393c410298", size = 74996, upload-time = "2025-05-14T18:56:31.665Z" },
 ]
 
-[[package]]
-name = "neo4j"
-version = "6.0.3"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
-    { name = "pytz" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/60/b2/87534fc0520e5f9db1432bacc3f8d0ce024608010babc4f65b96e0c34906/neo4j-6.0.3.tar.gz", hash = "sha256:7fb79e166e281aafd67d521f6611763ebcdc529f26db506c5605f91ddcd825ea", size = 239653, upload-time = "2025-11-06T16:57:57.012Z" }
-wheels = [
-    { url = "https://files.pythonhosted.org/packages/ba/fe/55ed1d4636defb57fae1f7be7818820aa8071d45949c91ef8649930e70c5/neo4j-6.0.3-py3-none-any.whl", hash = "sha256:a92023854da96aed4270e0d03d6429cdd7f0d3335eae977370934f4732de5678", size = 325433, upload-time = "2025-11-06T16:57:55.03Z" },
-]
-
 [[package]]
 name = "numpy"
 version = "2.3.5"
@@ -818,19 +844,15 @@ wheels = [
 ]
 
 [[package]]
-name = "pyrefly"
-version = "0.46.0"
+name = "python-dateutil"
+version = "2.9.0.post0"
 source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/41/2e/5bf8c90b553d7d058151293c97d1cde8614a0b7f8e9fbad9641600f109b2/pyrefly-0.46.0.tar.gz", hash = "sha256:4fa77309286f850bf4378b8bdfd15b812a5c2927454ab015f01cbdb7b90dc333", size = 5069085, upload-time = "2025-12-15T12:05:08.923Z" }
+dependencies = [
+    { name = "six" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/66/c0/0c8b6ad9f17a802ee498c46e004a0eb49bc148f2fd230864601a86dcf6db/python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3", size = 342432, upload-time = "2024-03-01T18:36:20.211Z" }
 wheels = [
-    { url = "https://files.pythonhosted.org/packages/93/06/6799fcc02ebb2424063b4ef57ce03d943cbb08a5d3585d9fba422a3309e0/pyrefly-0.46.0-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:d71a303bc218ff4f803eae6e0f757bfea17c809cc145eea09f8f49b6158754ad", size = 11926321, upload-time = "2025-12-15T12:04:49.377Z" },
-    { url = "https://files.pythonhosted.org/packages/d6/0a/869e16ef40f0e494ed15c5cec85b9aeac9f0e3523e27ce239b42f957f2e1/pyrefly-0.46.0-py3-none-macosx_11_0_arm64.whl", hash = "sha256:0e7f001e979df4b4e125aa8d3d462978b4ac58ffc1f5bb7cc5e3a7572827a6fd", size = 11545813, upload-time = "2025-12-15T12:04:51.763Z" },
-    { url = "https://files.pythonhosted.org/packages/7e/39/7897966edead5fcc02c461b0c5d3e8a006eb84171b874b7362cb923a5cb0/pyrefly-0.46.0-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a07fd293b2e7441db7756e6cfa004c9a73da6a0925845947ab3b8ea05e913f99", size = 31558238, upload-time = "2025-12-15T12:04:54.144Z" },
-    { url = "https://files.pythonhosted.org/packages/37/bb/77f5854b681c10d33139c816a9894ed89d13c9814278be81e2d459bdd1b8/pyrefly-0.46.0-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1a893a08feefa3543467b41724ef045e678a806586198c63ce8a670d343ff677", size = 33764532, upload-time = "2025-12-15T12:04:56.978Z" },
-    { url = "https://files.pythonhosted.org/packages/70/b0/55efd3ea7a7fa04b396abb4d7f7bd93911f3b2a3f05436291ab7554ca3c8/pyrefly-0.46.0-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4d43783984d8cb4167e3d07c96477ba1d8367ef7b6c27621d9cc1af5ccc1cf44", size = 34819583, upload-time = "2025-12-15T12:04:59.461Z" },
-    { url = "https://files.pythonhosted.org/packages/e0/bb/f3fb05d6faa2d2eaa46f8efb8036b8a7f2478fca47329b64f3a24aa948a1/pyrefly-0.46.0-py3-none-win32.whl", hash = "sha256:3d8482bc9c3e1e7f881a980906264b2348abb51d61e0cf2dd5c9afcf10197201", size = 11009453, upload-time = "2025-12-15T12:05:02.103Z" },
-    { url = "https://files.pythonhosted.org/packages/60/d3/01309bbfa115079cefc1d027fa13ff8803891fc044a32aadfb58d3ee47cc/pyrefly-0.46.0-py3-none-win_amd64.whl", hash = "sha256:7ad8615cfd0523a8fccedeaaa0c963a7577255095a7d945fcf1693cfdde90716", size = 11692961, upload-time = "2025-12-15T12:05:04.449Z" },
-    { url = "https://files.pythonhosted.org/packages/ce/96/4369be3bc402f9c41f5b7bfef5d547b9ddf85e6bd48b8ccaa1c5f593c61b/pyrefly-0.46.0-py3-none-win_arm64.whl", hash = "sha256:9c032a30ecba3b1b0f428f1d3bb14fbd63ad0bba60d56b857888372d99ce558f", size = 11238385, upload-time = "2025-12-15T12:05:06.771Z" },
+    { url = "https://files.pythonhosted.org/packages/ec/57/56b9bcc3c9c6a792fcbaf139543cee77261f3651ca9da0c93f5c1221264b/python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427", size = 229892, upload-time = "2024-03-01T18:36:18.57Z" },
 ]
 
 [[package]]
@@ -842,15 +864,6 @@ wheels = [
     { url = "https://files.pythonhosted.org/packages/14/1b/a298b06749107c305e1fe0f814c6c74aea7b2f1e10989cb30f544a1b3253/python_dotenv-1.2.1-py3-none-any.whl", hash = "sha256:b81ee9561e9ca4004139c6cbba3a238c32b03e4894671e181b671e8cb8425d61", size = 21230, upload-time = "2025-10-26T15:12:09.109Z" },
 ]
 
-[[package]]
-name = "pytz"
-version = "2025.2"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/f8/bf/abbd3cdfb8fbc7fb3d4d38d320f2441b1e7cbe29be4f23797b4a2b5d8aac/pytz-2025.2.tar.gz", hash = "sha256:360b9e3dbb49a209c21ad61809c7fb453643e048b38924c765813546746e81c3", size = 320884, upload-time = "2025-03-25T02:25:00.538Z" }
-wheels = [
-    { url = "https://files.pythonhosted.org/packages/81/c4/34e93fe5f5429d7570ec1fa436f1986fb1f00c3e0f43a589fe2bbcd22c3f/pytz-2025.2-py2.py3-none-any.whl", hash = "sha256:5ddf76296dd8c44c26eb8f4b6f35488f3ccbf6fbbd7adee0b7262d43f0ec2f00", size = 509225, upload-time = "2025-03-25T02:24:58.468Z" },
-]
-
 [[package]]
 name = "pywin32"
 version = "311"
@@ -1035,6 +1048,27 @@ wheels = [
     { url = "https://files.pythonhosted.org/packages/74/31/b0e29d572670dca3674eeee78e418f20bdf97fa8aa9ea71380885e175ca0/ruff-0.14.10-py3-none-win_arm64.whl", hash = "sha256:e51d046cf6dda98a4633b8a8a771451107413b0f07183b2bef03f075599e44e6", size = 13729839, upload-time = "2025-12-18T19:28:48.636Z" },
 ]
 
+[[package]]
+name = "s3transfer"
+version = "0.16.0"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+    { name = "botocore" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/05/04/74127fc843314818edfa81b5540e26dd537353b123a4edc563109d8f17dd/s3transfer-0.16.0.tar.gz", hash = "sha256:8e990f13268025792229cd52fa10cb7163744bf56e719e0b9cb925ab79abf920", size = 153827, upload-time = "2025-12-01T02:30:59.114Z" }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/fc/51/727abb13f44c1fcf6d145979e1535a35794db0f6e450a0cb46aa24732fe2/s3transfer-0.16.0-py3-none-any.whl", hash = "sha256:18e25d66fed509e3868dc1572b3f427ff947dd2c56f844a5bf09481ad3f3b2fe", size = 86830, upload-time = "2025-12-01T02:30:57.729Z" },
+]
+
+[[package]]
+name = "six"
+version = "1.17.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/94/e7/b2c673351809dca68a0e064b6af791aa332cf192da575fd474ed7d6f16a2/six-1.17.0.tar.gz", hash = "sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81", size = 34031, upload-time = "2024-12-04T17:35:28.174Z" }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/b7/ce/149a00dd41f10bc29e5921b496af8b574d8413afcd5e30dfa0ed46c2cc5e/six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274", size = 11050, upload-time = "2024-12-04T17:35:26.475Z" },
+]
+
 [[package]]
 name = "sniffio"
 version = "1.3.1"
@@ -1105,6 +1139,31 @@ wheels = [
     { url = "https://files.pythonhosted.org/packages/d0/30/dc54f88dd4a2b5dc8a0279bdd7270e735851848b762aeb1c1184ed1f6b14/tqdm-4.67.1-py3-none-any.whl", hash = "sha256:26445eca388f82e72884e0d580d5464cd801a3ea01e63e5601bdff9ba6a48de2", size = 78540, upload-time = "2024-11-24T20:12:19.698Z" },
 ]
 
+[[package]]
+name = "ty"
+version = "0.0.5"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/9e/db/6299d478000f4f1c6f9bf2af749359381610ffc4cbe6713b66e436ecf6e7/ty-0.0.5.tar.gz", hash = "sha256:983da6330773ff71e2b249810a19c689f9a0372f6e21bbf7cde37839d05b4346", size = 4806218, upload-time = "2025-12-20T21:19:17.24Z" }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/7c/98/c1f61ba378b4191e641bb36c07b7fcc70ff844d61be7a4bf2fea7472b4a9/ty-0.0.5-py3-none-linux_armv6l.whl", hash = "sha256:1594cd9bb68015eb2f5a3c68a040860f3c9306dc6667d7a0e5f4df9967b460e2", size = 9785554, upload-time = "2025-12-20T21:19:05.024Z" },
+    { url = "https://files.pythonhosted.org/packages/ab/f9/b37b77c03396bd779c1397dae4279b7ad79315e005b3412feed8812a4256/ty-0.0.5-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:7c0140ba980233d28699d9ddfe8f43d0b3535d6a3bbff9935df625a78332a3cf", size = 9603995, upload-time = "2025-12-20T21:19:15.256Z" },
+    { url = "https://files.pythonhosted.org/packages/7d/70/4e75c11903b0e986c0203040472627cb61d6a709e1797fb08cdf9d565743/ty-0.0.5-py3-none-macosx_11_0_arm64.whl", hash = "sha256:15de414712cde92048ae4b1a77c4dc22920bd23653fe42acaf73028bad88f6b9", size = 9145815, upload-time = "2025-12-20T21:19:36.481Z" },
+    { url = "https://files.pythonhosted.org/packages/89/05/93983dfcf871a41dfe58e5511d28e6aa332a1f826cc67333f77ae41a2f8a/ty-0.0.5-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:438aa51ad6c5fae64191f8d58876266e26f9250cf09f6624b6af47a22fa88618", size = 9619849, upload-time = "2025-12-20T21:19:19.084Z" },
+    { url = "https://files.pythonhosted.org/packages/82/b6/896ab3aad59f846823f202e94be6016fb3f72434d999d2ae9bd0f28b3af9/ty-0.0.5-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:1b3d373fd96af1564380caf153600481c676f5002ee76ba8a7c3508cdff82ee0", size = 9606611, upload-time = "2025-12-20T21:19:24.583Z" },
+    { url = "https://files.pythonhosted.org/packages/ca/ae/098e33fc92330285ed843e2750127e896140c4ebd2d73df7732ea496f588/ty-0.0.5-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8453692503212ad316cf8b99efbe85a91e5f63769c43be5345e435a1b16cba5a", size = 10029523, upload-time = "2025-12-20T21:19:07.055Z" },
+    { url = "https://files.pythonhosted.org/packages/04/5a/f4b4c33758b9295e9aca0de9645deca0f4addd21d38847228723a6e780fc/ty-0.0.5-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:2e4c454139473abbd529767b0df7a795ed828f780aef8d0d4b144558c0dc4446", size = 10870892, upload-time = "2025-12-20T21:19:34.495Z" },
+    { url = "https://files.pythonhosted.org/packages/c3/c5/4e3e7e88389365aa1e631c99378711cf0c9d35a67478cb4720584314cf44/ty-0.0.5-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:426d4f3b82475b1ec75f3cc9ee5a667c8a4ae8441a09fcd8e823a53b706d00c7", size = 10599291, upload-time = "2025-12-20T21:19:26.557Z" },
+    { url = "https://files.pythonhosted.org/packages/c1/5d/138f859ea87bd95e17b9818e386ae25a910e46521c41d516bf230ed83ffc/ty-0.0.5-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5710817b67c6b2e4c0224e4f319b7decdff550886e9020f6d46aa1ce8f89a609", size = 10413515, upload-time = "2025-12-20T21:19:11.094Z" },
+    { url = "https://files.pythonhosted.org/packages/27/21/1cbcd0d3b1182172f099e88218137943e0970603492fb10c7c9342369d9a/ty-0.0.5-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e23c55ef08882c7c5ced1ccb90b4eeefa97f690aea254f58ac0987896c590f76", size = 10144992, upload-time = "2025-12-20T21:19:13.225Z" },
+    { url = "https://files.pythonhosted.org/packages/ad/30/fdac06a5470c09ad2659a0806497b71f338b395d59e92611f71b623d05a0/ty-0.0.5-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:b9e4c1a28a23b14cf8f4f793f4da396939f16c30bfa7323477c8cc234e352ac4", size = 9606408, upload-time = "2025-12-20T21:19:09.212Z" },
+    { url = "https://files.pythonhosted.org/packages/09/93/e99dcd7f53295192d03efd9cbcec089a916f49cad4935c0160ea9adbd53d/ty-0.0.5-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:4e9ebb61529b9745af662e37c37a01ad743cdd2c95f0d1421705672874d806cd", size = 9630040, upload-time = "2025-12-20T21:19:38.165Z" },
+    { url = "https://files.pythonhosted.org/packages/d7/f8/6d1e87186e4c35eb64f28000c1df8fd5f73167ce126c5e3dd21fd1204a23/ty-0.0.5-py3-none-musllinux_1_2_i686.whl", hash = "sha256:5eb191a8e332f50f56dfe45391bdd7d43dd4ef6e60884710fd7ce84c5d8c1eb5", size = 9754016, upload-time = "2025-12-20T21:19:32.79Z" },
+    { url = "https://files.pythonhosted.org/packages/28/e6/20f989342cb3115852dda404f1d89a10a3ce93f14f42b23f095a3d1a00c9/ty-0.0.5-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:92ed7451a1e82ee134a2c24ca43b74dd31e946dff2b08e5c34473e6b051de542", size = 10252877, upload-time = "2025-12-20T21:19:20.787Z" },
+    { url = "https://files.pythonhosted.org/packages/57/9d/fc66fa557443233dfad9ae197ff3deb70ae0efcfb71d11b30ef62f5cdcc3/ty-0.0.5-py3-none-win32.whl", hash = "sha256:71f6707e4c1c010c158029a688a498220f28bb22fdb6707e5c20e09f11a5e4f2", size = 9212640, upload-time = "2025-12-20T21:19:30.817Z" },
    { url = "https://files.pythonhosted.org/packages/68/b6/05c35f6dea29122e54af0e9f8dfedd0a100c721affc8cc801ebe2bc2ed13/ty-0.0.5-py3-none-win_amd64.whl", hash = "sha256:2b8b754a0d7191e94acdf0c322747fec34371a4d0669f5b4e89549aef28814ae", size = 10034701, upload-time = "2025-12-20T21:19:28.311Z" },
+    { url = "https://files.pythonhosted.org/packages/df/ca/4201ed5cb2af73912663d0c6ded927c28c28b3c921c9348aa8d2cfef4853/ty-0.0.5-py3-none-win_arm64.whl", hash = "sha256:83bea5a5296caac20d52b790ded2b830a7ff91c4ed9f36730fe1f393ceed6654", size = 9566474, upload-time = "2025-12-20T21:19:22.518Z" },
+]
+
 [[package]]
 name = "typing-extensions"
 version = "4.15.0"