Blu3Orange committed
Commit: 8944fd8 · Parent(s): c69eb15
Add agent implementation and configuration loader for jurors
Files changed:
- agents/__init__.py  +9 -0
- agents/base_juror.py  +245 -0
- agents/config_loader.py  +67 -0
agents/__init__.py
ADDED
@@ -0,0 +1,9 @@
+"""Agent implementations for 12 Angry Agents."""
+
+from .base_juror import JurorAgent
+from .config_loader import load_juror_configs
+
+__all__ = [
+    "JurorAgent",
+    "load_juror_configs",
+]
agents/base_juror.py
ADDED
@@ -0,0 +1,245 @@
+"""Base juror agent implementation using Gemini."""
+
+import asyncio
+import json
+import os
+import random
+from typing import Any
+
+from google import genai
+from google.genai import types
+
+from core.models import JurorConfig, JurorMemory, ArgumentMemory
+from core.game_state import GameState, DeliberationTurn
+from core.conviction import conviction_to_text
+from case_db.models import CriminalCase
+
+
+class JurorAgent:
+    """AI-powered juror agent using Gemini for reasoning."""
+
+    def __init__(self, config: JurorConfig, api_key: str | None = None):
+        """Initialize juror agent.
+
+        Args:
+            config: Juror configuration.
+            api_key: Gemini API key. Defaults to GEMINI_API_KEY env var.
+        """
+        self.config = config
+        self.memory = JurorMemory(juror_id=config.juror_id)
+
+        # Initialize Gemini client
+        api_key = api_key or os.getenv("GEMINI_API_KEY")
+        if not api_key:
+            raise ValueError("GEMINI_API_KEY not set")
+        self.client = genai.Client(api_key=api_key)
+
+    def _build_system_prompt(self, case: CriminalCase, game_state: GameState) -> str:
+        """Build the system prompt for the juror."""
+        guilty, not_guilty = game_state.get_vote_tally()
+
+        # Get recent arguments
+        recent_args = self.memory.get_recent_arguments(5)
+        recent_args_text = ""
+        if recent_args:
+            recent_args_text = "\n".join(
+                f"- {arg.speaker_id}: [{arg.argument_type}] {arg.content_summary}"
+                for arg in recent_args
+            )
+        else:
+            recent_args_text = "No arguments have been made yet."
+
+        # Format doubts
+        doubts_text = ", ".join(self.memory.doubts[:3]) if self.memory.doubts else "None currently"
+
+        # Format reasoning chain
+        reasoning_text = " ".join(self.memory.reasoning_chain[-3:]) if self.memory.reasoning_chain else "Still forming opinion."
+
+        return f"""# JUROR IDENTITY
+You are {self.config.name}, Juror #{self.config.seat_number}.
+{self.config.personality_prompt}
+
+# THE CASE: {case.title}
+{case.summary}
+
+# KEY EVIDENCE
+{case.get_evidence_summary()}
+
+# YOUR CURRENT POSITION
+- Conviction Level: {conviction_to_text(self.memory.current_conviction)}
+- Your reasoning: {reasoning_text}
+- Your doubts: {doubts_text}
+
+# RECENT DELIBERATION
+{recent_args_text}
+
+# CURRENT VOTE TALLY
+Guilty: {guilty} | Not Guilty: {not_guilty}
+
+# INSTRUCTIONS
+You must respond IN CHARACTER as {self.config.name}. Stay true to your personality:
+- Archetype: {self.config.archetype}
+- Be authentic to your background and perspective
+- Keep your argument focused and natural (2-4 sentences typically)
+- You may address other jurors directly or speak to the room
+- Consider but don't simply repeat previous arguments
+"""
+
+    async def generate_argument(
+        self,
+        case: CriminalCase,
+        game_state: GameState,
+    ) -> DeliberationTurn:
+        """Generate an argument for this juror's turn.
+
+        Args:
+            case: The criminal case.
+            game_state: Current game state.
+
+        Returns:
+            DeliberationTurn with the generated argument.
+        """
+        system_prompt = self._build_system_prompt(case, game_state)
+
+        # Determine argument type based on archetype tendencies
+        argument_types = self._get_preferred_argument_types()
+        selected_type = random.choice(argument_types)
+
+        user_prompt = f"""Make an argument in the deliberation. Your argument style should lean toward: {selected_type}
+
+Respond with a JSON object in this exact format:
+{{
+    "argument_type": "{selected_type}",
+    "content": "Your argument here - speak naturally as your character would",
+    "target_juror": null or "juror_X" if addressing someone specific,
+    "internal_reasoning": "Brief note about why you're making this argument"
+}}
+
+Remember to stay in character as {self.config.name}!"""
+
+        try:
+            response = await asyncio.to_thread(
+                self.client.models.generate_content,
+                model=self.config.model_id,
+                contents=[
+                    types.Content(
+                        role="user",
+                        parts=[types.Part(text=system_prompt + "\n\n" + user_prompt)]
+                    )
+                ],
+                config=types.GenerateContentConfig(
+                    temperature=self.config.temperature,
+                    response_mime_type="application/json",
+                ),
+            )
+
+            # Parse response
+            response_text = response.text.strip()
+            result = json.loads(response_text)
+
+            # Create turn
+            turn = DeliberationTurn(
+                round_number=game_state.round_number,
+                speaker_id=self.config.juror_id,
+                speaker_name=self.config.name,
+                argument_type=result.get("argument_type", selected_type),
+                content=result.get("content", "I need more time to think about this."),
+                target_id=result.get("target_juror"),
+            )
+
+            # Update own memory
+            self.memory.arguments_made.append(turn.content)
+            if result.get("internal_reasoning"):
+                self.memory.reasoning_chain.append(result["internal_reasoning"])
+
+            return turn
+
+        except Exception as e:
+            # Fallback response on error
+            print(f"Error generating argument for {self.config.name}: {e}")
+            return DeliberationTurn(
+                round_number=game_state.round_number,
+                speaker_id=self.config.juror_id,
+                speaker_name=self.config.name,
+                argument_type="observation",
+                content=f"*{self.config.name} pauses thoughtfully* I'm still considering the evidence...",
+            )
+
+    def _get_preferred_argument_types(self) -> list[str]:
+        """Get argument types this archetype prefers."""
+        archetype_preferences = {
+            "rationalist": ["logical", "evidence", "question"],
+            "empath": ["emotional", "moral", "narrative"],
+            "cynic": ["evidence", "logical", "observation"],
+            "conformist": ["observation", "question", "emotional"],
+            "contrarian": ["question", "logical", "challenge"],
+            "impatient": ["observation", "logical", "evidence"],
+            "detail_obsessed": ["evidence", "question", "logical"],
+            "moralist": ["moral", "emotional", "narrative"],
+            "pragmatist": ["logical", "evidence", "observation"],
+            "storyteller": ["narrative", "question", "emotional"],
+            "wildcard": ["observation", "question", "emotional", "narrative"],
+        }
+        return archetype_preferences.get(self.config.archetype, ["observation", "logical"])
+
+    def receive_argument(self, argument: DeliberationTurn, impact: float = 0.0) -> None:
+        """Process an argument from another juror.
+
+        Args:
+            argument: The argument that was made.
+            impact: Pre-calculated conviction change.
+        """
+        # Create memory of argument
+        arg_memory = ArgumentMemory(
+            speaker_id=argument.speaker_id,
+            content_summary=argument.content[:200],  # Truncate for memory
+            argument_type=argument.argument_type,
+            persuasiveness=abs(impact),
+            round_heard=argument.round_number,
+        )
+        self.memory.add_argument(arg_memory)
+
+        # Update conviction
+        self.memory.update_conviction(impact)
+
+    def set_initial_conviction(self, case: CriminalCase) -> float:
+        """Set initial conviction based on case and archetype.
+
+        Args:
+            case: The case being deliberated.
+
+        Returns:
+            Initial conviction score (0.0 to 1.0).
+        """
+        # Base conviction by case difficulty
+        if case.difficulty == "clear_guilty":
+            base = 0.7
+        elif case.difficulty == "clear_innocent":
+            base = 0.3
+        else:
+            base = 0.5
+
+        # Adjust by initial lean
+        lean = self.config.initial_lean
+        if lean == "prosecution":
+            base += 0.15
+        elif lean == "defense":
+            base -= 0.15
+        elif lean == "minority":
+            # Will be set based on majority later
+            pass
+        elif lean == "random":
+            base += random.uniform(-0.2, 0.2)
+
+        # Add some individual variance
+        base += random.gauss(0, 0.1 * self.config.volatility)
+
+        # Clamp to valid range
+        self.memory.current_conviction = max(0.0, min(1.0, base))
+        self.memory.conviction_history.append(self.memory.current_conviction)
+
+        return self.memory.current_conviction
+
+    def get_vote(self) -> str:
+        """Get current vote based on conviction."""
+        return self.memory.get_current_vote()
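
A minimal driver sketch (not part of this commit) showing how the pieces above fit together for one deliberation round. It assumes a CriminalCase and GameState are constructed elsewhere in the project, that GEMINI_API_KEY is set, and it uses a fixed impact of 0.0 where a real persuasion score would go.

import asyncio

from agents import JurorAgent, load_juror_configs


async def run_round(case, game_state) -> None:
    # One agent per configured juror, each seeded with a starting conviction.
    jurors = [JurorAgent(cfg) for cfg in load_juror_configs()]
    for juror in jurors:
        juror.set_initial_conviction(case)

    # Each juror speaks once; every other juror hears the argument.
    for speaker in jurors:
        turn = await speaker.generate_argument(case, game_state)
        print(f"{turn.speaker_name} [{turn.argument_type}]: {turn.content}")
        for listener in jurors:
            if listener is not speaker:
                listener.receive_argument(turn, impact=0.0)

    # Current votes after the round.
    print({j.config.name: j.get_vote() for j in jurors})

# asyncio.run(run_round(case, game_state)) with real case/game_state objects.
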
agents/config_loader.py
ADDED
@@ -0,0 +1,67 @@
+"""Load juror configurations from YAML."""
+
+from pathlib import Path
+from typing import Any
+
+import yaml
+
+from core.models import JurorConfig
+
+
+def load_juror_configs(config_path: str | Path | None = None) -> list[JurorConfig]:
+    """Load all juror configurations from YAML file.
+
+    Args:
+        config_path: Path to jurors.yaml. Defaults to agents/configs/jurors.yaml.
+
+    Returns:
+        List of JurorConfig objects.
+    """
+    if config_path is None:
+        config_path = Path(__file__).parent / "configs" / "jurors.yaml"
+
+    config_path = Path(config_path)
+    if not config_path.exists():
+        raise FileNotFoundError(f"Juror config file not found: {config_path}")
+
+    with open(config_path, "r", encoding="utf-8") as f:
+        data = yaml.safe_load(f)
+
+    configs = []
+    for juror_data in data.get("jurors", []):
+        config = JurorConfig(
+            juror_id=juror_data["juror_id"],
+            seat_number=juror_data["seat_number"],
+            name=juror_data["name"],
+            emoji=juror_data["emoji"],
+            archetype=juror_data["archetype"],
+            personality_prompt=juror_data["personality_prompt"],
+            stubbornness=juror_data["stubbornness"],
+            volatility=juror_data["volatility"],
+            influence=juror_data["influence"],
+            verbosity=juror_data.get("verbosity", 0.5),
+            initial_lean=juror_data.get("initial_lean", "neutral"),
+        )
+        configs.append(config)
+
+    return configs
+
+
+def get_juror_config(juror_id: str, configs: list[JurorConfig] | None = None) -> JurorConfig | None:
+    """Get a specific juror config by ID.
+
+    Args:
+        juror_id: The juror ID to find.
+        configs: Optional list of configs. If None, loads from default path.
+
+    Returns:
+        JurorConfig if found, None otherwise.
+    """
+    if configs is None:
+        configs = load_juror_configs()
+
+    for config in configs:
+        if config.juror_id == juror_id:
+            return config
+
+    return None
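
A sketch (not from this commit) of the jurors.yaml shape that load_juror_configs implies: the keys mirror exactly what the loader reads above, while the sample juror values and the temp-file round-trip are illustrative only.

import tempfile
from pathlib import Path

from agents.config_loader import load_juror_configs

SAMPLE_YAML = """\
jurors:
  - juror_id: juror_1
    seat_number: 1
    name: Sample Juror
    emoji: "🧮"
    archetype: rationalist
    personality_prompt: You weigh every claim strictly against the evidence.
    stubbornness: 0.6
    volatility: 0.3
    influence: 0.7
    verbosity: 0.5         # optional, defaults to 0.5
    initial_lean: neutral  # optional, defaults to "neutral"
"""

with tempfile.TemporaryDirectory() as tmp:
    path = Path(tmp) / "jurors.yaml"
    path.write_text(SAMPLE_YAML, encoding="utf-8")
    configs = load_juror_configs(path)
    print(configs[0].name, configs[0].archetype)  # expected: Sample Juror rationalist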