Example Projects
This section provides complete example projects built with Casino of Life. Each one demonstrates a different aspect of the framework and can serve as a starting point for your own projects.
Project 1: Basic Liu Kang Trainer
Create a basic agent that learns to play as Liu Kang with an aggressive style.
from casino_of_life.agents import DynamicAgent
from casino_of_life.environment import RetroEnv
from casino_of_life.reward_evaluators import (
    BasicRewardEvaluator,
    SpecialMoveRewardEvaluator,
    MultiObjectiveRewardEvaluator
)

# Initialize environment
env = RetroEnv(
    game='MortalKombatII-Genesis',
    state='tournament',
    players=2,
    character='LiuKang'
)

# Create custom reward system
basic_rewards = BasicRewardEvaluator(
    health_reward=1.0,
    damage_penalty=-1.0,
    hit_reward=0.5
)

# Encourage special moves
special_moves = SpecialMoveRewardEvaluator(
    moves={
        "fireball": 1.0,
        "flying_kick": 1.5,
        "bicycle_kick": 2.0
    },
    successful_hit_multiplier=2.0
)

# Combine reward evaluators
reward_system = MultiObjectiveRewardEvaluator([
    basic_rewards,
    special_moves
])

# Create agent
agent = DynamicAgent(
    env=env,
    reward_evaluator=reward_system,
    policy='PPO',
    learning_rate=0.0003,
    frame_stack=4
)

# Train the agent
agent.train(timesteps=100000)

# Save the trained agent
agent.save("liu_kang_aggressive")

# Test the agent
agent.evaluate(episodes=10)
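Once the agent is saved you can reload it later for evaluation or further training. The snippet below is a minimal sketch that assumes DynamicAgent exposes a load method taking the saved name and an environment; the exact signature may differ, so check the agents API reference before using it.

# Hypothetical sketch: reload the saved Liu Kang agent and run a quick sanity check.
# DynamicAgent.load(...) is an assumed method name; adjust to the real loading API.
loaded_agent = DynamicAgent.load("liu_kang_aggressive", env=env)

# Short evaluation to confirm the reloaded policy still performs
win_rate = loaded_agent.evaluate(episodes=5)
print(f"Reloaded agent win rate over 5 episodes: {win_rate}")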
Project 2: Natural Language Training Assistant
This example demonstrates how to use CaballoLoko to guide your training with natural language.
from casino_of_life.agents import DynamicAgent, CaballoLoko
from casino_of_life.environment import RetroEnv
from casino_of_life.training import NaturalLanguageTrainer

# Initialize CaballoLoko
caballo_loko = CaballoLoko()

# Create environment
env = RetroEnv(
    game='MortalKombatII-Genesis',
    state='tournament',
    players=2
)

# Create dynamic agent
agent = DynamicAgent(env=env)

# Create natural language trainer
nl_trainer = NaturalLanguageTrainer(
    agent=agent,
    assistant=caballo_loko
)

# Interactive training session
instructions = [
    "Let's train Scorpion to be very aggressive",
    "Focus on using his spear move to pull opponents in",
    "Add a combo after successful spear pulls",
    "Make him more defensive when his health is low",
    "Improve his jumping attacks",
    "Teach him to avoid projectiles by teleporting",
    "Now create a complete strategy combining all these elements"
]

# Execute training based on natural language instructions
for instruction in instructions:
    print(f"\nInstruction: {instruction}")
    response = nl_trainer.train_with_instruction(instruction, timesteps=20000)
    print(f"Response: {response}")
    print(f"Current win rate: {nl_trainer.evaluate(episodes=10):.2f}")

# Save the fully trained agent
agent.save("scorpion_complete")
Project 3: Web Dashboard for Training Visualization
This project sets up a complete web dashboard for monitoring your agent's training progress.
from casino_of_life.agents import DynamicAgent
from casino_of_life.environment import RetroEnv
from casino_of_life.web import TrainingServer, DashboardConfig
from casino_of_life.training import TrainingManager
import threading

# Configure dashboard
dashboard_config = DashboardConfig(
    theme="dark",
    default_view="performance",
    refresh_rate=1,
    custom_metrics=["combo_count", "special_move_frequency", "reaction_time"]
)

# Start web server in a separate thread
server = TrainingServer(dashboard_config=dashboard_config)
server_thread = threading.Thread(target=server.start)
server_thread.daemon = True
server_thread.start()

# Create environment and agent
env = RetroEnv(
    game='MortalKombatII-Genesis',
    state='tournament',
    players=2,
    character='SubZero'
)

agent = DynamicAgent(
    env=env,
    policy='PPO',
    learning_rate=0.0003
)

# Register agent with web server
server.register_agent(agent, "SubZero_Trainer")

# Create training manager with web hooks
training_manager = TrainingManager(
    agent=agent,
    web_hooks={
        "server": server,
        "agent_id": "SubZero_Trainer",
        "update_frequency": 100  # Update web dashboard every 100 steps
    }
)

# Start training with visualization
training_manager.train(
    timesteps=500000,
    eval_frequency=10000,
    save_frequency=50000
)

print("Training complete! Dashboard will remain active until the program is terminated.")
print("Access the dashboard at http://localhost:8080")

# The server runs in a daemon thread, so it would shut down as soon as this
# script exits; block here to keep the dashboard available after training.
input("Press Enter to shut down the dashboard...\n")
Project 4: Multi-Character Tournament
This project trains multiple character agents and pits them against each other in a tournament.
from casino_of_life.agents import DynamicAgent
from casino_of_life.environment import RetroEnv
from casino_of_life.evaluation import TournamentEvaluator

# List of characters to train
characters = ["LiuKang", "Scorpion", "SubZero", "Raiden", "JohnnyCage", "KungLao"]

# Create environment
base_env = RetroEnv(
    game='MortalKombatII-Genesis',
    state='tournament',
    players=2
)

# Train agents for each character
agents = {}
for character in characters:
    print(f"Training agent for {character}...")

    # Create character-specific environment
    env = base_env.clone(character=character)

    # Create agent
    agent = DynamicAgent(
        env=env,
        name=f"{character}_Agent",
        policy='PPO'
    )

    # Train agent (brief training for example purposes)
    agent.train(timesteps=50000)

    # Store agent
    agents[character] = agent
    print(f"Completed training for {character}")

# Create tournament evaluator
evaluator = TournamentEvaluator(
    agents=list(agents.values()),
    matches_per_pair=5,
    evaluation_metrics=["win_rate", "damage_dealt", "combo_frequency"]
)

# Run tournament
print("\nRunning tournament...")
results = evaluator.run_tournament()

# Display results
print("\nTournament Results:")
evaluator.display_results()

# Save all agents
for character, agent in agents.items():
    agent.save(f"{character}_tournament")

print("\nAll agents saved successfully.")
Project 5: Advanced Self-Play Training with Curriculum
This project implements a sophisticated self-play training regime with curriculum learning.
from casino_of_life.agents import DynamicAgent
from casino_of_life.environment import RetroEnv
from casino_of_life.training import CurriculumTrainer
from casino_of_life.reward_evaluators import MultiObjectiveRewardEvaluator, BasicRewardEvaluator

# Create environment
env = RetroEnv(
    game='MortalKombatII-Genesis',
    state='tournament',
    players=2,
    character='Scorpion'
)

# Create reward system
reward_system = MultiObjectiveRewardEvaluator([
    BasicRewardEvaluator(health_reward=1.0, damage_penalty=-1.0)
])

# Create initial agent
agent = DynamicAgent(
    env=env,
    reward_evaluator=reward_system,
    policy='PPO',
    learning_rate=0.0003
)

# Define curriculum stages for self-play
curriculum = [
    {
        "name": "initial_training",
        "opponent": "random",  # Opponent takes random actions
        "timesteps": 50000,
        "success_metric": "win_rate",
        "success_threshold": 0.8
    },
    {
        "name": "self_play_easy",
        "opponent": "self_historical",  # Play against saved versions
        "timesteps": 100000,
        "opponent_sampling": {
            "latest_model_probability": 0.3,
            "random_historical_probability": 0.7
        },
        "success_metric": "win_rate",
        "success_threshold": 0.6
    },
    {
        "name": "self_play_medium",
        "opponent": "self_latest",  # Play against the most recent version
        "timesteps": 150000,
        "success_metric": "win_rate",
        "success_threshold": 0.55
    },
    {
        "name": "self_play_hard",
        "opponent": "self_ensemble",  # Play against an ensemble of past versions
        "timesteps": 200000,
        "success_metric": "win_rate",
        "success_threshold": 0.52
    }
]

# Create curriculum trainer with self-play
trainer = CurriculumTrainer(
    agent=agent,
    curriculum=curriculum,
    evaluation_frequency=10000,
    save_frequency=20000
)

# Start training
print("Starting advanced self-play curriculum training...")
trainer.train()

# Final evaluation
print("\nFinal Evaluation:")
final_stats = agent.evaluate(episodes=50, detailed=True)
for metric, value in final_stats.items():
    print(f"{metric}: {value}")

# Save final agent
agent.save("scorpion_advanced_selfplay")
print("\nTraining complete! Final model saved.")
Each of these examples demonstrates different aspects of the Casino of Life framework. You can use them as starting points and modify them to suit your specific needs. Remember to check the relevant documentation sections for more details on each component.