Quick Start
from qubots import execute_playground_optimization
import qubots.rastion as rastion

# Authenticate
rastion.authenticate("your_api_token")

# Run optimization in the cloud
result = execute_playground_optimization(
    problem_name="tsp_problem",
    problem_username="examples",
    optimizer_name="genetic_tsp",
    optimizer_username="algorithms",
    problem_params={"n_cities": 50},
    optimizer_params={"population_size": 100, "generations": 500}
)

print(f"Success: {result['success']}")
print(f"Best value: {result['best_value']}")
print(f"Execution time: {result['execution_time']:.2f}s")
Key Functions
execute_playground_optimization()
The main function for cloud execution with real-time logging:
from qubots import execute_playground_optimization

# Define a logging callback for real-time output
def log_callback(level, message, source):
    print(f"[{level.upper()}] [{source}] {message}")

result = execute_playground_optimization(
    problem_name="maxcut_problem",
    problem_username="community",
    optimizer_name="genetic_maxcut",
    optimizer_username="algorithms",
    problem_params={"n_vertices": 20, "density": 0.3},
    optimizer_params={"population_size": 100, "generations": 200},
    log_callback=log_callback
)

# Access results
print(f"Success: {result['success']}")
if result['success']:
    print(f"Best value: {result['best_value']}")
    print(f"Runtime: {result['execution_time']:.2f}s")
    print(f"Iterations: {result.get('iterations', 'N/A')}")
PlaygroundExecutor Class
For direct control with real-time monitoring:
from qubots import PlaygroundExecutor

# Create executor with callbacks
def progress_callback(message, progress):
    print(f"Progress: {message} ({progress:.1f}%)")

def log_callback(level, message, source):
    print(f"[{level}] {source}: {message}")

executor = PlaygroundExecutor(
    progress_callback=progress_callback,
    log_callback=log_callback
)

# Execute optimization
result = executor.execute_optimization(
    problem_name="tsp_kroA100",
    problem_username="benchmarks",
    optimizer_name="genetic_tsp",
    optimizer_username="algorithms",
    problem_params={"n_cities": 100},
    optimizer_params={"population_size": 200, "generations": 1000}
)

# Process results
if result.success:
    print(f"Best value: {result.best_value}")
    print(f"Solution: {result.best_solution}")
    print(f"Metadata: {result.metadata}")
Common Use Cases
Parameter Sweeps
Test multiple parameter combinations in the cloud:
# Test different population sizes
population_sizes = [50, 100, 200, 500]
results = []

for pop_size in population_sizes:
    result = execute_playground_optimization(
        problem_name="tsp_berlin52",
        problem_username="benchmarks",
        optimizer_name="genetic_tsp",
        optimizer_username="algorithms",
        optimizer_params={
            "population_size": pop_size,
            "generations": 1000
        }
    )
    if result['success']:
        results.append((pop_size, result['best_value']))

# Find best configuration
if results:
    best_pop, best_value = min(results, key=lambda x: x[1])
    print(f"Best population size: {best_pop} (value: {best_value})")
Local vs Cloud Comparison
Test locally first, then validate in cloud:
from qubots import AutoProblem, AutoOptimizer

# Local execution
problem = AutoProblem.from_repo("examples/tsp")
optimizer = AutoOptimizer.from_repo("examples/genetic_tsp")
local_result = optimizer.optimize(problem)

# Cloud execution with the same models and parameters
cloud_result = execute_playground_optimization(
    problem_name="tsp_problem",
    problem_username="examples",
    optimizer_name="genetic_tsp",
    optimizer_username="examples",
    problem_params={"n_cities": 50},
    optimizer_params={"population_size": 100, "generations": 500}
)

print(f"Local: {local_result.best_value:.4f}")
if cloud_result['success']:
    print(f"Cloud: {cloud_result['best_value']:.4f}")
else:
    print(f"Cloud execution failed: {cloud_result.get('error_message', 'Unknown error')}")
Directory-based Execution
Load models from local directories:
# Execute using local model directories
result = execute_playground_optimization(
    problem_dir="./my_problem",
    optimizer_dir="./my_optimizer",
    problem_params={"n_cities": 50},
    optimizer_params={"population_size": 100}
)

print(f"Success: {result['success']}")
if result['success']:
    print(f"Best value: {result['best_value']}")
    print(f"Dashboard data: {result.get('dashboard', {})}")
Parameter Schemas
Get parameter information for dynamic UIs:
from qubots.playground_integration import get_model_parameter_schema

# Get parameter schema for a model
schema = get_model_parameter_schema("genetic_tsp", "algorithms")

print(f"Model type: {schema['model_type']}")
print("Parameters:")
for param_name, param_info in schema['parameters'].items():
    print(f"  {param_name}: {param_info['type']} ({'required' if param_info['required'] else 'optional'})")
    if 'default' in param_info:
        print(f"    Default: {param_info['default']}")
    if 'minimum' in param_info and 'maximum' in param_info:
        print(f"    Range: [{param_info['minimum']}, {param_info['maximum']}]")
Error Handling
Handle execution errors gracefully:
result = execute_playground_optimization(
    problem_name="complex_problem",
    problem_username="research",
    optimizer_name="experimental_optimizer",
    optimizer_username="lab"
)

if not result['success']:
    print(f"Execution failed: {result.get('error_message', 'Unknown error')}")
    print(f"Error type: {result.get('error_type', 'Unknown')}")

    # Check for specific error patterns
    error_msg = result.get('error_message', '')
    if 'authentication' in error_msg.lower():
        print("Check your API token")
    elif 'timeout' in error_msg.lower():
        print("Try reducing problem size or increasing timeout")
    elif 'not found' in error_msg.lower():
        print("Check model names and usernames")
else:
    print("Execution successful!")
Integration with Benchmarking
Use the playground for large-scale benchmarking:
import pandas as pd

# Benchmark problems and optimizers to pair up
problems = ["tsp_berlin52", "tsp_kroA100", "tsp_lin318"]
optimizers = ["genetic_tsp", "simulated_annealing", "ortools_tsp"]

# Run cloud benchmarks
results = []
for problem in problems:
    for optimizer in optimizers:
        result = execute_playground_optimization(
            problem_name=problem,
            problem_username="benchmarks",
            optimizer_name=optimizer,
            optimizer_username="algorithms"
        )
        if result['success']:
            results.append({
                "problem": problem,
                "optimizer": optimizer,
                "best_value": result['best_value'],
                "runtime": result['execution_time']
            })

# Analyze benchmark results
df = pd.DataFrame(results)
summary = df.groupby(['problem', 'optimizer']).agg({
    'best_value': ['mean', 'std'],
    'runtime': ['mean', 'std']
}).round(4)
print(summary)
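With a single run per (problem, optimizer) pair, the std columns above will be NaN; repeat runs if you need spread statistics. For a direct side-by-side view, a pivot table built from the same DataFrame works well:

# Best value per problem, one column per optimizer
pivot = df.pivot(index='problem', columns='optimizer', values='best_value')
print(pivot)

# Best optimizer per problem, assuming a minimization objective
print(pivot.idxmin(axis=1))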
Next Steps