Compare commits


1 Commit

Author: dependabot[bot]
SHA1: 838909c40c
Message: Bump github.com/golang-jwt/jwt/v5 from 5.2.2 to 5.3.0
Date: 6 months ago
67 changed files (number of changed lines in parentheses):

1. .gitignore (1)
2. data/levels.json (326)
3. go.mod (6)
4. go.sum (12)
5. internal/auth/auth.go (2)
6. internal/design/design.go (13)
7. internal/level/level.go (71)
8. internal/level/levels_test.go (8)
9. internal/simulation/cache.go (194)
10. internal/simulation/cache_test.go (319)
11. internal/simulation/cdn.go (2)
12. internal/simulation/cdn_test.go (2)
13. internal/simulation/database.go (61)
14. internal/simulation/database_test.go (139)
15. internal/simulation/datapipeline.go (203)
16. internal/simulation/datapipeline_test.go (396)
17. internal/simulation/engine.go (22)
18. internal/simulation/engine_test.go (863)
19. internal/simulation/messagequeue.go (115)
20. internal/simulation/messagequeue_test.go (329)
21. internal/simulation/microservice.go (241)
22. internal/simulation/microservice_test.go (286)
23. internal/simulation/monitoring.go (221)
24. internal/simulation/monitoring_test.go (411)
25. internal/simulation/testdata/cache_design.json (55)
26. internal/simulation/testdata/database_design.json (35)
27. internal/simulation/testdata/datapipeline_design.json (188)
28. internal/simulation/testdata/messagequeue_design.json (53)
29. internal/simulation/testdata/microservice_design.json (96)
30. internal/simulation/testdata/monitoring_design.json (127)
31. internal/simulation/testdata/simple_design.json (2)
32. internal/simulation/testdata/thirdpartyservice_design.json (164)
33. internal/simulation/thirdpartyservice.go (219)
34. internal/simulation/thirdpartyservice_test.go (382)
35. internal/simulation/user.go (18)
36. internal/simulation/webserver.go (22)
37. router/handlers/chat.go (93)
38. router/handlers/game.go (38)
39. router/handlers/results.go (133)
40. router/handlers/simulation.go (552)
41. router/router.go (5)
42. static/app.js (319)
43. static/canvas.html (306)
44. static/challenges.html (12)
45. static/chat.html (41)
46. static/commands.js (389)
47. static/connection.js (20)
48. static/difficulty-select.html (13)
49. static/failure.html (54)
50. static/game-mode.css (4)
51. static/game.html (380)
52. static/header.html (19)
53. static/index.html (134)
54. static/node.js (359)
55. static/observers.js (411)
56. static/pluginRegistry.js (4)
57. static/plugins/database.js (4)
58. static/plugins/messageQueue.js (3)
59. static/plugins/microservice.js (4)
60. static/plugins/monitorAlerting.js (4)
61. static/plugins/webserver.js (4)
62. static/states/CanvasState.js (156)
63. static/states/CanvasStateMachine.js (161)
64. static/states/ConnectionState.js (113)
65. static/states/DesignState.js (79)
66. static/style.css (831)
67. static/success.html (42)

1
.gitignore vendored

@@ -1,3 +1,2 @@
 .env
 /tmp
-systemdesigngame

326
data/levels.json

@@ -1,10 +1,40 @@
 [
   {
-    "id": "url-shortener",
+    "id": "url-shortener-easy",
+    "name": "URL Shortener",
+    "description": "Build a basic service to shorten URLs with a single backend.",
+    "difficulty": "easy",
+    "targetRps": 100,
+    "durationSec": 60,
+    "maxMonthlyUsd": 100,
+    "maxP95LatencyMs": 200,
+    "requiredAvailabilityPct": 99.0,
+    "mustInclude": ["database"],
+    "hints": ["Start with a basic backend and persistent storage."],
+    "interviewerRequirements": [
+      "Users should be able to shorten a URL via a basic web interface or API.",
+      "Each shortened URL should redirect to the original URL.",
+      "Data should be persisted so links remain valid after a restart."
+    ],
+    "functionalRequirements": [
+      "Must include a database to persist mappings"
+    ],
+    "nonFunctionalRequirements": [
+      "Target RPS: 100",
+      "Max P95 latency: 200ms",
+      "Required availability: 99.0%",
+      "Max monthly cost: $100",
+      "Simulation duration: 60 seconds"
+    ]
+  },
+  {
+    "id": "url-shortener-medium",
     "name": "URL Shortener",
     "description": "Scale your URL shortener to handle traffic spikes and ensure high availability.",
+    "difficulty": "medium",
     "targetRps": 1000,
     "durationSec": 180,
+    "maxMonthlyUsd": 300,
     "maxP95LatencyMs": 150,
     "requiredAvailabilityPct": 99.9,
     "mustInclude": ["database", "loadBalancer"],
@@ -23,15 +53,78 @@
       "Target RPS: 1000",
       "Max P95 latency: 150ms",
       "Required availability: 99.9%",
+      "Max monthly cost: $300",
       "Simulation duration: 180 seconds"
     ]
   },
   {
-    "id": "chat-app",
+    "id": "url-shortener-hard",
+    "name": "URL Shortener",
+    "description": "Design a globally distributed URL shortening service with low latency and high availability.",
+    "difficulty": "hard",
+    "targetRps": 10000,
+    "durationSec": 300,
+    "maxMonthlyUsd": 1000,
+    "maxP95LatencyMs": 100,
+    "requiredAvailabilityPct": 99.99,
+    "mustInclude": ["cdn", "database"],
+    "encouragedComponents": ["cache", "messageQueue"],
+    "hints": ["Think about write-path consistency and global replication."],
+    "interviewerRequirements": [
+      "The service must support globally distributed traffic with low latency.",
+      "Users across the world should get fast redirects via local CDN edge nodes.",
+      "Writes (shorten requests) should be consistent and durable."
+    ],
+    "functionalRequirements": [
+      "Must include a CDN and a database",
+      "Encouraged to include a cache and a message queue",
+      "Writes should pass through a queue before hitting storage (eventual consistency)"
+    ],
+    "nonFunctionalRequirements": [
+      "Target RPS: 10000",
+      "Max P95 latency: 100ms",
+      "Required availability: 99.99%",
+      "Max monthly cost: $1000",
+      "Simulation duration: 300 seconds"
+    ]
+  },
+  {
+    "id": "chat-app-easy",
+    "name": "Chat App",
+    "description": "Implement a simple chat app for small group communication.",
+    "difficulty": "easy",
+    "targetRps": 50,
+    "durationSec": 120,
+    "maxMonthlyUsd": 150,
+    "maxP95LatencyMs": 300,
+    "requiredAvailabilityPct": 99.0,
+    "mustInclude": ["webserver", "database"],
+    "hints": ["You don’t need to persist every message yet."],
+    "interviewerRequirements": [
+      "Users should be able to send and receive messages in real-time.",
+      "Messages should be stored to support reloading the page without data loss.",
+      "A basic frontend should connect to a backend service."
+    ],
+    "functionalRequirements": [
+      "Must include a webserver and a database"
+    ],
+    "nonFunctionalRequirements": [
+      "Target RPS: 50",
+      "Max P95 latency: 300ms",
+      "Required availability: 99.0%",
+      "Max monthly cost: $150",
+      "Simulation duration: 120 seconds"
+    ]
+  },
+  {
+    "id": "chat-app-medium",
     "name": "Chat App",
     "description": "Support real-time chat across mobile and web, with message persistence.",
+    "difficulty": "medium",
     "targetRps": 500,
     "durationSec": 300,
+    "maxMonthlyUsd": 500,
     "maxP95LatencyMs": 200,
     "requiredAvailabilityPct": 99.9,
     "mustInclude": ["webserver", "database", "messageQueue"],
@@ -50,15 +143,78 @@
       "Target RPS: 500",
       "Max P95 latency: 200ms",
       "Required availability: 99.9%",
+      "Max monthly cost: $500",
+      "Simulation duration: 300 seconds"
+    ]
+  },
+  {
+    "id": "chat-app-hard",
+    "name": "Chat App",
+    "description": "Design a Slack-scale chat platform supporting typing indicators, read receipts, and delivery guarantees.",
+    "difficulty": "hard",
+    "targetRps": 5000,
+    "durationSec": 600,
+    "maxMonthlyUsd": 1500,
+    "maxP95LatencyMs": 100,
+    "requiredAvailabilityPct": 99.99,
+    "mustInclude": ["messageQueue", "database"],
+    "discouragedComponents": ["single-instance webserver"],
+    "hints": ["Think about pub/sub, retries, and ordering guarantees."],
+    "interviewerRequirements": [
+      "Messages must support delivery guarantees and deduplication.",
+      "Users must receive typing indicators and read receipts in near-real-time.",
+      "System must scale horizontally and tolerate node failures."
+    ],
+    "functionalRequirements": [
+      "Must include a message queue and database",
+      "Discouraged from using a single-instance webserver",
+      "Encouraged to use a publish/subscribe system for fan-out"
+    ],
+    "nonFunctionalRequirements": [
+      "Target RPS: 5000",
+      "Max P95 latency: 100ms",
+      "Required availability: 99.99%",
+      "Max monthly cost: $1500",
+      "Simulation duration: 600 seconds"
+    ]
+  },
+  {
+    "id": "netflix-easy",
+    "name": "Netflix Clone",
+    "description": "Build a basic video streaming service with direct file access.",
+    "difficulty": "easy",
+    "targetRps": 200,
+    "durationSec": 300,
+    "maxMonthlyUsd": 500,
+    "maxP95LatencyMs": 500,
+    "requiredAvailabilityPct": 99.0,
+    "mustInclude": ["cdn"],
+    "hints": ["You don’t need full-blown adaptive streaming yet."],
+    "interviewerRequirements": [
+      "Users should be able to request and stream a video file.",
+      "Content should be served via a CDN to reduce latency and bandwidth cost.",
+      "Playback does not require adaptive streaming."
+    ],
+    "functionalRequirements": [
+      "Must include a CDN to serve static video content"
+    ],
+    "nonFunctionalRequirements": [
+      "Target RPS: 200",
+      "Max P95 latency: 500ms",
+      "Required availability: 99.0%",
+      "Max monthly cost: $500",
       "Simulation duration: 300 seconds"
     ]
   },
   {
-    "id": "netflix-clone",
+    "id": "netflix-medium",
     "name": "Netflix Clone",
     "description": "Add video transcoding, caching, and recommendations.",
+    "difficulty": "medium",
     "targetRps": 1000,
     "durationSec": 600,
+    "maxMonthlyUsd": 2000,
     "maxP95LatencyMs": 300,
     "requiredAvailabilityPct": 99.9,
     "mustInclude": ["cdn", "data pipeline", "cache"],
@@ -77,15 +233,78 @@
       "Target RPS: 1000",
       "Max P95 latency: 300ms",
       "Required availability: 99.9%",
+      "Max monthly cost: $2000",
       "Simulation duration: 600 seconds"
     ]
   },
   {
-    "id": "rate-limiter",
+    "id": "netflix-hard",
+    "name": "Netflix Clone",
+    "description": "Design a globally resilient, multi-region Netflix-scale system with intelligent failover and real-time telemetry.",
+    "difficulty": "hard",
+    "targetRps": 10000,
+    "durationSec": 900,
+    "maxMonthlyUsd": 10000,
+    "maxP95LatencyMs": 200,
+    "requiredAvailabilityPct": 99.999,
+    "mustInclude": ["cdn", "data pipeline", "monitoring/alerting"],
+    "encouragedComponents": ["messageQueue", "cache", "third party service"],
+    "hints": ["You’ll need intelligent routing and fallback mechanisms."],
+    "interviewerRequirements": [
+      "Users worldwide should stream with minimal latency through regional CDN edge nodes.",
+      "The system must support failover between regions.",
+      "Real-time metrics and alerting must be integrated for proactive issue detection."
+    ],
+    "functionalRequirements": [
+      "Must include a CDN, data pipeline, and monitoring/alerting",
+      "Encouraged to use cache and queue for async video processing",
+      "Encouraged to simulate third-party service integrations (e.g. payment, licensing)"
+    ],
+    "nonFunctionalRequirements": [
+      "Target RPS: 10000",
+      "Max P95 latency: 200ms",
+      "Required availability: 99.999%",
+      "Max monthly cost: $10000",
+      "Simulation duration: 900 seconds"
+    ]
+  },
+  {
+    "id": "rate-limiter-easy",
+    "name": "Rate Limiter",
+    "description": "Build a basic in-memory rate limiter for a single instance service.",
+    "difficulty": "easy",
+    "targetRps": 200,
+    "durationSec": 60,
+    "maxMonthlyUsd": 50,
+    "maxP95LatencyMs": 100,
+    "requiredAvailabilityPct": 99.0,
+    "mustInclude": ["webserver"],
+    "hints": ["Use an in-memory store and sliding window or token bucket."],
+    "interviewerRequirements": [
+      "Each client should be limited to N requests per minute.",
+      "Rate limits should be enforced in memory.",
+      "Only one instance is required—no cross-node coordination."
+    ],
+    "functionalRequirements": [
+      "Must include a webserver that can reject requests over the configured RPS",
+      "Rate limiting must be enforced locally (no coordination with other nodes)"
+    ],
+    "nonFunctionalRequirements": [
+      "Target RPS: 200",
+      "Max P95 latency: 100ms",
+      "Required availability: 99.0%",
+      "Max monthly cost: $50",
+      "Simulation duration: 60 seconds"
+    ]
+  },
+  {
+    "id": "rate-limiter-medium",
     "name": "Rate Limiter",
     "description": "Design a rate limiter that works across multiple instances and enforces global quotas.",
+    "difficulty": "medium",
     "targetRps": 1000,
     "durationSec": 180,
+    "maxMonthlyUsd": 300,
     "maxP95LatencyMs": 50,
     "requiredAvailabilityPct": 99.9,
     "mustInclude": ["webserver", "cache"],
@@ -105,15 +324,78 @@
       "Target RPS: 1000",
       "Max P95 latency: 50ms",
       "Required availability: 99.9%",
+      "Max monthly cost: $300",
       "Simulation duration: 180 seconds"
     ]
   },
   {
-    "id": "metrics-system",
+    "id": "rate-limiter-hard",
+    "name": "Rate Limiter",
+    "description": "Build a globally distributed rate limiter with per-user and per-region policies.",
+    "difficulty": "hard",
+    "targetRps": 5000,
+    "durationSec": 300,
+    "maxMonthlyUsd": 1000,
+    "maxP95LatencyMs": 30,
+    "requiredAvailabilityPct": 99.99,
+    "mustInclude": ["cache"],
+    "encouragedComponents": ["cdn", "data pipeline", "monitoring/alerting"],
+    "hints": ["Ensure low latency despite distributed state. Avoid single points of failure."],
+    "interviewerRequirements": [
+      "Each user must be rate-limited independently and consistently across regions.",
+      "The system should avoid global bottlenecks while maintaining quota correctness.",
+      "Should include real-time metrics and alerting on quota violations or system degradation."
+    ],
+    "functionalRequirements": [
+      "Must include a cache that replicates or partitions rate-limit state regionally",
+      "Rate limiting should be enforced with user-scoped and region-scoped policies",
+      "Must simulate availability zones with failover and latency variance"
+    ],
+    "nonFunctionalRequirements": [
+      "Target RPS: 5000",
+      "Max P95 latency: 30ms",
+      "Required availability: 99.99%",
+      "Max monthly cost: $1000",
+      "Simulation duration: 300 seconds"
+    ]
+  },
+  {
+    "id": "metrics-system-easy",
+    "name": "Metrics System",
+    "description": "Create a basic system that collects and stores custom app metrics locally.",
+    "difficulty": "easy",
+    "targetRps": 100,
+    "durationSec": 120,
+    "maxMonthlyUsd": 100,
+    "maxP95LatencyMs": 200,
+    "requiredAvailabilityPct": 99.0,
+    "mustInclude": ["webserver", "database"],
+    "hints": ["Start by storing metrics as timestamped values in a simple DB."],
+    "interviewerRequirements": [
+      "Metrics should be received via HTTP and stored locally.",
+      "No external systems needed—simple write and read support.",
+      "Support querying metrics over a time range."
+    ],
+    "functionalRequirements": [
+      "Must include a webserver to receive metric data",
+      "Must include a database to persist metrics with timestamps"
+    ],
+    "nonFunctionalRequirements": [
+      "Target RPS: 100",
+      "Max P95 latency: 200ms",
+      "Required availability: 99.0%",
+      "Max monthly cost: $100",
+      "Simulation duration: 120 seconds"
+    ]
+  },
+  {
+    "id": "metrics-system-medium",
     "name": "Metrics System",
     "description": "Design a pull-based metrics system like Prometheus that scrapes multiple services.",
+    "difficulty": "medium",
     "targetRps": 1000,
     "durationSec": 300,
+    "maxMonthlyUsd": 500,
     "maxP95LatencyMs": 100,
     "requiredAvailabilityPct": 99.9,
     "mustInclude": ["data pipeline", "monitoring/alerting"],
@@ -133,7 +415,39 @@
       "Target RPS: 1000",
       "Max P95 latency: 100ms",
       "Required availability: 99.9%",
+      "Max monthly cost: $500",
       "Simulation duration: 300 seconds"
     ]
+  },
+  {
+    "id": "metrics-system-hard",
+    "name": "Metrics System",
+    "description": "Build a scalable, multi-tenant metrics platform with real-time alerts and dashboard support.",
+    "difficulty": "hard",
+    "targetRps": 5000,
+    "durationSec": 600,
+    "maxMonthlyUsd": 1500,
+    "maxP95LatencyMs": 50,
+    "requiredAvailabilityPct": 99.99,
+    "mustInclude": ["monitoring/alerting", "data pipeline"],
+    "encouragedComponents": ["messageQueue", "cache", "third party service"],
+    "hints": ["Think about downsampling, alert thresholds, and dashboard queries."],
+    "interviewerRequirements": [
+      "Support multi-tenant metrics isolation and quota enforcement.",
+      "Enable real-time alerting with low-latency threshold evaluation.",
+      "Expose APIs for dashboards and custom queries."
+    ],
+    "functionalRequirements": [
+      "Must include a data pipeline that can scale with RPS and tenants",
+      "Must include monitoring/alerting logic for low-latency threshold detection",
+      "Encouraged to buffer high-volume ingestion via message queues"
+    ],
+    "nonFunctionalRequirements": [
+      "Target RPS: 5000",
+      "Max P95 latency: 50ms",
+      "Required availability: 99.99%",
+      "Max monthly cost: $1500",
+      "Simulation duration: 600 seconds"
+    ]
   }
 ]
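
Each scenario now ships easy/medium/hard variants with a "difficulty" field and a "maxMonthlyUsd" budget. A minimal sketch of reading the new shape, assuming the file is at data/levels.json and using a trimmed-down struct rather than the full Level type from internal/level:

```go
package main

import (
	"encoding/json"
	"fmt"
	"os"
)

// levelMeta is a trimmed view of a levels.json entry; the full struct lives in internal/level.
type levelMeta struct {
	ID         string `json:"id"`
	Name       string `json:"name"`
	Difficulty string `json:"difficulty"`
	TargetRPS  int    `json:"targetRps"`
	MaxUSD     int    `json:"maxMonthlyUsd"`
}

func main() {
	raw, err := os.ReadFile("data/levels.json") // assumption: run from the repository root
	if err != nil {
		panic(err)
	}
	var levels []levelMeta
	if err := json.Unmarshal(raw, &levels); err != nil {
		panic(err)
	}

	// Group by scenario name to confirm each one now carries an easy/medium/hard variant.
	byName := map[string][]string{}
	for _, l := range levels {
		byName[l.Name] = append(byName[l.Name], l.Difficulty)
	}
	for name, diffs := range byName {
		fmt.Printf("%-15s %v\n", name, diffs)
	}
}
```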

6
go.mod

@@ -5,17 +5,13 @@ go 1.23.0
 toolchain go1.23.10
 
 require (
-	github.com/golang-jwt/jwt/v5 v5.2.2
+	github.com/golang-jwt/jwt/v5 v5.3.0
-	github.com/gorilla/mux v1.8.1
-	github.com/gorilla/websocket v1.5.3
 	github.com/joho/godotenv v1.5.1
-	github.com/potproject/claude-sdk-go v1.3.2
 	github.com/tursodatabase/libsql-client-go v0.0.0-20240902231107-85af5b9d094d
 )
 
 require (
 	github.com/antlr4-go/antlr/v4 v4.13.0 // indirect
 	github.com/coder/websocket v1.8.12 // indirect
-	github.com/tmaxmax/go-sse v0.8.0 // indirect
 	golang.org/x/exp v0.0.0-20240325151524-a685a6edb6d8 // indirect
 )
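
The dependency change named in the commit message is the golang-jwt bump from v5.2.2 to v5.3.0. For reference, a minimal sketch of issuing and verifying a token with the v5 API; the claim names and secret handling here are illustrative assumptions, not the repository's GenerateJWT implementation:

```go
package main

import (
	"fmt"
	"time"

	"github.com/golang-jwt/jwt/v5"
)

// issueToken signs a short-lived HS256 token; the claim keys are illustrative.
func issueToken(secret []byte, userID string) (string, error) {
	claims := jwt.MapClaims{
		"sub": userID,
		"exp": time.Now().Add(time.Hour).Unix(),
	}
	return jwt.NewWithClaims(jwt.SigningMethodHS256, claims).SignedString(secret)
}

// parseToken verifies the signature and returns the subject claim.
func parseToken(secret []byte, raw string) (string, error) {
	tok, err := jwt.Parse(raw, func(t *jwt.Token) (interface{}, error) {
		return secret, nil
	}, jwt.WithValidMethods([]string{"HS256"}))
	if err != nil {
		return "", err
	}
	claims, ok := tok.Claims.(jwt.MapClaims)
	if !ok {
		return "", fmt.Errorf("unexpected claims type")
	}
	sub, _ := claims["sub"].(string)
	return sub, nil
}

func main() {
	secret := []byte("example-secret") // assumption: real code loads this from the environment
	raw, err := issueToken(secret, "user-123")
	if err != nil {
		panic(err)
	}
	fmt.Println(parseToken(secret, raw))
}
```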

12
go.sum

@@ -2,18 +2,10 @@ github.com/antlr4-go/antlr/v4 v4.13.0 h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8
 github.com/antlr4-go/antlr/v4 v4.13.0/go.mod h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g=
 github.com/coder/websocket v1.8.12 h1:5bUXkEPPIbewrnkU8LTCLVaxi4N4J8ahufH2vlo4NAo=
 github.com/coder/websocket v1.8.12/go.mod h1:LNVeNrXQZfe5qhS9ALED3uA+l5pPqvwXg3CKoDBB2gs=
-github.com/golang-jwt/jwt/v5 v5.2.2 h1:Rl4B7itRWVtYIHFrSNd7vhTiz9UpLdi6gZhZ3wEeDy8=
-github.com/golang-jwt/jwt/v5 v5.2.2/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
+github.com/golang-jwt/jwt/v5 v5.3.0 h1:pv4AsKCKKZuqlgs5sUmn4x8UlGa0kEVt/puTpKx9vvo=
+github.com/golang-jwt/jwt/v5 v5.3.0/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE=
-github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
-github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
-github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg=
-github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
 github.com/joho/godotenv v1.5.1 h1:7eLL/+HRGLY0ldzfGMeQkb7vMd0as4CfYvUVzLqw0N0=
 github.com/joho/godotenv v1.5.1/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4=
-github.com/potproject/claude-sdk-go v1.3.2 h1:n27wJoGbObQ0xcWucNI1f4RY29+4vAWfyblTLfOLmSk=
-github.com/potproject/claude-sdk-go v1.3.2/go.mod h1:0cfNkl21VJGW/XZg+5VfP5eJVRRtRj24cHSBQ/lMNPA=
-github.com/tmaxmax/go-sse v0.8.0 h1:pPpTgyyi1r7vG2o6icebnpGEh3ebcnBXqDWkb7aTofs=
-github.com/tmaxmax/go-sse v0.8.0/go.mod h1:HLoxqxdH+7oSUItjtnpxjzJedfr/+Rrm/dNWBcTxJFM=
 github.com/tursodatabase/libsql-client-go v0.0.0-20240902231107-85af5b9d094d h1:dOMI4+zEbDI37KGb0TI44GUAwxHF9cMsIoDTJ7UmgfU=
 github.com/tursodatabase/libsql-client-go v0.0.0-20240902231107-85af5b9d094d/go.mod h1:l8xTsYB90uaVdMHXMCxKKLSgw5wLYBwBKKefNIUnm9s=
 golang.org/x/exp v0.0.0-20240325151524-a685a6edb6d8 h1:aAcj0Da7eBAtrTp03QXWvm88pSyOt+UgdZw2BFZ+lEw=

2
internal/auth/auth.go

@@ -131,7 +131,7 @@ func CallbackHandler(w http.ResponseWriter, r *http.Request) {
 		Secure: true, // Set to true in production (HTTPS)
 	})
 
-	http.Redirect(w, r, "/play/chat-app", http.StatusFound)
+	http.Redirect(w, r, "/play", http.StatusFound)
 }
 
 func GenerateJWT(userID string, login string, avatarUrl string) (string, error) {

13
internal/design/design.go

@@ -10,8 +10,8 @@ type Node struct {
 }
 
 type Position struct {
-	X float64 `json:"x"`
-	Y float64 `json:"y"`
+	X int `json:"x"`
+	Y int `json:"y"`
 }
 
 type Connection struct {
@@ -46,10 +46,8 @@ type CDN struct {
 }
 
 type Database struct {
 	Label       string `json:"label"`
 	Replication int    `json:"replication"`
-	MaxRPS        int  `json:"maxRPS"`
-	BaseLatencyMs int  `json:"baseLatencyMs"`
 }
 
 type DataPipeline struct {
@@ -67,14 +65,13 @@ type MessageQueue struct {
 	Label            string `json:"label"`
 	QueueCapacity    int    `json:"queueCapacity"`
 	RetentionSeconds int    `json:"retentionSeconds"`
-	ProcessingRate   int    `json:"processingRate"`
 }
 
 type Microservice struct {
 	Label           string `json:"label"`
 	InstanceCount   int    `json:"instanceCount"`
 	CPU             int    `json:"cpu"`
-	RamGb           int    `json:"ramGb"`
+	RAMGb           int    `json:"ramGb"`
 	RPSCapacity     int    `json:"rpsCapacity"`
 	MonthlyUSD      int    `json:"monthlyUsd"`
 	ScalingStrategy string `json:"scalingStrategy"`
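
Position.X and Position.Y change from float64 to int here. One practical consequence, shown as a minimal standalone sketch (not code from the repository): encoding/json now rejects fractional coordinates that previously decoded fine, so any client still sending floats has to round before submitting a design.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Position mirrors the updated struct in internal/design: integer coordinates.
type Position struct {
	X int `json:"x"`
	Y int `json:"y"`
}

func main() {
	var p Position

	// Whole numbers decode as before.
	fmt.Println(json.Unmarshal([]byte(`{"x": 120, "y": 80}`), &p), p)

	// Fractional values now fail with an UnmarshalTypeError
	// ("cannot unmarshal number 120.5 ... of type int").
	fmt.Println(json.Unmarshal([]byte(`{"x": 120.5, "y": 80}`), &p))
}
```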

71
internal/level/level.go

@@ -4,13 +4,14 @@ import (
 	"encoding/json"
 	"fmt"
 	"os"
-	"slices"
+	"strings"
 )
 
 type Level struct {
 	ID          string `json:"id"`
 	Name        string `json:"name"`
 	Description string `json:"description"`
+	Difficulty  Difficulty `json:"difficulty"`
 	TargetRPS   int `json:"targetRps"`
 	DurationSec int `json:"durationSec"`
@@ -35,7 +36,15 @@ type Level struct {
 	NonFunctionalRequirements []string `json:"nonFunctionalRequirements,omitempty"`
 }
 
-var Registry map[string]Level
+type Difficulty string
+
+const (
+	DifficultyEasy   Difficulty = "easy"
+	DifficultyMedium Difficulty = "medium"
+	DifficultyHard   Difficulty = "hard"
+)
+
+var Registry map[string]map[string]Level
 
 type FailureEvent struct {
 	Type string `json:"type"`
@@ -58,33 +67,53 @@ func LoadLevels(path string) ([]Level, error) {
 }
 
 func InitRegistry(levels []Level) {
-	Registry = make(map[string]Level)
+	Registry = make(map[string]map[string]Level)
 	for _, lvl := range levels {
-		Registry[lvl.ID] = lvl
+		// check if level already exists here
+		normalized := strings.ToLower(lvl.Name)
+		if _, ok := Registry[normalized]; !ok {
+			Registry[normalized] = make(map[string]Level)
+		}
+		// populate it
+		Registry[normalized][string(lvl.Difficulty)] = lvl
 	}
 }
 
-func GetLevelByID(id string) (*Level, error) {
-	lvl, ok := Registry[id]
+func GetLevel(name string, difficulty Difficulty) (*Level, error) {
+	name = strings.ToLower(name)
+	diffMap, ok := Registry[name]
 	if !ok {
-		return nil, fmt.Errorf("level with ID %s not found", id)
+		return nil, fmt.Errorf("level name %s not found", name)
+	}
+	lvl, ok := diffMap[string(difficulty)]
+	if !ok {
+		return nil, fmt.Errorf("difficulty %s not available for level '%s'", difficulty, name)
 	}
 	return &lvl, nil
 }
 
+func (d *Difficulty) UnmarshalJSON(b []byte) error {
+	var s string
+	if err := json.Unmarshal(b, &s); err != nil {
+		return err
+	}
+	switch s {
+	case string(DifficultyEasy), string(DifficultyMedium), string(DifficultyHard):
+		*d = Difficulty(s)
+		return nil
+	default:
+		return fmt.Errorf("invalid difficulty: %q", s)
+	}
+}
+
 func AllLevels() []Level {
 	var levels []Level
-	for _, lvl := range Registry {
-		levels = append(levels, lvl)
-	}
-	slices.SortFunc(levels, func(i Level, j Level) int {
-		if i.Name < j.Name {
-			return -1
-		}
-		if i.Name > j.Name {
-			return 1
-		}
-		return 0
-	})
+	for _, diffMap := range Registry {
+		for _, lvl := range diffMap {
+			levels = append(levels, lvl)
+		}
+	}
 	return levels
 }
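
With the registry now keyed by lowercased level name and then difficulty, callers resolve a level with a (name, difficulty) pair instead of a single ID. A minimal usage sketch under assumptions noted in the comments (the module path and data path are placeholders; the lookup mirrors the updated test that follows):

```go
package main

import (
	"fmt"

	"yourmodule/internal/level" // assumption: replace with the repository's actual module path
)

func main() {
	levels, err := level.LoadLevels("data/levels.json") // assumption: run from the repository root
	if err != nil {
		panic(err)
	}
	level.InitRegistry(levels)

	// Lookup is now by display name (case-insensitive) plus difficulty.
	lvl, err := level.GetLevel("Metrics System", level.DifficultyHard)
	if err != nil {
		panic(err)
	}
	fmt.Println(lvl.ID, lvl.Difficulty, lvl.TargetRPS)
}
```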

8
internal/level/levels_test.go

@@ -19,12 +19,12 @@ func TestLoadLevels(t *testing.T) {
 	InitRegistry(levels)
 
-	lvl, err := GetLevelByID("metrics-system")
+	lvl, err := GetLevel("Metrics System", DifficultyHard)
 	if err != nil {
-		t.Fatalf("expected to retrieve metrics-system, got %v", err)
+		t.Fatalf("expected to retrieve Metrics System (hard), got %v", err)
 	}
 
-	if lvl.ID != "metrics-system" {
-		t.Errorf("unexpected level ID: got %s, want %s", lvl.ID, "metrics-system")
+	if lvl.Difficulty != DifficultyHard {
+		t.Errorf("unexpected difficulty: got %s, want %s", lvl.Difficulty, DifficultyHard)
 	}
 }

194
internal/simulation/cache.go

@@ -1,194 +0,0 @@
package simulation
import (
"fmt"
"hash/fnv"
"time"
)
type CacheLogic struct{}
// hash function to simulate URL patterns
func hash(s string) uint32 {
h := fnv.New32a()
h.Write([]byte(s))
return h.Sum32()
}
type CacheEntry struct {
Data string
Timestamp int
AccessTime int
AccessCount int
InsertOrder int
}
func (c CacheLogic) Tick(props map[string]any, queue []*Request, tick int) ([]*Request, bool) {
// Extract cache properties
cacheTTL := int(AsFloat64(props["cacheTTL"]))
if cacheTTL == 0 {
cacheTTL = 300000 // default 5 minutes in ms
}
maxEntries := int(AsFloat64(props["maxEntries"]))
if maxEntries == 0 {
maxEntries = 1000 // default max entries
}
evictionPolicy := AsString(props["evictionPolicy"])
if evictionPolicy == "" {
evictionPolicy = "LRU" // default eviction policy
}
// Initialize cache data structures in props
cacheData, ok := props["_cacheData"].(map[string]*CacheEntry)
if !ok {
cacheData = make(map[string]*CacheEntry)
props["_cacheData"] = cacheData
}
insertCounter, ok := props["_insertCounter"].(int)
if !ok {
insertCounter = 0
}
// Current timestamp for this tick
currentTime := tick * 100 // assuming 100ms per tick
// Clean up expired entries first
c.cleanExpiredEntries(cacheData, currentTime, cacheTTL)
output := []*Request{}
for _, req := range queue {
// For URL shortener simulation, use hash of request ID to simulate repeated URL access
// This creates realistic cache patterns where some URLs are accessed multiple times
hashValue := hash(req.ID) % 100 // Create 100 possible "URLs"
cacheKey := fmt.Sprintf("url-%d-%s", hashValue, req.Type)
// Check for cache hit
entry, hit := cacheData[cacheKey]
if hit && !c.isExpired(entry, currentTime, cacheTTL) {
// Cache hit - return immediately with minimal latency
// Cache hit - served from cache component
reqCopy := *req
reqCopy.LatencyMS += 1 // 1ms for in-memory access
reqCopy.Path = append(reqCopy.Path, "cache-hit")
// Update access tracking for eviction policies
entry.AccessTime = currentTime
entry.AccessCount++
output = append(output, &reqCopy)
} else {
// Cache miss - forward request downstream
// Cache miss - forwarding to database
reqCopy := *req
reqCopy.Path = append(reqCopy.Path, "cache-miss")
// For simulation purposes, we'll cache the "response" immediately
// In a real system, this would happen when the response comes back
insertCounter++
newEntry := &CacheEntry{
Data: "cached-data", // In real implementation, this would be the response data
Timestamp: currentTime,
AccessTime: currentTime,
AccessCount: 1,
InsertOrder: insertCounter,
}
// First check if we need to evict before adding
if len(cacheData) >= maxEntries {
c.evictEntry(cacheData, evictionPolicy)
}
// Now add the new entry
cacheData[cacheKey] = newEntry
output = append(output, &reqCopy)
}
}
// Update insert counter in props
props["_insertCounter"] = insertCounter
return output, true
}
func (c CacheLogic) cleanExpiredEntries(cacheData map[string]*CacheEntry, currentTime, ttl int) {
for key, entry := range cacheData {
if c.isExpired(entry, currentTime, ttl) {
delete(cacheData, key)
}
}
}
func (c CacheLogic) isExpired(entry *CacheEntry, currentTime, ttl int) bool {
return (currentTime - entry.Timestamp) > ttl
}
func (c CacheLogic) evictEntry(cacheData map[string]*CacheEntry, policy string) {
if len(cacheData) == 0 {
return
}
var keyToEvict string
switch policy {
case "LRU":
// Evict least recently used
oldestTime := int(^uint(0) >> 1) // Max int
for key, entry := range cacheData {
if entry.AccessTime < oldestTime {
oldestTime = entry.AccessTime
keyToEvict = key
}
}
case "LFU":
// Evict least frequently used
minCount := int(^uint(0) >> 1) // Max int
for key, entry := range cacheData {
if entry.AccessCount < minCount {
minCount = entry.AccessCount
keyToEvict = key
}
}
case "FIFO":
// Evict first in (oldest insert order)
minOrder := int(^uint(0) >> 1) // Max int
for key, entry := range cacheData {
if entry.InsertOrder < minOrder {
minOrder = entry.InsertOrder
keyToEvict = key
}
}
case "random":
// Evict random entry
keys := make([]string, 0, len(cacheData))
for key := range cacheData {
keys = append(keys, key)
}
if len(keys) > 0 {
// Use timestamp as pseudo-random seed
seed := time.Now().UnixNano()
keyToEvict = keys[seed%int64(len(keys))]
}
default:
// Default to LRU
oldestTime := int(^uint(0) >> 1)
for key, entry := range cacheData {
if entry.AccessTime < oldestTime {
oldestTime = entry.AccessTime
keyToEvict = key
}
}
}
if keyToEvict != "" {
delete(cacheData, keyToEvict)
}
}

319
internal/simulation/cache_test.go

@@ -1,319 +0,0 @@
package simulation
import (
"testing"
)
func TestCacheLogic_CacheHitMiss(t *testing.T) {
cache := CacheLogic{}
props := map[string]any{
"cacheTTL": 10000, // 10 seconds
"maxEntries": 100,
"evictionPolicy": "LRU",
}
// First request should be a miss
req1 := []*Request{{ID: "req1", Type: "GET", LatencyMS: 0, Path: []string{"start"}}}
output1, alive := cache.Tick(props, req1, 1)
if !alive {
t.Errorf("Cache should be alive")
}
if len(output1) != 1 {
t.Errorf("Expected 1 output request, got %d", len(output1))
}
// Should be cache miss
if output1[0].LatencyMS != 0 { // No latency added for miss
t.Errorf("Expected 0ms latency for cache miss, got %dms", output1[0].LatencyMS)
}
// Check path contains cache-miss
found := false
for _, pathItem := range output1[0].Path {
if pathItem == "cache-miss" {
found = true
break
}
}
if !found {
t.Errorf("Expected cache-miss in path, got %v", output1[0].Path)
}
// Second identical request should be a hit
req2 := []*Request{{ID: "req1", Type: "GET", LatencyMS: 0, Path: []string{"start"}}}
output2, _ := cache.Tick(props, req2, 2)
if len(output2) != 1 {
t.Errorf("Expected 1 output request, got %d", len(output2))
}
// Should be cache hit with 1ms latency
if output2[0].LatencyMS != 1 {
t.Errorf("Expected 1ms latency for cache hit, got %dms", output2[0].LatencyMS)
}
// Check path contains cache-hit
found = false
for _, pathItem := range output2[0].Path {
if pathItem == "cache-hit" {
found = true
break
}
}
if !found {
t.Errorf("Expected cache-hit in path, got %v", output2[0].Path)
}
}
func TestCacheLogic_TTLExpiration(t *testing.T) {
cache := CacheLogic{}
props := map[string]any{
"cacheTTL": 1000, // 1 second
"maxEntries": 100,
"evictionPolicy": "LRU",
}
// First request - cache miss
req1 := []*Request{{ID: "req1", Type: "GET", LatencyMS: 0}}
cache.Tick(props, req1, 1)
// Second request within TTL - cache hit
req2 := []*Request{{ID: "req1", Type: "GET", LatencyMS: 0}}
output2, _ := cache.Tick(props, req2, 5) // 5 * 100ms = 500ms later
if output2[0].LatencyMS != 1 {
t.Errorf("Expected cache hit (1ms), got %dms", output2[0].LatencyMS)
}
// Third request after TTL expiration - cache miss
req3 := []*Request{{ID: "req1", Type: "GET", LatencyMS: 0}}
output3, _ := cache.Tick(props, req3, 15) // 15 * 100ms = 1500ms later (expired)
if output3[0].LatencyMS != 0 {
t.Errorf("Expected cache miss (0ms) after TTL expiration, got %dms", output3[0].LatencyMS)
}
}
func TestCacheLogic_MaxEntriesEviction(t *testing.T) {
cache := CacheLogic{}
props := map[string]any{
"cacheTTL": 10000,
"maxEntries": 2, // Small cache size
"evictionPolicy": "LRU",
}
// Add first entry
req1 := []*Request{{ID: "req1", Type: "GET", LatencyMS: 0}}
cache.Tick(props, req1, 1)
// Add second entry
req2 := []*Request{{ID: "req2", Type: "GET", LatencyMS: 0}}
cache.Tick(props, req2, 2)
// Verify both are cached
req1Check := []*Request{{ID: "req1", Type: "GET", LatencyMS: 0}}
output1Check, _ := cache.Tick(props, req1Check, 3)
if output1Check[0].LatencyMS != 1 {
t.Errorf("Expected cache hit for req1, got %dms latency", output1Check[0].LatencyMS)
}
req2Check := []*Request{{ID: "req2", Type: "GET", LatencyMS: 0}}
output2Check, _ := cache.Tick(props, req2Check, 4)
if output2Check[0].LatencyMS != 1 {
t.Errorf("Expected cache hit for req2, got %dms latency", output2Check[0].LatencyMS)
}
// Add third entry (should evict LRU entry)
req3 := []*Request{{ID: "req3", Type: "GET", LatencyMS: 0}}
cache.Tick(props, req3, 5)
// req1 was accessed at tick 3, req2 at tick 4, so req1 should be evicted
req1CheckAgain := []*Request{{ID: "req1", Type: "GET", LatencyMS: 0}}
output1, _ := cache.Tick(props, req1CheckAgain, 6)
if output1[0].LatencyMS != 0 {
t.Errorf("Expected cache miss for LRU evicted entry, got %dms latency", output1[0].LatencyMS)
}
// After adding req1 back, the cache should be at capacity with different items
// We don't test further to avoid complex cascading eviction scenarios
}
func TestCacheLogic_LRUEviction(t *testing.T) {
cache := CacheLogic{}
props := map[string]any{
"cacheTTL": 10000,
"maxEntries": 2,
"evictionPolicy": "LRU",
}
// Add two entries
req1 := []*Request{{ID: "req1", Type: "GET", LatencyMS: 0}}
cache.Tick(props, req1, 1)
req2 := []*Request{{ID: "req2", Type: "GET", LatencyMS: 0}}
cache.Tick(props, req2, 2)
// Access first entry (make it recently used)
req1Access := []*Request{{ID: "req1", Type: "GET", LatencyMS: 0}}
cache.Tick(props, req1Access, 3)
// Add third entry (should evict req2, since req1 was more recently accessed)
req3 := []*Request{{ID: "req3", Type: "GET", LatencyMS: 0}}
cache.Tick(props, req3, 4)
// Verify that req2 was evicted (should be cache miss)
req2Check := []*Request{{ID: "req2", Type: "GET", LatencyMS: 0}}
output2, _ := cache.Tick(props, req2Check, 5)
if output2[0].LatencyMS != 0 {
t.Errorf("Expected cache miss for LRU evicted entry, got %dms latency", output2[0].LatencyMS)
}
// After adding req2 back, the cache should contain {req2, req1} or {req2, req3}
// depending on LRU logic. We don't test further to avoid cascading evictions.
}
func TestCacheLogic_FIFOEviction(t *testing.T) {
cache := CacheLogic{}
props := map[string]any{
"cacheTTL": 10000,
"maxEntries": 2,
"evictionPolicy": "FIFO",
}
// Add two entries
req1 := []*Request{{ID: "req1", Type: "GET", LatencyMS: 0}}
cache.Tick(props, req1, 1)
req2 := []*Request{{ID: "req2", Type: "GET", LatencyMS: 0}}
cache.Tick(props, req2, 2)
// Access first entry multiple times (shouldn't matter for FIFO)
req1Access := []*Request{{ID: "req1", Type: "GET", LatencyMS: 0}}
cache.Tick(props, req1Access, 3)
cache.Tick(props, req1Access, 4)
// Add third entry (should evict req1, the first inserted)
req3 := []*Request{{ID: "req3", Type: "GET", LatencyMS: 0}}
cache.Tick(props, req3, 5)
// Check that req1 was evicted (first in, first out)
req1Check := []*Request{{ID: "req1", Type: "GET", LatencyMS: 0}}
output1, _ := cache.Tick(props, req1Check, 6)
if output1[0].LatencyMS != 0 {
t.Errorf("Expected cache miss for FIFO evicted entry, got %dms latency", output1[0].LatencyMS)
}
// After adding req1 back, the cache should contain {req2, req1} or {req3, req1}
// depending on FIFO logic. We don't test further to avoid cascading evictions.
}
func TestCacheLogic_DefaultValues(t *testing.T) {
cache := CacheLogic{}
// Empty props should use defaults
props := map[string]any{}
req := []*Request{{ID: "req1", Type: "GET", LatencyMS: 0}}
output, _ := cache.Tick(props, req, 1)
if len(output) != 1 {
t.Errorf("Expected 1 output request")
}
// Should be cache miss with 0ms latency
if output[0].LatencyMS != 0 {
t.Errorf("Expected 0ms latency for cache miss with defaults, got %dms", output[0].LatencyMS)
}
// Second request should be cache hit
req2 := []*Request{{ID: "req1", Type: "GET", LatencyMS: 0}}
output2, _ := cache.Tick(props, req2, 2)
if output2[0].LatencyMS != 1 {
t.Errorf("Expected 1ms latency for cache hit, got %dms", output2[0].LatencyMS)
}
}
func TestCacheLogic_SimpleEviction(t *testing.T) {
cache := CacheLogic{}
props := map[string]any{
"cacheTTL": 10000,
"maxEntries": 1, // Only 1 entry allowed
"evictionPolicy": "LRU",
}
// Add first entry
req1 := []*Request{{ID: "req1", Type: "GET", LatencyMS: 0}}
output1, _ := cache.Tick(props, req1, 1)
if output1[0].LatencyMS != 0 {
t.Errorf("First request should be cache miss, got %dms", output1[0].LatencyMS)
}
// Check it's cached
req1Again := []*Request{{ID: "req1", Type: "GET", LatencyMS: 0}}
output1Again, _ := cache.Tick(props, req1Again, 2)
if output1Again[0].LatencyMS != 1 {
t.Errorf("Second request should be cache hit, got %dms", output1Again[0].LatencyMS)
}
// Add second entry (should evict first)
req2 := []*Request{{ID: "req2", Type: "GET", LatencyMS: 0}}
output2, _ := cache.Tick(props, req2, 3)
if output2[0].LatencyMS != 0 {
t.Errorf("New request should be cache miss, got %dms", output2[0].LatencyMS)
}
// Check that first entry is now evicted
req1Final := []*Request{{ID: "req1", Type: "GET", LatencyMS: 0}}
output1Final, _ := cache.Tick(props, req1Final, 4)
if output1Final[0].LatencyMS != 0 {
t.Errorf("Evicted entry should be cache miss, got %dms", output1Final[0].LatencyMS)
}
// Check that second entry is now also evicted (since req1 was re-added in step 4)
req2Again := []*Request{{ID: "req2", Type: "GET", LatencyMS: 0}}
output2Again, _ := cache.Tick(props, req2Again, 5)
if output2Again[0].LatencyMS != 0 {
t.Errorf("Re-evicted entry should be cache miss, got %dms", output2Again[0].LatencyMS)
}
}
func TestCacheLogic_DifferentRequestTypes(t *testing.T) {
cache := CacheLogic{}
props := map[string]any{
"cacheTTL": 10000,
"maxEntries": 100,
"evictionPolicy": "LRU",
}
// Same ID but different type should be different cache entries
req1 := []*Request{{ID: "req1", Type: "GET", LatencyMS: 0}}
cache.Tick(props, req1, 1)
req2 := []*Request{{ID: "req1", Type: "POST", LatencyMS: 0}}
output2, _ := cache.Tick(props, req2, 2)
// Should be cache miss since different type
if output2[0].LatencyMS != 0 {
t.Errorf("Expected cache miss for different request type, got %dms latency", output2[0].LatencyMS)
}
// Original GET should still be cached
req1Again := []*Request{{ID: "req1", Type: "GET", LatencyMS: 0}}
output1, _ := cache.Tick(props, req1Again, 3)
if output1[0].LatencyMS != 1 {
t.Errorf("Expected cache hit for original request type, got %dms latency", output1[0].LatencyMS)
}
}

2
internal/simulation/cdn.go

@@ -5,7 +5,7 @@ type CDNLogic struct{}
 
 func (c CDNLogic) Tick(props map[string]any, queue []*Request, tick int) ([]*Request, bool) {
 	// read the ttl for cached content
-	ttl := int(AsFloat64(props["ttl"]))
+	ttl := int(AsFloat64(props["ttlMs"]))
 
 	// retrieve the cdn's cache from props
 	cache, ok := props["_cache"].(map[string]int)

2
internal/simulation/cdn_test.go

@@ -9,7 +9,7 @@ func TestCDNLogic(t *testing.T) {
 	cdn := CDNLogic{}
 	cache := map[string]int{} // shared mutable cache
 
 	props := map[string]any{
-		"ttl":    float64(1000),
+		"ttlMs":  float64(1000),
 		"_cache": cache,
 	}

61
internal/simulation/database.go

@@ -1,61 +0,0 @@
package simulation
type DatabaseLogic struct{}
func (d DatabaseLogic) Tick(props map[string]any, queue []*Request, tick int) ([]*Request, bool) {
// Extract database properties
replication := int(AsFloat64(props["replication"]))
if replication == 0 {
replication = 1 // default
}
// Database capacity (could be based on instance size or explicit RPS)
maxRPS := int(AsFloat64(props["maxRPS"]))
if maxRPS == 0 {
maxRPS = 1000 // default capacity
}
// Base latency for database operations
baseLatencyMs := int(AsFloat64(props["baseLatencyMs"]))
if baseLatencyMs == 0 {
baseLatencyMs = 10 // default 10ms for local DB operations
}
// Process requests up to capacity
toProcess := queue
if len(queue) > maxRPS {
toProcess = queue[:maxRPS]
// TODO: Could add queue overflow logic here
}
output := []*Request{}
for _, req := range toProcess {
// Add database latency to the request
reqCopy := *req
// Simulate different operation types and their latencies
operationLatency := baseLatencyMs
// Simple heuristic: reads are faster than writes
if req.Type == "GET" || req.Type == "READ" {
operationLatency = baseLatencyMs
} else if req.Type == "POST" || req.Type == "WRITE" {
operationLatency = baseLatencyMs * 2 // writes take longer
}
// Replication factor affects write latency
if req.Type == "POST" || req.Type == "WRITE" {
operationLatency += (replication - 1) * 5 // 5ms per replica
}
reqCopy.LatencyMS += operationLatency
reqCopy.Path = append(reqCopy.Path, "database-processed")
output = append(output, &reqCopy)
}
// Database health (could simulate failures, connection issues, etc.)
// For now, assume always healthy
return output, true
}

139
internal/simulation/database_test.go

@@ -1,139 +0,0 @@
package simulation
import (
"testing"
)
func TestDatabaseLogic_BasicProcessing(t *testing.T) {
db := DatabaseLogic{}
props := map[string]any{
"replication": 2,
"maxRPS": 100,
"baseLatencyMs": 15,
}
// Create test requests
reqs := []*Request{
{ID: "req1", Type: "GET", LatencyMS: 0, Path: []string{"start"}},
{ID: "req2", Type: "POST", LatencyMS: 0, Path: []string{"start"}},
}
output, alive := db.Tick(props, reqs, 1)
if !alive {
t.Errorf("Database should be alive")
}
if len(output) != 2 {
t.Errorf("Expected 2 output requests, got %d", len(output))
}
// Check read latency (base latency)
readReq := output[0]
if readReq.LatencyMS != 15 {
t.Errorf("Expected read latency 15ms, got %dms", readReq.LatencyMS)
}
// Check write latency (base * 2 + replication penalty)
writeReq := output[1]
expectedWriteLatency := 15*2 + (2-1)*5 // 30 + 5 = 35ms
if writeReq.LatencyMS != expectedWriteLatency {
t.Errorf("Expected write latency %dms, got %dms", expectedWriteLatency, writeReq.LatencyMS)
}
}
func TestDatabaseLogic_CapacityLimit(t *testing.T) {
db := DatabaseLogic{}
props := map[string]any{
"maxRPS": 2,
"baseLatencyMs": 10,
}
// Create more requests than capacity
reqs := []*Request{
{ID: "req1", Type: "GET"},
{ID: "req2", Type: "GET"},
{ID: "req3", Type: "GET"}, // This should be dropped
}
output, _ := db.Tick(props, reqs, 1)
if len(output) != 2 {
t.Errorf("Expected capacity limit of 2, but processed %d requests", len(output))
}
}
func TestDatabaseLogic_DefaultValues(t *testing.T) {
db := DatabaseLogic{}
// Empty props should use defaults
props := map[string]any{}
reqs := []*Request{
{ID: "req1", Type: "GET", LatencyMS: 0},
}
output, _ := db.Tick(props, reqs, 1)
if len(output) != 1 {
t.Errorf("Expected 1 output request")
}
// Should use default 10ms base latency
if output[0].LatencyMS != 10 {
t.Errorf("Expected default latency 10ms, got %dms", output[0].LatencyMS)
}
}
func TestDatabaseLogic_ReplicationEffect(t *testing.T) {
db := DatabaseLogic{}
// Test with high replication
props := map[string]any{
"replication": 5,
"baseLatencyMs": 10,
}
reqs := []*Request{
{ID: "req1", Type: "POST", LatencyMS: 0},
}
output, _ := db.Tick(props, reqs, 1)
if len(output) != 1 {
t.Errorf("Expected 1 output request")
}
// Write latency: base*2 + (replication-1)*5 = 10*2 + (5-1)*5 = 20 + 20 = 40ms
expectedLatency := 10*2 + (5-1)*5
if output[0].LatencyMS != expectedLatency {
t.Errorf("Expected latency %dms with replication=5, got %dms", expectedLatency, output[0].LatencyMS)
}
}
func TestDatabaseLogic_ReadVsWrite(t *testing.T) {
db := DatabaseLogic{}
props := map[string]any{
"replication": 1,
"baseLatencyMs": 20,
}
readReq := []*Request{{ID: "read", Type: "GET", LatencyMS: 0}}
writeReq := []*Request{{ID: "write", Type: "POST", LatencyMS: 0}}
readOutput, _ := db.Tick(props, readReq, 1)
writeOutput, _ := db.Tick(props, writeReq, 1)
// Read should be base latency
if readOutput[0].LatencyMS != 20 {
t.Errorf("Expected read latency 20ms, got %dms", readOutput[0].LatencyMS)
}
// Write should be double base latency (no replication penalty with replication=1)
if writeOutput[0].LatencyMS != 40 {
t.Errorf("Expected write latency 40ms, got %dms", writeOutput[0].LatencyMS)
}
}

203
internal/simulation/datapipeline.go

@@ -1,203 +0,0 @@
package simulation
type DataPipelineLogic struct{}
type DataBatch struct {
ID string
RecordCount int
Timestamp int
ProcessingMS int
}
type PipelineState struct {
ProcessingQueue []DataBatch
CompletedBatches int
TotalRecords int
BacklogSize int
}
func (d DataPipelineLogic) Tick(props map[string]any, queue []*Request, tick int) ([]*Request, bool) {
// Extract data pipeline properties
batchSize := int(AsFloat64(props["batchSize"]))
if batchSize == 0 {
batchSize = 500 // default batch size
}
transformation := AsString(props["transformation"])
if transformation == "" {
transformation = "map" // default transformation
}
// Get pipeline state from props (persistent state)
state, ok := props["_pipelineState"].(PipelineState)
if !ok {
state = PipelineState{
ProcessingQueue: []DataBatch{},
CompletedBatches: 0,
TotalRecords: 0,
BacklogSize: 0,
}
}
currentTime := tick * 100 // Convert tick to milliseconds
// Convert incoming requests to data batches
if len(queue) > 0 {
// Group requests into batches
batches := d.createBatches(queue, batchSize, currentTime, transformation)
// Add batches to processing queue
state.ProcessingQueue = append(state.ProcessingQueue, batches...)
state.BacklogSize += len(queue)
}
// Process batches that are ready (completed their processing time)
output := []*Request{}
remainingBatches := []DataBatch{}
for _, batch := range state.ProcessingQueue {
if currentTime >= batch.Timestamp+batch.ProcessingMS {
// Batch is complete - create output requests
for i := 0; i < batch.RecordCount; i++ {
processedReq := &Request{
ID: batch.ID + "-record-" + string(rune('0'+i)),
Timestamp: batch.Timestamp,
LatencyMS: batch.ProcessingMS,
Origin: "data-pipeline",
Type: "PROCESSED",
Path: []string{"pipeline-" + transformation},
}
output = append(output, processedReq)
}
state.CompletedBatches++
state.TotalRecords += batch.RecordCount
} else {
// Batch still processing
remainingBatches = append(remainingBatches, batch)
}
}
state.ProcessingQueue = remainingBatches
state.BacklogSize = len(remainingBatches) * batchSize
// Update persistent state
props["_pipelineState"] = state
// Health check: pipeline is healthy if backlog is not too large
maxBacklogSize := batchSize * 20 // Allow up to 20 batches in backlog
healthy := state.BacklogSize < maxBacklogSize
return output, healthy
}
// createBatches groups requests into batches and calculates processing time
func (d DataPipelineLogic) createBatches(requests []*Request, batchSize int, timestamp int, transformation string) []DataBatch {
batches := []DataBatch{}
for i := 0; i < len(requests); i += batchSize {
end := i + batchSize
if end > len(requests) {
end = len(requests)
}
recordCount := end - i
processingTime := d.calculateProcessingTime(recordCount, transformation)
batch := DataBatch{
ID: "batch-" + string(rune('A'+len(batches))),
RecordCount: recordCount,
Timestamp: timestamp,
ProcessingMS: processingTime,
}
batches = append(batches, batch)
}
return batches
}
// calculateProcessingTime determines how long a batch takes to process based on transformation type
func (d DataPipelineLogic) calculateProcessingTime(recordCount int, transformation string) int {
// Base processing time per record
baseTimePerRecord := d.getTransformationComplexity(transformation)
// Total time scales with record count but with some economies of scale
totalTime := float64(recordCount) * baseTimePerRecord
// Add batch overhead (setup, teardown, I/O)
batchOverhead := d.getBatchOverhead(transformation)
totalTime += batchOverhead
// Apply economies of scale for larger batches (slightly more efficient)
if recordCount > 100 {
scaleFactor := 0.9 // 10% efficiency gain for large batches
totalTime *= scaleFactor
}
return int(totalTime)
}
// getTransformationComplexity returns base processing time per record in milliseconds
func (d DataPipelineLogic) getTransformationComplexity(transformation string) float64 {
switch transformation {
case "map":
return 1.0 // Simple field mapping
case "filter":
return 0.5 // Just evaluate conditions
case "sort":
return 3.0 // Sorting requires more compute
case "aggregate":
return 2.0 // Grouping and calculating aggregates
case "join":
return 5.0 // Most expensive - joining with other datasets
case "deduplicate":
return 2.5 // Hash-based deduplication
case "validate":
return 1.5 // Data validation and cleaning
case "enrich":
return 4.0 // Enriching with external data
case "compress":
return 1.2 // Compression processing
case "encrypt":
return 2.0 // Encryption overhead
default:
return 1.0 // Default to simple transformation
}
}
// getBatchOverhead returns fixed overhead time per batch in milliseconds
func (d DataPipelineLogic) getBatchOverhead(transformation string) float64 {
switch transformation {
case "map", "filter", "validate":
return 50.0 // Low overhead for simple operations
case "sort", "aggregate", "deduplicate":
return 200.0 // Medium overhead for complex operations
case "join", "enrich":
return 500.0 // High overhead for operations requiring external data
case "compress", "encrypt":
return 100.0 // Medium overhead for I/O operations
default:
return 100.0 // Default overhead
}
}
// Helper function to get pipeline statistics
func (d DataPipelineLogic) GetPipelineStats(props map[string]any) map[string]interface{} {
state, ok := props["_pipelineState"].(PipelineState)
if !ok {
return map[string]interface{}{
"completedBatches": 0,
"totalRecords": 0,
"backlogSize": 0,
"queuedBatches": 0,
}
}
return map[string]interface{}{
"completedBatches": state.CompletedBatches,
"totalRecords": state.TotalRecords,
"backlogSize": state.BacklogSize,
"queuedBatches": len(state.ProcessingQueue),
}
}

396
internal/simulation/datapipeline_test.go

@@ -1,396 +0,0 @@
package simulation
import (
"testing"
)
func TestDataPipelineLogic_BasicProcessing(t *testing.T) {
logic := DataPipelineLogic{}
props := map[string]any{
"batchSize": 100.0,
"transformation": "map",
}
// Create 50 requests (less than batch size)
requests := make([]*Request, 50)
for i := range requests {
requests[i] = &Request{ID: string(rune('1' + i)), Type: "DATA", LatencyMS: 0}
}
// First tick - should create batch and start processing
output, healthy := logic.Tick(props, requests, 1)
if !healthy {
t.Error("Expected data pipeline to be healthy")
}
// Should not have output yet (batch is still processing)
if len(output) != 0 {
t.Errorf("Expected no output during processing, got %d", len(output))
}
// Check that batch was created
state, ok := props["_pipelineState"].(PipelineState)
if !ok {
t.Error("Expected pipeline state to be created")
}
if len(state.ProcessingQueue) != 1 {
t.Errorf("Expected 1 batch in processing queue, got %d", len(state.ProcessingQueue))
}
if state.ProcessingQueue[0].RecordCount != 50 {
t.Errorf("Expected batch with 50 records, got %d", state.ProcessingQueue[0].RecordCount)
}
}
func TestDataPipelineLogic_BatchCompletion(t *testing.T) {
logic := DataPipelineLogic{}
props := map[string]any{
"batchSize": 10.0,
"transformation": "filter", // Fast transformation
}
// Create 5 requests
requests := make([]*Request, 5)
for i := range requests {
requests[i] = &Request{ID: string(rune('1' + i)), Type: "DATA", LatencyMS: 0}
}
// First tick - start processing
logic.Tick(props, requests, 1)
// Wait enough ticks for processing to complete
// Filter transformation should complete quickly
var output []*Request
var healthy bool
for tick := 2; tick <= 10; tick++ {
output, healthy = logic.Tick(props, []*Request{}, tick)
if len(output) > 0 {
break
}
}
if !healthy {
t.Error("Expected data pipeline to be healthy")
}
// Should have output matching input count
if len(output) != 5 {
t.Errorf("Expected 5 output records, got %d", len(output))
}
// Check output structure
for _, req := range output {
if req.Type != "PROCESSED" {
t.Errorf("Expected PROCESSED type, got %s", req.Type)
}
if req.Origin != "data-pipeline" {
t.Errorf("Expected data-pipeline origin, got %s", req.Origin)
}
if len(req.Path) == 0 || req.Path[0] != "pipeline-filter" {
t.Error("Expected path to indicate filter transformation")
}
}
}
func TestDataPipelineLogic_MultipleBatches(t *testing.T) {
logic := DataPipelineLogic{}
props := map[string]any{
"batchSize": 10.0,
"transformation": "map",
}
// Create 25 requests (should create 3 batches: 10, 10, 5)
requests := make([]*Request, 25)
for i := range requests {
requests[i] = &Request{ID: string(rune('1' + i)), Type: "DATA", LatencyMS: 0}
}
// First tick - create batches
output, healthy := logic.Tick(props, requests, 1)
if !healthy {
t.Error("Expected data pipeline to be healthy")
}
if len(output) != 0 {
t.Error("Expected no immediate output")
}
// Check that 3 batches were created
state, ok := props["_pipelineState"].(PipelineState)
if !ok {
t.Error("Expected pipeline state to be created")
}
if len(state.ProcessingQueue) != 3 {
t.Errorf("Expected 3 batches in processing queue, got %d", len(state.ProcessingQueue))
}
// Verify batch sizes
expectedSizes := []int{10, 10, 5}
for i, batch := range state.ProcessingQueue {
if batch.RecordCount != expectedSizes[i] {
t.Errorf("Expected batch %d to have %d records, got %d",
i, expectedSizes[i], batch.RecordCount)
}
}
}
func TestDataPipelineLogic_TransformationComplexity(t *testing.T) {
logic := DataPipelineLogic{}
transformations := []string{"filter", "map", "sort", "aggregate", "join"}
for _, transformation := range transformations {
t.Run(transformation, func(t *testing.T) {
complexity := logic.getTransformationComplexity(transformation)
// Verify relative complexity ordering
switch transformation {
case "filter":
if complexity >= logic.getTransformationComplexity("map") {
t.Error("Filter should be simpler than map")
}
case "join":
if complexity <= logic.getTransformationComplexity("aggregate") {
t.Error("Join should be more complex than aggregate")
}
case "sort":
if complexity <= logic.getTransformationComplexity("map") {
t.Error("Sort should be more complex than map")
}
}
if complexity <= 0 {
t.Errorf("Expected positive complexity for %s", transformation)
}
})
}
}
func TestDataPipelineLogic_BatchOverhead(t *testing.T) {
logic := DataPipelineLogic{}
// Test different overhead levels
testCases := []struct {
transformation string
expectedRange [2]float64 // [min, max]
}{
{"map", [2]float64{0, 100}}, // Low overhead
{"join", [2]float64{300, 600}}, // High overhead
{"sort", [2]float64{150, 300}}, // Medium overhead
}
for _, tc := range testCases {
overhead := logic.getBatchOverhead(tc.transformation)
if overhead < tc.expectedRange[0] || overhead > tc.expectedRange[1] {
t.Errorf("Expected %s overhead between %.0f-%.0f, got %.0f",
tc.transformation, tc.expectedRange[0], tc.expectedRange[1], overhead)
}
}
}
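// A minimal sketch of the per-batch overhead lookup exercised above, assuming fixed
// float64 costs per transformation; the numbers are illustrative and only need to land
// inside the asserted ranges (map: low, sort: medium, join: high).
//
//	func (d DataPipelineLogic) getBatchOverhead(transformation string) float64 {
//		switch transformation {
//		case "join":
//			return 400 // high: cross-batch lookups
//		case "sort", "aggregate":
//			return 200 // medium: the whole batch must be held and reordered
//		default:
//			return 50 // low: record-at-a-time work (map, filter, ...)
//		}
//	}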
func TestDataPipelineLogic_ProcessingTime(t *testing.T) {
logic := DataPipelineLogic{}
// Test that processing time scales with record count
smallBatch := logic.calculateProcessingTime(10, "map")
largeBatch := logic.calculateProcessingTime(100, "map")
if largeBatch <= smallBatch {
t.Error("Expected larger batch to take more time")
}
// Test that complex transformations take longer
simpleTime := logic.calculateProcessingTime(50, "filter")
complexTime := logic.calculateProcessingTime(50, "join")
if complexTime <= simpleTime {
t.Error("Expected complex transformation to take longer")
}
// Test economies of scale (large batches should be more efficient per record)
smallPerRecord := float64(smallBatch) / 10.0
largePerRecord := float64(largeBatch) / 100.0
if largePerRecord >= smallPerRecord {
t.Error("Expected economies of scale for larger batches")
}
}
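// A minimal sketch of a processing-time model that satisfies the three properties
// asserted above: total time grows with record count, more complex transformations
// take longer, and per-record cost shrinks for larger batches. The sub-linear
// exponent and the scale factor are assumptions for illustration only.
//
//	func (d DataPipelineLogic) calculateProcessingTime(recordCount int, transformation string) int {
//		complexity := d.getTransformationComplexity(transformation)
//		// recordCount^0.8 grows with the count, while recordCount^0.8 / recordCount
//		// shrinks as the count grows -- the economies of scale checked above.
//		work := complexity * math.Pow(float64(recordCount), 0.8)
//		return int(d.getBatchOverhead(transformation) + 10*work)
//	}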
func TestDataPipelineLogic_HealthCheck(t *testing.T) {
logic := DataPipelineLogic{}
props := map[string]any{
"batchSize": 10.0,
"transformation": "join", // Slow transformation
}
// Create a large number of requests to test backlog health
requests := make([]*Request, 300) // 30 batches (above healthy threshold)
for i := range requests {
requests[i] = &Request{ID: string(rune('1' + (i % 26))), Type: "DATA", LatencyMS: 0}
}
// First tick - should create many batches
output, healthy := logic.Tick(props, requests, 1)
// Should be unhealthy due to large backlog
if healthy {
t.Error("Expected data pipeline to be unhealthy with large backlog")
}
if len(output) != 0 {
t.Error("Expected no immediate output with slow transformation")
}
// Check backlog size
state, ok := props["_pipelineState"].(PipelineState)
if !ok {
t.Error("Expected pipeline state to be created")
}
if state.BacklogSize < 200 {
t.Errorf("Expected large backlog, got %d", state.BacklogSize)
}
}
func TestDataPipelineLogic_DefaultValues(t *testing.T) {
logic := DataPipelineLogic{}
// Empty props should use defaults
props := map[string]any{}
requests := []*Request{{ID: "1", Type: "DATA", LatencyMS: 0}}
output, healthy := logic.Tick(props, requests, 1)
if !healthy {
t.Error("Expected pipeline to be healthy with default values")
}
if len(output) != 0 {
t.Error("Expected no immediate output")
}
// Should use default batch size and transformation
state, ok := props["_pipelineState"].(PipelineState)
if !ok {
t.Error("Expected pipeline state to be created with defaults")
}
if len(state.ProcessingQueue) != 1 {
t.Error("Expected one batch with default settings")
}
}
func TestDataPipelineLogic_PipelineStats(t *testing.T) {
logic := DataPipelineLogic{}
props := map[string]any{
"batchSize": 5.0,
"transformation": "filter",
}
// Initial stats should be empty
stats := logic.GetPipelineStats(props)
if stats["completedBatches"] != 0 {
t.Error("Expected initial completed batches to be 0")
}
// Process some data
requests := make([]*Request, 10)
for i := range requests {
requests[i] = &Request{ID: string(rune('1' + i)), Type: "DATA", LatencyMS: 0}
}
logic.Tick(props, requests, 1)
// Check stats after processing
stats = logic.GetPipelineStats(props)
if stats["queuedBatches"] != 2 {
t.Errorf("Expected 2 queued batches, got %v", stats["queuedBatches"])
}
if stats["backlogSize"] != 10 {
t.Errorf("Expected backlog size of 10, got %v", stats["backlogSize"])
}
}
func TestDataPipelineLogic_ContinuousProcessing(t *testing.T) {
logic := DataPipelineLogic{}
props := map[string]any{
"batchSize": 5.0,
"transformation": "map",
}
// Process multiple waves of data
totalOutput := 0
for wave := 0; wave < 3; wave++ {
requests := make([]*Request, 5)
for i := range requests {
requests[i] = &Request{ID: string(rune('A' + wave*5 + i)), Type: "DATA", LatencyMS: 0}
}
// Process each wave
for tick := wave*10 + 1; tick <= wave*10+5; tick++ {
var output []*Request
if tick == wave*10+1 {
output, _ = logic.Tick(props, requests, tick)
} else {
output, _ = logic.Tick(props, []*Request{}, tick)
}
totalOutput += len(output)
}
}
// Should have processed all data eventually
if totalOutput != 15 {
t.Errorf("Expected 15 total output records, got %d", totalOutput)
}
// Check final stats
stats := logic.GetPipelineStats(props)
if stats["totalRecords"] != 15 {
t.Errorf("Expected 15 total records processed, got %v", stats["totalRecords"])
}
}
func TestDataPipelineLogic_EmptyQueue(t *testing.T) {
logic := DataPipelineLogic{}
props := map[string]any{
"batchSize": 10.0,
"transformation": "map",
}
// Process empty queue
output, healthy := logic.Tick(props, []*Request{}, 1)
if !healthy {
t.Error("Expected pipeline to be healthy with empty queue")
}
if len(output) != 0 {
t.Error("Expected no output with empty queue")
}
// State should be initialized but empty
state, ok := props["_pipelineState"].(PipelineState)
if !ok {
t.Error("Expected pipeline state to be initialized")
}
if len(state.ProcessingQueue) != 0 {
t.Error("Expected empty processing queue")
}
}
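The tests above only touch a few fields of the pipeline's internal state (stored under props["_pipelineState"]), so the following is just one plausible shape for it; the element type name and any fields beyond ProcessingQueue, RecordCount, and BacklogSize are assumptions.

type Batch struct {
	RecordCount int // records grouped into this batch
	StartedTick int // tick when processing began (assumed field)
}

type PipelineState struct {
	ProcessingQueue []Batch // batches queued for, or in, processing
	BacklogSize     int     // records accepted but not yet emitted
	CompletedCount  int     // backs the "completedBatches" stat (assumed name)
	TotalRecords    int     // backs the "totalRecords" stat (assumed name)
}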

22
internal/simulation/engine.go

@ -33,8 +33,6 @@ type Request struct {
 	Type string
 	// records where it's been (used to prevent loops)
 	Path []string
-	// cache key for cache-aside pattern (used by microservices)
-	CacheKey string
 }
 // what the system looks like given a tick
@ -130,7 +128,7 @@ func (e *SimulationEngine) Run(duration int, tickMs int) []*TickSnapshot {
 		}
 		// this will prepopulate some props so that we can use different load balancing algorithms
-		if node.Type == "loadbalancer" || node.Type == "loadBalancer" {
+		if node.Type == "loadbalancer" {
 			targets := e.Edges[id]
 			node.Props["_numTargets"] = float64(len(targets))
 			node.Props["_targetIDs"] = targets
@ -181,28 +179,12 @@ func (e *SimulationEngine) Run(duration int, tickMs int) []*TickSnapshot {
 func GetLogicForType(t string) NodeLogic {
 	switch t {
-	case "user":
-		return UserLogic{}
 	case "webserver":
 		return WebServerLogic{}
-	case "loadBalancer":
+	case "loadbalancer":
 		return LoadBalancerLogic{}
 	case "cdn":
 		return CDNLogic{}
-	case "database":
-		return DatabaseLogic{}
-	case "cache":
-		return CacheLogic{}
-	case "messageQueue":
-		return MessageQueueLogic{}
-	case "microservice":
-		return MicroserviceLogic{}
-	case "monitoring/alerting":
-		return MonitoringLogic{}
-	case "third party service":
-		return ThirdPartyServiceLogic{}
-	case "data pipeline":
-		return DataPipelineLogic{}
 	default:
 		return nil
 	}

863
internal/simulation/engine_test.go

@ -1,8 +1,6 @@
 package simulation
 import (
-	"encoding/json"
-	"os"
 	"testing"
 	"systemdesigngame/internal/design"
@ -12,8 +10,8 @@ import (
 func TestSimpleChainSimulation(t *testing.T) {
 	d := design.Design{
 		Nodes: []design.Node{
-			{ID: "a", Type: "webserver", Props: map[string]any{"rpsCapacity": 1, "baseLatencyMs": 10}},
-			{ID: "b", Type: "webserver", Props: map[string]any{"rpsCapacity": 1, "baseLatencyMs": 10}},
+			{ID: "a", Type: "webserver", Props: map[string]any{"capacityRPS": 1, "baseLatencyMs": 10}},
+			{ID: "b", Type: "webserver", Props: map[string]any{"capacityRPS": 1, "baseLatencyMs": 10}},
 		},
 		Connections: []design.Connection{
 			{Source: "a", Target: "b"},
@ -51,8 +49,8 @@ func TestSimpleChainSimulation(t *testing.T) {
 func TestSingleTickRouting(t *testing.T) {
 	d := design.Design{
 		Nodes: []design.Node{
-			{ID: "a", Type: "webserver", Props: map[string]any{"rpsCapacity": 1.0, "baseLatencyMs": 10.0}},
-			{ID: "b", Type: "webserver", Props: map[string]any{"rpsCapacity": 1.0, "baseLatencyMs": 10.0}},
+			{ID: "a", Type: "webserver", Props: map[string]any{"capacityRPS": 1.0, "baseLatencyMs": 10.0}},
+			{ID: "b", Type: "webserver", Props: map[string]any{"capacityRPS": 1.0, "baseLatencyMs": 10.0}},
 		},
 		Connections: []design.Connection{
 			{Source: "a", Target: "b"},
@ -87,7 +85,7 @@ func TestSingleTickRouting(t *testing.T) {
 func TestHighRPSSimulation(t *testing.T) {
 	d := design.Design{
 		Nodes: []design.Node{
-			{ID: "entry", Type: "webserver", Props: map[string]any{"rpsCapacity": 5000, "baseLatencyMs": 1}},
+			{ID: "entry", Type: "webserver", Props: map[string]any{"capacityRPS": 5000, "baseLatencyMs": 1}},
 		},
 		Connections: []design.Connection{},
 	}
@ -108,854 +106,3 @@ func TestHighRPSSimulation(t *testing.T) {
 		t.Errorf("expected %d total emitted requests, got %d", expected, totalEmitted)
 	}
 }
func TestDatabaseIntegration(t *testing.T) {
design := design.Design{
Nodes: []design.Node{
{
ID: "webserver",
Type: "webserver",
Props: map[string]interface{}{
"rpsCapacity": 10,
},
},
{
ID: "database",
Type: "database",
Props: map[string]interface{}{
"replication": 2,
"maxRPS": 100,
"baseLatencyMs": 20,
},
},
},
Connections: []design.Connection{
{
Source: "webserver",
Target: "database",
},
},
}
engine := NewEngineFromDesign(design, 100)
engine.RPS = 5
engine.EntryNode = "webserver"
snapshots := engine.Run(3, 100)
if len(snapshots) != 3 {
t.Errorf("Expected 3 snapshots, got %d", len(snapshots))
}
// Verify database node exists and is healthy
if len(engine.Nodes) != 2 {
t.Errorf("Expected 2 nodes (webserver + database), got %d", len(engine.Nodes))
}
dbNode, exists := engine.Nodes["database"]
if !exists {
t.Errorf("Database node should exist in simulation")
}
if !dbNode.Alive {
t.Errorf("Database node should be alive")
}
if dbNode.Type != "database" {
t.Errorf("Expected database type, got %s", dbNode.Type)
}
}
func TestCacheIntegration(t *testing.T) {
design := design.Design{
Nodes: []design.Node{
{
ID: "webserver",
Type: "webserver",
Props: map[string]interface{}{
"rpsCapacity": 10,
},
},
{
ID: "cache",
Type: "cache",
Props: map[string]interface{}{
"cacheTTL": 5000,
"maxEntries": 50,
"evictionPolicy": "LRU",
},
},
{
ID: "database",
Type: "database",
Props: map[string]interface{}{
"replication": 1,
"maxRPS": 100,
"baseLatencyMs": 15,
},
},
},
Connections: []design.Connection{
{
Source: "webserver",
Target: "cache",
},
{
Source: "cache",
Target: "database",
},
},
}
engine := NewEngineFromDesign(design, 100)
engine.RPS = 5
engine.EntryNode = "webserver"
snapshots := engine.Run(5, 100)
if len(snapshots) != 5 {
t.Errorf("Expected 5 snapshots, got %d", len(snapshots))
}
// Verify all nodes exist and are healthy
if len(engine.Nodes) != 3 {
t.Errorf("Expected 3 nodes (webserver + cache + database), got %d", len(engine.Nodes))
}
cacheNode, exists := engine.Nodes["cache"]
if !exists {
t.Errorf("Cache node should exist in simulation")
}
if !cacheNode.Alive {
t.Errorf("Cache node should be alive")
}
if cacheNode.Type != "cache" {
t.Errorf("Expected cache type, got %s", cacheNode.Type)
}
// Verify cache has internal state
cacheData, ok := cacheNode.Props["_cacheData"]
if !ok {
t.Errorf("Cache should have internal _cacheData state")
}
// Cache data should be a map
if _, ok := cacheData.(map[string]*CacheEntry); !ok {
t.Errorf("Cache data should be map[string]*CacheEntry")
}
}
func TestMessageQueueIntegration(t *testing.T) {
design := design.Design{
Nodes: []design.Node{
{
ID: "producer",
Type: "webserver",
Props: map[string]interface{}{
"rpsCapacity": 10,
},
},
{
ID: "messagequeue",
Type: "messageQueue",
Props: map[string]interface{}{
"queueCapacity": 50,
"retentionSeconds": 3600,
"processingRate": 5,
},
},
{
ID: "consumer",
Type: "webserver",
Props: map[string]interface{}{
"rpsCapacity": 20,
},
},
},
Connections: []design.Connection{
{
Source: "producer",
Target: "messagequeue",
},
{
Source: "messagequeue",
Target: "consumer",
},
},
}
engine := NewEngineFromDesign(design, 100)
engine.RPS = 3
engine.EntryNode = "producer"
snapshots := engine.Run(5, 100)
if len(snapshots) != 5 {
t.Errorf("Expected 5 snapshots, got %d", len(snapshots))
}
// Verify all nodes exist and are healthy
if len(engine.Nodes) != 3 {
t.Errorf("Expected 3 nodes (producer + queue + consumer), got %d", len(engine.Nodes))
}
queueNode, exists := engine.Nodes["messagequeue"]
if !exists {
t.Errorf("Message queue node should exist in simulation")
}
if !queueNode.Alive {
t.Errorf("Message queue node should be alive")
}
if queueNode.Type != "messageQueue" {
t.Errorf("Expected messageQueue type, got %s", queueNode.Type)
}
// Verify queue has internal state
messageQueue, ok := queueNode.Props["_messageQueue"]
if !ok {
t.Errorf("Message queue should have internal _messageQueue state")
}
// Message queue should be a slice
if _, ok := messageQueue.([]QueuedMessage); !ok {
t.Errorf("Message queue should be []QueuedMessage")
}
}
func TestMicroserviceIntegration(t *testing.T) {
// Load the microservice design
designData, err := os.ReadFile("testdata/microservice_design.json")
if err != nil {
t.Fatalf("Failed to read microservice design: %v", err)
}
var d design.Design
if err := json.Unmarshal(designData, &d); err != nil {
t.Fatalf("Failed to unmarshal design: %v", err)
}
// Create engine
engine := NewEngineFromDesign(d, 100)
if engine == nil {
t.Fatalf("Failed to create engine from microservice design")
}
// Set up simulation parameters
engine.RPS = 30
engine.EntryNode = "webserver-1"
// Run simulation for 5 ticks
snapshots := engine.Run(5, 100)
if len(snapshots) != 5 {
t.Errorf("Expected 5 snapshots, got %d", len(snapshots))
}
// Verify microservice nodes exist and are configured correctly
userService, exists := engine.Nodes["microservice-1"]
if !exists {
t.Errorf("User service microservice node should exist")
}
if !userService.Alive {
t.Errorf("User service should be alive")
}
if userService.Type != "microservice" {
t.Errorf("Expected microservice type, got %s", userService.Type)
}
orderService, exists := engine.Nodes["microservice-2"]
if !exists {
t.Errorf("Order service microservice node should exist")
}
if !orderService.Alive {
t.Errorf("Order service should be alive")
}
// Verify auto-scaling properties are preserved
userServiceInstanceCount := userService.Props["instanceCount"]
if userServiceInstanceCount == nil {
t.Errorf("User service should have instanceCount property")
}
// Verify different scaling strategies
userScalingStrategy := userService.Props["scalingStrategy"]
if userScalingStrategy != "auto" {
t.Errorf("Expected auto scaling strategy for user service, got %v", userScalingStrategy)
}
orderScalingStrategy := orderService.Props["scalingStrategy"]
if orderScalingStrategy != "manual" {
t.Errorf("Expected manual scaling strategy for order service, got %v", orderScalingStrategy)
}
// Verify resource configurations
userCPU := userService.Props["cpu"]
if userCPU != 4.0 {
t.Errorf("Expected user service to have 4 CPU cores, got %v", userCPU)
}
orderRAM := orderService.Props["ramGb"]
if orderRAM != 4.0 {
t.Errorf("Expected order service to have 4GB RAM, got %v", orderRAM)
}
// Check that microservices processed requests through the simulation
lastSnapshot := snapshots[len(snapshots)-1]
if len(lastSnapshot.QueueSizes) == 0 {
t.Errorf("Expected queue sizes to be tracked in snapshots")
}
// Verify load balancer connected to microservices
loadBalancer, exists := engine.Nodes["lb-1"]
if !exists {
t.Errorf("Load balancer should exist")
}
if !loadBalancer.Alive {
t.Errorf("Load balancer should be alive")
}
// Verify database connection exists
database, exists := engine.Nodes["db-1"]
if !exists {
t.Errorf("Database should exist")
}
if !database.Alive {
t.Errorf("Database should be alive")
}
}
func TestMonitoringIntegration(t *testing.T) {
// Load the monitoring design
designData, err := os.ReadFile("testdata/monitoring_design.json")
if err != nil {
t.Fatalf("Failed to read monitoring design: %v", err)
}
var d design.Design
if err := json.Unmarshal(designData, &d); err != nil {
t.Fatalf("Failed to unmarshal design: %v", err)
}
// Create engine
engine := NewEngineFromDesign(d, 100)
if engine == nil {
t.Fatalf("Failed to create engine from monitoring design")
}
// Set up simulation parameters
engine.RPS = 20
engine.EntryNode = "webserver-1"
// Run simulation for 10 ticks to allow metrics collection
snapshots := engine.Run(10, 100)
if len(snapshots) != 10 {
t.Errorf("Expected 10 snapshots, got %d", len(snapshots))
}
// Verify monitoring nodes exist and are configured correctly
monitor1, exists := engine.Nodes["monitor-1"]
if !exists {
t.Errorf("Latency monitor node should exist")
}
if !monitor1.Alive {
t.Errorf("Latency monitor should be alive")
}
if monitor1.Type != "monitoring/alerting" {
t.Errorf("Expected monitoring/alerting type, got %s", monitor1.Type)
}
monitor2, exists := engine.Nodes["monitor-2"]
if !exists {
t.Errorf("Error rate monitor node should exist")
}
if !monitor2.Alive {
t.Errorf("Error rate monitor should be alive")
}
// Verify monitoring properties are preserved
tool1 := monitor1.Props["tool"]
if tool1 != "Prometheus" {
t.Errorf("Expected Prometheus tool for monitor-1, got %v", tool1)
}
tool2 := monitor2.Props["tool"]
if tool2 != "Datadog" {
t.Errorf("Expected Datadog tool for monitor-2, got %v", tool2)
}
alertMetric1 := monitor1.Props["alertMetric"]
if alertMetric1 != "latency" {
t.Errorf("Expected latency alert metric for monitor-1, got %v", alertMetric1)
}
alertMetric2 := monitor2.Props["alertMetric"]
if alertMetric2 != "error_rate" {
t.Errorf("Expected error_rate alert metric for monitor-2, got %v", alertMetric2)
}
// Check that metrics were collected during simulation
metrics1, ok := monitor1.Props["_metrics"]
if !ok {
t.Errorf("Expected monitor-1 to have collected metrics")
}
if metrics1 == nil {
t.Errorf("Expected monitor-1 metrics to be non-nil")
}
// Check alert count tracking
alertCount1, ok := monitor1.Props["_alertCount"]
if !ok {
t.Errorf("Expected monitor-1 to track alert count")
}
if alertCount1 == nil {
t.Errorf("Expected monitor-1 alert count to be tracked")
}
// Verify other components in the chain
webserver, exists := engine.Nodes["webserver-1"]
if !exists {
t.Errorf("Web server should exist")
}
if !webserver.Alive {
t.Errorf("Web server should be alive")
}
loadBalancer, exists := engine.Nodes["lb-1"]
if !exists {
t.Errorf("Load balancer should exist")
}
if !loadBalancer.Alive {
t.Errorf("Load balancer should be alive")
}
// Verify microservices
userService, exists := engine.Nodes["microservice-1"]
if !exists {
t.Errorf("User service should exist")
}
if !userService.Alive {
t.Errorf("User service should be alive")
}
orderService, exists := engine.Nodes["microservice-2"]
if !exists {
t.Errorf("Order service should exist")
}
if !orderService.Alive {
t.Errorf("Order service should be alive")
}
// Verify database
database, exists := engine.Nodes["db-1"]
if !exists {
t.Errorf("Database should exist")
}
if !database.Alive {
t.Errorf("Database should be alive")
}
// Check that requests flowed through the monitoring chain
lastSnapshot := snapshots[len(snapshots)-1]
if len(lastSnapshot.QueueSizes) == 0 {
t.Errorf("Expected queue sizes to be tracked in snapshots")
}
// Verify monitoring nodes processed requests
if lastSnapshot.NodeHealth["monitor-1"] != true {
t.Errorf("Expected monitor-1 to be healthy in final snapshot")
}
if lastSnapshot.NodeHealth["monitor-2"] != true {
t.Errorf("Expected monitor-2 to be healthy in final snapshot")
}
}
func TestThirdPartyServiceIntegration(t *testing.T) {
// Load the third party service design
designData, err := os.ReadFile("testdata/thirdpartyservice_design.json")
if err != nil {
t.Fatalf("Failed to read third party service design: %v", err)
}
var d design.Design
if err := json.Unmarshal(designData, &d); err != nil {
t.Fatalf("Failed to unmarshal design: %v", err)
}
// Create engine
engine := NewEngineFromDesign(d, 100)
if engine == nil {
t.Fatalf("Failed to create engine from third party service design")
}
// Set up simulation parameters
engine.RPS = 10 // Lower RPS to reduce chance of random failures affecting health
engine.EntryNode = "webserver-1"
// Run simulation for 5 ticks (shorter run to reduce random failure impact)
snapshots := engine.Run(5, 100)
if len(snapshots) != 5 {
t.Errorf("Expected 5 snapshots, got %d", len(snapshots))
}
// Verify third party service nodes exist and are configured correctly
stripeService, exists := engine.Nodes["stripe-service"]
if !exists {
t.Errorf("Stripe service node should exist")
}
if stripeService.Type != "third party service" {
t.Errorf("Expected third party service type, got %s", stripeService.Type)
}
twilioService, exists := engine.Nodes["twilio-service"]
if !exists {
t.Errorf("Twilio service node should exist")
}
sendgridService, exists := engine.Nodes["sendgrid-service"]
if !exists {
t.Errorf("SendGrid service node should exist")
}
slackService, exists := engine.Nodes["slack-service"]
if !exists {
t.Errorf("Slack service node should exist")
}
// Note: We don't check if services are alive here because the random failure
// simulation can cause services to go down, which is realistic behavior
// Verify provider configurations are preserved
stripeProvider := stripeService.Props["provider"]
if stripeProvider != "Stripe" {
t.Errorf("Expected Stripe provider, got %v", stripeProvider)
}
twilioProvider := twilioService.Props["provider"]
if twilioProvider != "Twilio" {
t.Errorf("Expected Twilio provider, got %v", twilioProvider)
}
sendgridProvider := sendgridService.Props["provider"]
if sendgridProvider != "SendGrid" {
t.Errorf("Expected SendGrid provider, got %v", sendgridProvider)
}
slackProvider := slackService.Props["provider"]
if slackProvider != "Slack" {
t.Errorf("Expected Slack provider, got %v", slackProvider)
}
// Verify latency configurations
stripeLatency := stripeService.Props["latency"]
if stripeLatency != 180.0 {
t.Errorf("Expected Stripe latency 180, got %v", stripeLatency)
}
twilioLatency := twilioService.Props["latency"]
if twilioLatency != 250.0 {
t.Errorf("Expected Twilio latency 250, got %v", twilioLatency)
}
// Check that service status was initialized and tracked
stripeStatus, ok := stripeService.Props["_serviceStatus"]
if !ok {
t.Errorf("Expected Stripe service status to be tracked")
}
if stripeStatus == nil {
t.Errorf("Expected Stripe service status to be non-nil")
}
// Verify other components in the chain
webserver, exists := engine.Nodes["webserver-1"]
if !exists {
t.Errorf("Web server should exist")
}
if !webserver.Alive {
t.Errorf("Web server should be alive")
}
// Verify microservices
paymentService, exists := engine.Nodes["microservice-1"]
if !exists {
t.Errorf("Payment service should exist")
}
if !paymentService.Alive {
t.Errorf("Payment service should be alive")
}
notificationService, exists := engine.Nodes["microservice-2"]
if !exists {
t.Errorf("Notification service should exist")
}
if !notificationService.Alive {
t.Errorf("Notification service should be alive")
}
// Verify monitoring and database
monitor, exists := engine.Nodes["monitor-1"]
if !exists {
t.Errorf("Monitor should exist")
}
if !monitor.Alive {
t.Errorf("Monitor should be alive")
}
database, exists := engine.Nodes["db-1"]
if !exists {
t.Errorf("Database should exist")
}
if !database.Alive {
t.Errorf("Database should be alive")
}
// Check that requests flowed through the third party services
lastSnapshot := snapshots[len(snapshots)-1]
if len(lastSnapshot.QueueSizes) == 0 {
t.Errorf("Expected queue sizes to be tracked in snapshots")
}
// Verify third party services are being tracked in snapshots
// Note: We don't assert health status because random failures are realistic
_, stripeHealthTracked := lastSnapshot.NodeHealth["stripe-service"]
if !stripeHealthTracked {
t.Errorf("Expected Stripe service health to be tracked in snapshots")
}
_, twilioHealthTracked := lastSnapshot.NodeHealth["twilio-service"]
if !twilioHealthTracked {
t.Errorf("Expected Twilio service health to be tracked in snapshots")
}
_, sendgridHealthTracked := lastSnapshot.NodeHealth["sendgrid-service"]
if !sendgridHealthTracked {
t.Errorf("Expected SendGrid service health to be tracked in snapshots")
}
_, slackHealthTracked := lastSnapshot.NodeHealth["slack-service"]
if !slackHealthTracked {
t.Errorf("Expected Slack service health to be tracked in snapshots")
}
}
func TestDataPipelineIntegration(t *testing.T) {
// Load the data pipeline design
designData, err := os.ReadFile("testdata/datapipeline_design.json")
if err != nil {
t.Fatalf("Failed to read data pipeline design: %v", err)
}
var d design.Design
if err := json.Unmarshal(designData, &d); err != nil {
t.Fatalf("Failed to unmarshal design: %v", err)
}
// Create engine
engine := NewEngineFromDesign(d, 100)
if engine == nil {
t.Fatalf("Failed to create engine from data pipeline design")
}
// Set up simulation parameters
engine.RPS = 20
engine.EntryNode = "data-source"
// Run simulation for 10 ticks to test data pipeline processing
snapshots := engine.Run(10, 100)
if len(snapshots) != 10 {
t.Errorf("Expected 10 snapshots, got %d", len(snapshots))
}
// Verify data pipeline nodes exist and are configured correctly
etlPipeline1, exists := engine.Nodes["etl-pipeline-1"]
if !exists {
t.Errorf("ETL Pipeline 1 node should exist")
}
if etlPipeline1.Type != "data pipeline" {
t.Errorf("Expected data pipeline type, got %s", etlPipeline1.Type)
}
etlPipeline2, exists := engine.Nodes["etl-pipeline-2"]
if !exists {
t.Errorf("ETL Pipeline 2 node should exist")
}
mlPipeline, exists := engine.Nodes["ml-pipeline"]
if !exists {
t.Errorf("ML Pipeline node should exist")
}
analyticsPipeline, exists := engine.Nodes["analytics-pipeline"]
if !exists {
t.Errorf("Analytics Pipeline node should exist")
}
compressionPipeline, exists := engine.Nodes["compression-pipeline"]
if !exists {
t.Errorf("Compression Pipeline node should exist")
}
// Verify pipeline configurations are preserved
etl1BatchSize := etlPipeline1.Props["batchSize"]
if etl1BatchSize != 100.0 {
t.Errorf("Expected ETL Pipeline 1 batch size 100, got %v", etl1BatchSize)
}
etl1Transformation := etlPipeline1.Props["transformation"]
if etl1Transformation != "validate" {
t.Errorf("Expected validate transformation, got %v", etl1Transformation)
}
etl2BatchSize := etlPipeline2.Props["batchSize"]
if etl2BatchSize != 50.0 {
t.Errorf("Expected ETL Pipeline 2 batch size 50, got %v", etl2BatchSize)
}
etl2Transformation := etlPipeline2.Props["transformation"]
if etl2Transformation != "aggregate" {
t.Errorf("Expected aggregate transformation, got %v", etl2Transformation)
}
mlTransformation := mlPipeline.Props["transformation"]
if mlTransformation != "enrich" {
t.Errorf("Expected enrich transformation for ML pipeline, got %v", mlTransformation)
}
analyticsTransformation := analyticsPipeline.Props["transformation"]
if analyticsTransformation != "join" {
t.Errorf("Expected join transformation for analytics pipeline, got %v", analyticsTransformation)
}
compressionTransformation := compressionPipeline.Props["transformation"]
if compressionTransformation != "compress" {
t.Errorf("Expected compress transformation, got %v", compressionTransformation)
}
// Check that pipeline state was initialized and tracked
etl1State, ok := etlPipeline1.Props["_pipelineState"]
if !ok {
t.Errorf("Expected ETL Pipeline 1 to have pipeline state")
}
if etl1State == nil {
t.Errorf("Expected ETL Pipeline 1 state to be non-nil")
}
// Verify other components in the data flow
dataSource, exists := engine.Nodes["data-source"]
if !exists {
t.Errorf("Data source should exist")
}
if !dataSource.Alive {
t.Errorf("Data source should be alive")
}
rawDataQueue, exists := engine.Nodes["raw-data-queue"]
if !exists {
t.Errorf("Raw data queue should exist")
}
if !rawDataQueue.Alive {
t.Errorf("Raw data queue should be alive")
}
// Verify storage components
cache, exists := engine.Nodes["cache-1"]
if !exists {
t.Errorf("Feature cache should exist")
}
if !cache.Alive {
t.Errorf("Feature cache should be alive")
}
dataWarehouse, exists := engine.Nodes["data-warehouse"]
if !exists {
t.Errorf("Data warehouse should exist")
}
if !dataWarehouse.Alive {
t.Errorf("Data warehouse should be alive")
}
// Verify monitoring
monitor, exists := engine.Nodes["monitoring-1"]
if !exists {
t.Errorf("Pipeline monitor should exist")
}
if !monitor.Alive {
t.Errorf("Pipeline monitor should be alive")
}
// Check that data pipelines are being tracked in snapshots
lastSnapshot := snapshots[len(snapshots)-1]
if len(lastSnapshot.QueueSizes) == 0 {
t.Errorf("Expected queue sizes to be tracked in snapshots")
}
// Verify data pipeline health is tracked
_, etl1HealthTracked := lastSnapshot.NodeHealth["etl-pipeline-1"]
if !etl1HealthTracked {
t.Errorf("Expected ETL Pipeline 1 health to be tracked in snapshots")
}
_, etl2HealthTracked := lastSnapshot.NodeHealth["etl-pipeline-2"]
if !etl2HealthTracked {
t.Errorf("Expected ETL Pipeline 2 health to be tracked in snapshots")
}
_, mlHealthTracked := lastSnapshot.NodeHealth["ml-pipeline"]
if !mlHealthTracked {
t.Errorf("Expected ML Pipeline health to be tracked in snapshots")
}
_, analyticsHealthTracked := lastSnapshot.NodeHealth["analytics-pipeline"]
if !analyticsHealthTracked {
t.Errorf("Expected Analytics Pipeline health to be tracked in snapshots")
}
_, compressionHealthTracked := lastSnapshot.NodeHealth["compression-pipeline"]
if !compressionHealthTracked {
t.Errorf("Expected Compression Pipeline health to be tracked in snapshots")
}
// Verify the data flow chain exists (all components are connected)
// This ensures the integration test validates the complete data processing architecture
totalNodes := len(engine.Nodes)
expectedNodes := 10 // From the design JSON
if totalNodes != expectedNodes {
t.Errorf("Expected %d total nodes in data pipeline architecture, got %d", expectedNodes, totalNodes)
}
}
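The integration tests above all load their topologies from testdata JSON files; the same engine API they call can also be driven from an in-memory design, roughly as sketched below (the node IDs, types, and property values here are made up for illustration).

func exampleInMemoryDesign() {
	d := design.Design{
		Nodes: []design.Node{
			{ID: "web", Type: "webserver", Props: map[string]interface{}{"rpsCapacity": 50}},
			{ID: "pipe", Type: "data pipeline", Props: map[string]interface{}{"batchSize": 10.0, "transformation": "map"}},
		},
		Connections: []design.Connection{
			{Source: "web", Target: "pipe"},
		},
	}
	engine := NewEngineFromDesign(d, 100)
	engine.RPS = 20
	engine.EntryNode = "web"
	snapshots := engine.Run(10, 100) // 10 ticks at 100ms per tick
	_ = snapshots
}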

115
internal/simulation/messagequeue.go

@ -1,115 +0,0 @@
package simulation
type MessageQueueLogic struct{}
type QueuedMessage struct {
RequestID string
Timestamp int
MessageData string
RetryCount int
}
func (mq MessageQueueLogic) Tick(props map[string]any, queue []*Request, tick int) ([]*Request, bool) {
// Extract message queue properties
queueCapacity := int(AsFloat64(props["queueCapacity"]))
if queueCapacity == 0 {
queueCapacity = 1000 // default capacity
}
retentionSeconds := int(AsFloat64(props["retentionSeconds"]))
if retentionSeconds == 0 {
retentionSeconds = 86400 // default 24 hours in seconds
}
// Processing rate (messages per tick)
processingRate := int(AsFloat64(props["processingRate"]))
if processingRate == 0 {
processingRate = 100 // default 100 messages per tick
}
// Current timestamp for this tick
currentTime := tick * 100 // assuming 100ms per tick
// Initialize queue storage in props
messageQueue, ok := props["_messageQueue"].([]QueuedMessage)
if !ok {
messageQueue = []QueuedMessage{}
}
// Clean up expired messages based on retention policy
messageQueue = mq.cleanExpiredMessages(messageQueue, currentTime, retentionSeconds*1000)
// First, process existing messages from the queue (FIFO order)
output := []*Request{}
messagesToProcess := len(messageQueue)
if messagesToProcess > processingRate {
messagesToProcess = processingRate
}
for i := 0; i < messagesToProcess; i++ {
if len(messageQueue) == 0 {
break
}
// Dequeue message (FIFO - take from front)
message := messageQueue[0]
messageQueue = messageQueue[1:]
// Create request for downstream processing
processedReq := &Request{
ID: message.RequestID,
Timestamp: message.Timestamp,
LatencyMS: 2, // Small latency for queue processing
Origin: "message-queue",
Type: "PROCESS",
Path: []string{"queued-message"},
}
output = append(output, processedReq)
}
// Then, add incoming requests to the queue for next tick
for _, req := range queue {
// Check if queue is at capacity
if len(messageQueue) >= queueCapacity {
// Queue full - message is dropped (or could implement backpressure)
// For now, we'll drop the message and add latency penalty
reqCopy := *req
reqCopy.LatencyMS += 1000 // High latency penalty for dropped messages
reqCopy.Path = append(reqCopy.Path, "queue-full-dropped")
// Don't add to output as message was dropped
continue
}
// Add message to queue
message := QueuedMessage{
RequestID: req.ID,
Timestamp: currentTime,
MessageData: "message-payload", // In real system, this would be the actual message
RetryCount: 0,
}
messageQueue = append(messageQueue, message)
}
// Update queue storage in props
props["_messageQueue"] = messageQueue
// Queue is healthy if not at capacity or if we can still process messages
// Queue becomes unhealthy only when completely full AND we can't process anything
healthy := len(messageQueue) < queueCapacity || processingRate > 0
return output, healthy
}
func (mq MessageQueueLogic) cleanExpiredMessages(messageQueue []QueuedMessage, currentTime, retentionMs int) []QueuedMessage {
cleaned := []QueuedMessage{}
for _, message := range messageQueue {
if (currentTime - message.Timestamp) <= retentionMs {
cleaned = append(cleaned, message)
}
// Expired messages are dropped
}
return cleaned
}
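A minimal usage sketch of the logic above, with assumed property values: messages are enqueued on the tick they arrive and drained on later ticks at processingRate per tick.

func exampleMessageQueueTicks() {
	mq := MessageQueueLogic{}
	props := map[string]any{
		"queueCapacity":    10.0,
		"retentionSeconds": 60.0,
		"processingRate":   2.0,
	}
	incoming := []*Request{{ID: "a", Type: "SEND"}, {ID: "b", Type: "SEND"}, {ID: "c", Type: "SEND"}}
	out1, _ := mq.Tick(props, incoming, 1)     // empty: all three messages are queued first
	out2, _ := mq.Tick(props, []*Request{}, 2) // two requests: limited by processingRate
	out3, _ := mq.Tick(props, []*Request{}, 3) // the remaining one
	_, _, _ = out1, out2, out3
}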

329
internal/simulation/messagequeue_test.go

@ -1,329 +0,0 @@
package simulation
import (
"testing"
)
func TestMessageQueueLogic_BasicProcessing(t *testing.T) {
mq := MessageQueueLogic{}
props := map[string]any{
"queueCapacity": 10,
"retentionSeconds": 3600, // 1 hour
"processingRate": 5,
}
// Add some messages to the queue
reqs := []*Request{
{ID: "msg1", Type: "SEND", LatencyMS: 0, Timestamp: 100},
{ID: "msg2", Type: "SEND", LatencyMS: 0, Timestamp: 100},
{ID: "msg3", Type: "SEND", LatencyMS: 0, Timestamp: 100},
}
output, healthy := mq.Tick(props, reqs, 1)
if !healthy {
t.Errorf("Message queue should be healthy")
}
// No immediate output since messages are queued first
if len(output) != 0 {
t.Errorf("Expected 0 immediate output (messages queued), got %d", len(output))
}
// Check that messages are in the queue
messageQueue, ok := props["_messageQueue"].([]QueuedMessage)
if !ok {
t.Errorf("Expected message queue to be initialized")
}
if len(messageQueue) != 3 {
t.Errorf("Expected 3 messages in queue, got %d", len(messageQueue))
}
// Process the queue (no new incoming messages)
output2, _ := mq.Tick(props, []*Request{}, 2)
// Should process up to processingRate (5) messages
if len(output2) != 3 {
t.Errorf("Expected 3 processed messages, got %d", len(output2))
}
// Queue should now be empty
messageQueue2, _ := props["_messageQueue"].([]QueuedMessage)
if len(messageQueue2) != 0 {
t.Errorf("Expected empty queue after processing, got %d messages", len(messageQueue2))
}
// Check output message properties
for _, msg := range output2 {
if msg.LatencyMS != 2 {
t.Errorf("Expected 2ms processing latency, got %dms", msg.LatencyMS)
}
if msg.Type != "PROCESS" {
t.Errorf("Expected PROCESS type, got %s", msg.Type)
}
}
}
func TestMessageQueueLogic_CapacityLimit(t *testing.T) {
mq := MessageQueueLogic{}
props := map[string]any{
"queueCapacity": 2, // Small capacity
"retentionSeconds": 3600,
"processingRate": 1,
}
// Add more messages than capacity
reqs := []*Request{
{ID: "msg1", Type: "SEND", LatencyMS: 0},
{ID: "msg2", Type: "SEND", LatencyMS: 0},
{ID: "msg3", Type: "SEND", LatencyMS: 0}, // This should be dropped
}
output, healthy := mq.Tick(props, reqs, 1)
// Queue should be healthy (can still process messages)
if !healthy {
t.Errorf("Queue should be healthy (can still process)")
}
// Should have no immediate output (messages queued)
if len(output) != 0 {
t.Errorf("Expected 0 immediate output, got %d", len(output))
}
// Check queue size
messageQueue, _ := props["_messageQueue"].([]QueuedMessage)
if len(messageQueue) != 2 {
t.Errorf("Expected 2 messages in queue (capacity limit), got %d", len(messageQueue))
}
// Add another message when queue is full
reqs2 := []*Request{{ID: "msg4", Type: "SEND", LatencyMS: 0}}
output2, healthy2 := mq.Tick(props, reqs2, 2)
// Queue should still be healthy (can process messages)
if !healthy2 {
t.Errorf("Queue should remain healthy (can still process)")
}
// Should have 1 processed message (processingRate = 1)
if len(output2) != 1 {
t.Errorf("Expected 1 processed message, got %d", len(output2))
}
// Queue should have 2 messages (started with 2, processed 1 leaving 1, added 1 new since space available)
messageQueue2, _ := props["_messageQueue"].([]QueuedMessage)
if len(messageQueue2) != 2 {
t.Errorf("Expected 2 messages in queue (1 remaining + 1 new), got %d", len(messageQueue2))
}
}
func TestMessageQueueLogic_ProcessingRate(t *testing.T) {
mq := MessageQueueLogic{}
props := map[string]any{
"queueCapacity": 100,
"retentionSeconds": 3600,
"processingRate": 3, // Process 3 messages per tick
}
// Add 10 messages
reqs := []*Request{}
for i := 0; i < 10; i++ {
reqs = append(reqs, &Request{ID: "msg" + string(rune(i+'0')), Type: "SEND"})
}
// First tick: queue all messages
mq.Tick(props, reqs, 1)
// Second tick: process at rate limit
output, _ := mq.Tick(props, []*Request{}, 2)
if len(output) != 3 {
t.Errorf("Expected 3 processed messages (rate limit), got %d", len(output))
}
// Check remaining queue size
messageQueue, _ := props["_messageQueue"].([]QueuedMessage)
if len(messageQueue) != 7 {
t.Errorf("Expected 7 messages remaining in queue, got %d", len(messageQueue))
}
// Third tick: process 3 more
output2, _ := mq.Tick(props, []*Request{}, 3)
if len(output2) != 3 {
t.Errorf("Expected 3 more processed messages, got %d", len(output2))
}
// Check remaining queue size
messageQueue2, _ := props["_messageQueue"].([]QueuedMessage)
if len(messageQueue2) != 4 {
t.Errorf("Expected 4 messages remaining in queue, got %d", len(messageQueue2))
}
}
func TestMessageQueueLogic_MessageRetention(t *testing.T) {
mq := MessageQueueLogic{}
props := map[string]any{
"queueCapacity": 100,
"retentionSeconds": 1, // 1 second retention
"processingRate": 0, // Don't process messages, just test retention
}
// Add messages at tick 1
reqs := []*Request{
{ID: "msg1", Type: "SEND", Timestamp: 100},
{ID: "msg2", Type: "SEND", Timestamp: 100},
}
mq.Tick(props, reqs, 1)
// Check messages are queued
messageQueue, _ := props["_messageQueue"].([]QueuedMessage)
if len(messageQueue) != 2 {
t.Errorf("Expected 2 messages in queue, got %d", len(messageQueue))
}
// Tick at time that should expire messages (tick 20 = 2000ms, retention = 1000ms)
output, _ := mq.Tick(props, []*Request{}, 20)
// Messages should be expired and removed
messageQueue2, _ := props["_messageQueue"].([]QueuedMessage)
if len(messageQueue2) != 0 {
t.Errorf("Expected messages to be expired and removed, got %d", len(messageQueue2))
}
// No output since processingRate = 0
if len(output) != 0 {
t.Errorf("Expected no output with processingRate=0, got %d", len(output))
}
}
func TestMessageQueueLogic_FIFOOrdering(t *testing.T) {
mq := MessageQueueLogic{}
props := map[string]any{
"queueCapacity": 10,
"retentionSeconds": 3600,
"processingRate": 2,
}
// Add messages in order
reqs := []*Request{
{ID: "first", Type: "SEND"},
{ID: "second", Type: "SEND"},
{ID: "third", Type: "SEND"},
}
mq.Tick(props, reqs, 1)
// Process 2 messages
output, _ := mq.Tick(props, []*Request{}, 2)
if len(output) != 2 {
t.Errorf("Expected 2 processed messages, got %d", len(output))
}
// Check FIFO order
if output[0].ID != "first" {
t.Errorf("Expected first message to be 'first', got '%s'", output[0].ID)
}
if output[1].ID != "second" {
t.Errorf("Expected second message to be 'second', got '%s'", output[1].ID)
}
// Process remaining message
output2, _ := mq.Tick(props, []*Request{}, 3)
if len(output2) != 1 {
t.Errorf("Expected 1 remaining message, got %d", len(output2))
}
if output2[0].ID != "third" {
t.Errorf("Expected remaining message to be 'third', got '%s'", output2[0].ID)
}
}
func TestMessageQueueLogic_DefaultValues(t *testing.T) {
mq := MessageQueueLogic{}
// Empty props should use defaults
props := map[string]any{}
reqs := []*Request{{ID: "msg1", Type: "SEND"}}
output, healthy := mq.Tick(props, reqs, 1)
if !healthy {
t.Errorf("Queue should be healthy with default values")
}
// Should queue the message (no immediate output)
if len(output) != 0 {
t.Errorf("Expected message to be queued (0 output), got %d", len(output))
}
// Check that message was queued with defaults
messageQueue, _ := props["_messageQueue"].([]QueuedMessage)
if len(messageQueue) != 1 {
t.Errorf("Expected 1 message queued with defaults, got %d", len(messageQueue))
}
// Process with defaults (should process up to default rate)
output2, _ := mq.Tick(props, []*Request{}, 2)
if len(output2) != 1 {
t.Errorf("Expected 1 processed message with defaults, got %d", len(output2))
}
}
func TestMessageQueueLogic_ContinuousFlow(t *testing.T) {
mq := MessageQueueLogic{}
props := map[string]any{
"queueCapacity": 5,
"retentionSeconds": 3600,
"processingRate": 2,
}
// Tick 1: Add 3 messages
reqs1 := []*Request{
{ID: "msg1", Type: "SEND"},
{ID: "msg2", Type: "SEND"},
{ID: "msg3", Type: "SEND"},
}
output1, _ := mq.Tick(props, reqs1, 1)
// Should queue all 3 messages
if len(output1) != 0 {
t.Errorf("Expected 0 output on first tick, got %d", len(output1))
}
// Tick 2: Add 2 more messages, process 2
reqs2 := []*Request{
{ID: "msg4", Type: "SEND"},
{ID: "msg5", Type: "SEND"},
}
output2, _ := mq.Tick(props, reqs2, 2)
// Should process 2 messages
if len(output2) != 2 {
t.Errorf("Expected 2 processed messages, got %d", len(output2))
}
// Should have 3 messages in queue (3 queued - 2 processed + 2 new arrivals)
messageQueue, _ := props["_messageQueue"].([]QueuedMessage)
if len(messageQueue) != 3 {
t.Errorf("Expected 3 messages in queue, got %d", len(messageQueue))
}
// Check processing order
if output2[0].ID != "msg1" || output2[1].ID != "msg2" {
t.Errorf("Expected FIFO processing order, got %s, %s", output2[0].ID, output2[1].ID)
}
}
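As a rough model of the behaviour these tests pin down, the backlog under a constant arrival rate can be estimated with a small helper like the one below (a sketch; it assumes arrivals are enqueued after the current tick's processing, as in the tests above).

// estimateBacklog(5, 3, 2, 100) == 7: after the first tick the backlog grows by
// arrivalsPerTick - processingRate = 1 message per tick.
func estimateBacklog(nTicks, arrivalsPerTick, processingRate, queueCapacity int) int {
	backlog := 0
	for tick := 0; tick < nTicks; tick++ {
		// drain first: only messages queued on earlier ticks get processed
		processed := processingRate
		if processed > backlog {
			processed = backlog
		}
		backlog -= processed
		// then enqueue this tick's arrivals, dropping anything over capacity
		space := queueCapacity - backlog
		add := arrivalsPerTick
		if add > space {
			add = space
		}
		backlog += add
	}
	return backlog
}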

241
internal/simulation/microservice.go

@ -1,241 +0,0 @@
package simulation
import (
"fmt"
"hash/fnv"
"math"
)
type MicroserviceLogic struct{}
type ServiceInstance struct {
ID int
CurrentLoad int
HealthStatus string
}
// CacheEntry represents a cached item in the microservice's cache
type MicroserviceCacheEntry struct {
Data string
Timestamp int
AccessTime int
AccessCount int
}
// hash function for cache keys
func hashKey(s string) uint32 {
h := fnv.New32a()
h.Write([]byte(s))
return h.Sum32()
}
func (m MicroserviceLogic) Tick(props map[string]any, queue []*Request, tick int) ([]*Request, bool) {
// Extract microservice properties
instanceCount := int(AsFloat64(props["instanceCount"]))
if instanceCount == 0 {
instanceCount = 1 // default to 1 instance
}
cpu := int(AsFloat64(props["cpu"]))
if cpu == 0 {
cpu = 2 // default 2 CPU cores
}
ramGb := int(AsFloat64(props["ramGb"]))
if ramGb == 0 {
ramGb = 4 // default 4GB RAM
}
rpsCapacity := int(AsFloat64(props["rpsCapacity"]))
if rpsCapacity == 0 {
rpsCapacity = 100 // default capacity per instance
}
scalingStrategy := AsString(props["scalingStrategy"])
if scalingStrategy == "" {
scalingStrategy = "auto"
}
// Calculate base latency based on resource specs
baseLatencyMs := m.calculateBaseLatency(cpu, ramGb)
// Auto-scaling logic: adjust instance count based on load
currentLoad := len(queue)
if scalingStrategy == "auto" {
instanceCount = m.autoScale(instanceCount, currentLoad, rpsCapacity)
props["instanceCount"] = float64(instanceCount) // update for next tick
}
// Total capacity across all instances
totalCapacity := instanceCount * rpsCapacity
// Process requests up to total capacity
toProcess := queue
if len(queue) > totalCapacity {
toProcess = queue[:totalCapacity]
}
// Initialize cache in microservice props
cache, ok := props["_microserviceCache"].(map[string]*MicroserviceCacheEntry)
if !ok {
cache = make(map[string]*MicroserviceCacheEntry)
props["_microserviceCache"] = cache
}
cacheTTL := 300000 // 5 minutes default TTL
currentTime := tick * 100 // assuming 100ms per tick
output := []*Request{} // Only cache misses go here (forwarded to database)
cacheHits := []*Request{} // Cache hits - completed locally
dbRequests := []*Request{} // Requests that need to go to database
// Process each request with cache-aside logic
for i, req := range toProcess {
// Generate cache key for this request (simulate URL patterns)
hashValue := hashKey(req.ID) % 100 // Create 100 possible "URLs"
cacheKey := fmt.Sprintf("url-%d-%s", hashValue, req.Type)
// Check cache first (Cache-Aside pattern)
entry, hit := cache[cacheKey]
if hit && !m.isCacheExpired(entry, currentTime, cacheTTL) {
// CACHE HIT - serve from cache (NO DATABASE QUERY)
reqCopy := *req
reqCopy.LatencyMS += 1 // 1ms for cache access
reqCopy.Path = append(reqCopy.Path, "microservice-cache-hit-completed")
// Update cache access tracking
entry.AccessTime = currentTime
entry.AccessCount++
// Cache hits do NOT go to database - they complete here
// In a real system, this response would go back to the client
// Store separately - these do NOT get forwarded to database
cacheHits = append(cacheHits, &reqCopy)
} else {
// CACHE MISS - need to query database
reqCopy := *req
// Add microservice processing latency
processingLatency := baseLatencyMs
// Simulate CPU-bound vs I/O-bound operations
if req.Type == "GET" {
processingLatency = baseLatencyMs // Fast reads
} else if req.Type == "POST" || req.Type == "PUT" {
processingLatency = baseLatencyMs + 10 // Writes take longer
} else if req.Type == "COMPUTE" {
processingLatency = baseLatencyMs + 50 // CPU-intensive operations
}
// Instance load affects latency (queuing delay)
instanceLoad := m.calculateInstanceLoad(i, len(toProcess), instanceCount)
if float64(instanceLoad) > float64(rpsCapacity)*0.8 { // Above 80% capacity
processingLatency += int(float64(processingLatency) * 0.5) // 50% penalty
}
reqCopy.LatencyMS += processingLatency
reqCopy.Path = append(reqCopy.Path, "microservice-cache-miss")
// Store cache key in request for when database response comes back
reqCopy.CacheKey = cacheKey
// Forward to database for actual data
dbRequests = append(dbRequests, &reqCopy)
}
}
// For cache misses, we would normally wait for database response and then cache it
// In this simulation, we'll immediately cache the "result" for future requests
for _, req := range dbRequests {
// Simulate caching the database response
cache[req.CacheKey] = &MicroserviceCacheEntry{
Data: "cached-response-data",
Timestamp: currentTime,
AccessTime: currentTime,
AccessCount: 1,
}
// Forward request to database
output = append(output, req)
}
// Health check: service is healthy if not severely overloaded
healthy := len(queue) <= totalCapacity*2 // Allow some buffering
return output, healthy
}
// isCacheExpired checks if a cache entry has expired
func (m MicroserviceLogic) isCacheExpired(entry *MicroserviceCacheEntry, currentTime, ttl int) bool {
return (currentTime - entry.Timestamp) > ttl
}
// calculateBaseLatency determines base processing time based on resources
func (m MicroserviceLogic) calculateBaseLatency(cpu, ramGb int) int {
// Better CPU and RAM = lower base latency
// Formula: base latency inversely proportional to resources
cpuFactor := float64(cpu)
ramFactor := float64(ramGb) / 4.0 // Normalize to 4GB baseline
resourceScore := cpuFactor * ramFactor
if resourceScore < 1 {
resourceScore = 1
}
baseLatency := int(50.0 / resourceScore) // 50ms baseline for 2CPU/4GB
if baseLatency < 5 {
baseLatency = 5 // Minimum 5ms processing time
}
return baseLatency
}
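// Worked example of the formula above, with assumed inputs: cpu=4, ramGb=8 gives
// resourceScore = 4 * (8/4) = 8, so baseLatency = int(50/8) = 6ms; cpu=1, ramGb=1
// gives resourceScore = 1 * (1/4) = 0.25, clamped to 1, so baseLatency = 50ms.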
// autoScale implements simple auto-scaling logic
func (m MicroserviceLogic) autoScale(currentInstances, currentLoad, rpsPerInstance int) int {
// Calculate desired instances based on current load
desiredInstances := int(math.Ceil(float64(currentLoad) / float64(rpsPerInstance)))
// Scale up/down gradually (max 25% change per tick)
maxChange := int(math.Max(1, float64(currentInstances)*0.25))
if desiredInstances > currentInstances {
// Scale up
newInstances := currentInstances + maxChange
if newInstances > desiredInstances {
newInstances = desiredInstances
}
// Cap at reasonable maximum
if newInstances > 20 {
newInstances = 20
}
return newInstances
} else if desiredInstances < currentInstances {
// Scale down (more conservative)
newInstances := currentInstances - int(math.Max(1, float64(maxChange)*0.5))
if newInstances < desiredInstances {
newInstances = desiredInstances
}
// Always maintain at least 1 instance
if newInstances < 1 {
newInstances = 1
}
return newInstances
}
return currentInstances
}
// calculateInstanceLoad estimates load on a specific instance
func (m MicroserviceLogic) calculateInstanceLoad(instanceID, totalRequests, instanceCount int) int {
// Simple round-robin distribution
baseLoad := totalRequests / instanceCount
remainder := totalRequests % instanceCount
if instanceID < remainder {
return baseLoad + 1
}
return baseLoad
}
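// Worked example of autoScale above, with assumed inputs: 4 instances, 120 queued
// requests, rpsPerInstance=10. desiredInstances = ceil(120/10) = 12; maxChange =
// max(1, 4*0.25) = 1, so the service scales to 5 instances this tick and keeps
// stepping up on later ticks toward 12 (capped at 20 instances overall).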

286
internal/simulation/microservice_test.go

@ -1,286 +0,0 @@
package simulation
import (
"testing"
)
func TestMicroserviceLogic_BasicProcessing(t *testing.T) {
logic := MicroserviceLogic{}
props := map[string]any{
"instanceCount": 2.0,
"cpu": 4.0,
"ramGb": 8.0,
"rpsCapacity": 100.0,
"scalingStrategy": "manual",
}
requests := []*Request{
{ID: "1", Type: "GET", LatencyMS: 0, Path: []string{}},
{ID: "2", Type: "POST", LatencyMS: 0, Path: []string{}},
}
output, healthy := logic.Tick(props, requests, 1)
if !healthy {
t.Error("Expected microservice to be healthy")
}
if len(output) != 2 {
t.Errorf("Expected 2 processed requests, got %d", len(output))
}
// Verify latency was added
for _, req := range output {
if req.LatencyMS == 0 {
t.Error("Expected latency to be added to processed request")
}
if len(req.Path) == 0 || req.Path[len(req.Path)-1] != "microservice-processed" {
t.Error("Expected path to be updated with microservice-processed")
}
}
}
func TestMicroserviceLogic_CapacityLimit(t *testing.T) {
logic := MicroserviceLogic{}
props := map[string]any{
"instanceCount": 1.0,
"rpsCapacity": 2.0,
"scalingStrategy": "manual",
}
// Send 4 requests, capacity is 2 (1 instance * 2 RPS)
// This should be healthy since 4 <= totalCapacity*2 (4)
requests := make([]*Request, 4)
for i := range requests {
requests[i] = &Request{ID: string(rune('1' + i)), Type: "GET", LatencyMS: 0}
}
output, healthy := logic.Tick(props, requests, 1)
if !healthy {
t.Error("Expected microservice to be healthy with moderate queuing")
}
// Should only process 2 requests (capacity limit)
if len(output) != 2 {
t.Errorf("Expected 2 processed requests due to capacity limit, got %d", len(output))
}
}
func TestMicroserviceLogic_AutoScaling(t *testing.T) {
logic := MicroserviceLogic{}
props := map[string]any{
"instanceCount": 1.0,
"rpsCapacity": 10.0,
"scalingStrategy": "auto",
}
// Send 25 requests to trigger scaling
requests := make([]*Request, 25)
for i := range requests {
requests[i] = &Request{ID: string(rune('1' + i)), Type: "GET", LatencyMS: 0}
}
output, healthy := logic.Tick(props, requests, 1)
// Check if instances were scaled up
newInstanceCount := int(props["instanceCount"].(float64))
if newInstanceCount <= 1 {
t.Error("Expected auto-scaling to increase instance count")
}
// Should process more than 10 requests (original capacity)
if len(output) <= 10 {
t.Errorf("Expected auto-scaling to increase processing capacity, got %d", len(output))
}
if !healthy {
t.Error("Expected microservice to be healthy after scaling")
}
}
func TestMicroserviceLogic_ResourceBasedLatency(t *testing.T) {
logic := MicroserviceLogic{}
// High-resource microservice
highResourceProps := map[string]any{
"instanceCount": 1.0,
"cpu": 8.0,
"ramGb": 16.0,
"rpsCapacity": 100.0,
"scalingStrategy": "manual",
}
// Low-resource microservice
lowResourceProps := map[string]any{
"instanceCount": 1.0,
"cpu": 1.0,
"ramGb": 1.0,
"rpsCapacity": 100.0,
"scalingStrategy": "manual",
}
request := []*Request{{ID: "1", Type: "GET", LatencyMS: 0, Path: []string{}}}
highOutput, _ := logic.Tick(highResourceProps, request, 1)
lowOutput, _ := logic.Tick(lowResourceProps, request, 1)
highLatency := highOutput[0].LatencyMS
lowLatency := lowOutput[0].LatencyMS
if lowLatency <= highLatency {
t.Errorf("Expected low-resource microservice (%dms) to have higher latency than high-resource (%dms)",
lowLatency, highLatency)
}
}
func TestMicroserviceLogic_RequestTypeLatency(t *testing.T) {
logic := MicroserviceLogic{}
props := map[string]any{
"instanceCount": 1.0,
"cpu": 2.0,
"ramGb": 4.0,
"rpsCapacity": 100.0,
"scalingStrategy": "manual",
}
getRequest := []*Request{{ID: "1", Type: "GET", LatencyMS: 0, Path: []string{}}}
postRequest := []*Request{{ID: "2", Type: "POST", LatencyMS: 0, Path: []string{}}}
computeRequest := []*Request{{ID: "3", Type: "COMPUTE", LatencyMS: 0, Path: []string{}}}
getOutput, _ := logic.Tick(props, getRequest, 1)
postOutput, _ := logic.Tick(props, postRequest, 1)
computeOutput, _ := logic.Tick(props, computeRequest, 1)
getLatency := getOutput[0].LatencyMS
postLatency := postOutput[0].LatencyMS
computeLatency := computeOutput[0].LatencyMS
if getLatency >= postLatency {
t.Errorf("Expected GET (%dms) to be faster than POST (%dms)", getLatency, postLatency)
}
if postLatency >= computeLatency {
t.Errorf("Expected POST (%dms) to be faster than COMPUTE (%dms)", postLatency, computeLatency)
}
}
func TestMicroserviceLogic_HighLoadLatencyPenalty(t *testing.T) {
logic := MicroserviceLogic{}
props := map[string]any{
"instanceCount": 1.0,
"cpu": 2.0,
"ramGb": 4.0,
"rpsCapacity": 10.0,
"scalingStrategy": "manual",
}
// Low load scenario
lowLoadRequest := []*Request{{ID: "1", Type: "GET", LatencyMS: 0, Path: []string{}}}
lowOutput, _ := logic.Tick(props, lowLoadRequest, 1)
lowLatency := lowOutput[0].LatencyMS
// High load scenario (above 80% capacity threshold)
highLoadRequests := make([]*Request, 9) // 90% of 10 RPS capacity
for i := range highLoadRequests {
highLoadRequests[i] = &Request{ID: string(rune('1' + i)), Type: "GET", LatencyMS: 0, Path: []string{}}
}
highOutput, _ := logic.Tick(props, highLoadRequests, 1)
// Check if first request has higher latency due to load
highLatency := highOutput[0].LatencyMS
if highLatency <= lowLatency {
t.Errorf("Expected high load scenario (%dms) to have higher latency than low load (%dms)",
highLatency, lowLatency)
}
}
func TestMicroserviceLogic_DefaultValues(t *testing.T) {
logic := MicroserviceLogic{}
// Empty props should use defaults
props := map[string]any{}
requests := []*Request{{ID: "1", Type: "GET", LatencyMS: 0, Path: []string{}}}
output, healthy := logic.Tick(props, requests, 1)
if !healthy {
t.Error("Expected microservice to be healthy with default values")
}
if len(output) != 1 {
t.Errorf("Expected 1 processed request with defaults, got %d", len(output))
}
// Should have reasonable default latency
if output[0].LatencyMS <= 0 || output[0].LatencyMS > 100 {
t.Errorf("Expected reasonable default latency, got %dms", output[0].LatencyMS)
}
}
func TestMicroserviceLogic_UnhealthyWhenOverloaded(t *testing.T) {
logic := MicroserviceLogic{}
props := map[string]any{
"instanceCount": 1.0,
"rpsCapacity": 5.0,
"scalingStrategy": "manual", // No auto-scaling
}
// Send way more requests than capacity (5 * 2 = 10 max before unhealthy)
requests := make([]*Request, 15) // 3x capacity
for i := range requests {
requests[i] = &Request{ID: string(rune('1' + i)), Type: "GET", LatencyMS: 0}
}
output, healthy := logic.Tick(props, requests, 1)
if healthy {
t.Error("Expected microservice to be unhealthy when severely overloaded")
}
// Should still process up to capacity
if len(output) != 5 {
t.Errorf("Expected 5 processed requests despite being overloaded, got %d", len(output))
}
}
func TestMicroserviceLogic_RoundRobinDistribution(t *testing.T) {
logic := MicroserviceLogic{}
props := map[string]any{
"instanceCount": 3.0,
"rpsCapacity": 10.0,
"scalingStrategy": "manual",
}
// Send 6 requests to be distributed across 3 instances
requests := make([]*Request, 6)
for i := range requests {
requests[i] = &Request{ID: string(rune('1' + i)), Type: "GET", LatencyMS: 0, Path: []string{}}
}
output, healthy := logic.Tick(props, requests, 1)
if !healthy {
t.Error("Expected microservice to be healthy")
}
if len(output) != 6 {
t.Errorf("Expected 6 processed requests, got %d", len(output))
}
// All requests should be processed (within total capacity of 30)
for _, req := range output {
if req.LatencyMS <= 0 {
t.Error("Expected all requests to have added latency")
}
}
}

221
internal/simulation/monitoring.go

@ -1,221 +0,0 @@
package simulation
type MonitoringLogic struct{}
type MetricData struct {
Timestamp int
LatencySum int
RequestCount int
ErrorCount int
QueueSize int
}
type AlertEvent struct {
Timestamp int
MetricType string
Value float64
Threshold float64
Unit string
Severity string
}
func (m MonitoringLogic) Tick(props map[string]any, queue []*Request, tick int) ([]*Request, bool) {
// Extract monitoring properties
tool := AsString(props["tool"])
if tool == "" {
tool = "Prometheus" // default monitoring tool
}
alertMetric := AsString(props["alertMetric"])
if alertMetric == "" {
alertMetric = "latency" // default to latency monitoring
}
thresholdValue := int(AsFloat64(props["thresholdValue"]))
if thresholdValue == 0 {
thresholdValue = 100 // default threshold
}
thresholdUnit := AsString(props["thresholdUnit"])
if thresholdUnit == "" {
thresholdUnit = "ms" // default unit
}
// Get historical metrics from props
metrics, ok := props["_metrics"].([]MetricData)
if !ok {
metrics = []MetricData{}
}
// Get alert history
alerts, ok := props["_alerts"].([]AlertEvent)
if !ok {
alerts = []AlertEvent{}
}
currentTime := tick * 100 // Convert tick to milliseconds
// Process all incoming requests (monitoring is pass-through)
output := []*Request{}
totalLatency := 0
errorCount := 0
for _, req := range queue {
// Create a copy of the request to forward
reqCopy := *req
// Add minimal monitoring overhead (1-2ms for metric collection)
monitoringOverhead := 1
if tool == "Datadog" || tool == "New Relic" {
monitoringOverhead = 2 // More feature-rich tools have slightly higher overhead
}
reqCopy.LatencyMS += monitoringOverhead
reqCopy.Path = append(reqCopy.Path, "monitored")
// Collect metrics from the request
totalLatency += req.LatencyMS
// Simple heuristic: requests with high latency are considered errors
if req.LatencyMS > 1000 { // 1 second threshold for errors
errorCount++
}
output = append(output, &reqCopy)
}
// Calculate current metrics
avgLatency := 0.0
if len(queue) > 0 {
avgLatency = float64(totalLatency) / float64(len(queue))
}
// Store current metrics
currentMetric := MetricData{
Timestamp: currentTime,
LatencySum: totalLatency,
RequestCount: len(queue),
ErrorCount: errorCount,
QueueSize: len(queue),
}
// Add to metrics history (keep last 10 data points)
metrics = append(metrics, currentMetric)
if len(metrics) > 10 {
metrics = metrics[1:]
}
// Check alert conditions
shouldAlert := false
alertValue := 0.0
switch alertMetric {
case "latency":
alertValue = avgLatency
if avgLatency > float64(thresholdValue) && len(queue) > 0 {
shouldAlert = true
}
case "throughput":
alertValue = float64(len(queue))
if len(queue) < thresholdValue { // Low throughput alert
shouldAlert = true
}
case "error_rate":
errorRate := 0.0
if len(queue) > 0 {
errorRate = float64(errorCount) / float64(len(queue)) * 100
}
alertValue = errorRate
if errorRate > float64(thresholdValue) {
shouldAlert = true
}
case "queue_size":
alertValue = float64(len(queue))
if len(queue) > thresholdValue {
shouldAlert = true
}
}
// Generate alert if threshold exceeded
if shouldAlert {
severity := "warning"
if alertValue > float64(thresholdValue)*1.5 { // 150% of threshold
severity = "critical"
}
alert := AlertEvent{
Timestamp: currentTime,
MetricType: alertMetric,
Value: alertValue,
Threshold: float64(thresholdValue),
Unit: thresholdUnit,
Severity: severity,
}
// Only add alert if it's not a duplicate of the last alert
if len(alerts) == 0 || !m.isDuplicateAlert(alerts[len(alerts)-1], alert) {
alerts = append(alerts, alert)
}
// Keep only last 20 alerts
if len(alerts) > 20 {
alerts = alerts[1:]
}
}
// Update props with collected data
props["_metrics"] = metrics
props["_alerts"] = alerts
props["_currentLatency"] = avgLatency
props["_alertCount"] = len(alerts)
// Monitoring system health - it's healthy unless it's completely overloaded
healthy := len(queue) < 10000 // Can handle very high loads
// If too many critical alerts recently, mark as unhealthy
recentCriticalAlerts := 0
for _, alert := range alerts {
if currentTime-alert.Timestamp < 10000 && alert.Severity == "critical" { // Last 10 seconds
recentCriticalAlerts++
}
}
if recentCriticalAlerts > 5 {
healthy = false
}
return output, healthy
}
// isDuplicateAlert checks if an alert is similar to the previous one to avoid spam
func (m MonitoringLogic) isDuplicateAlert(prev, current AlertEvent) bool {
return prev.MetricType == current.MetricType &&
prev.Severity == current.Severity &&
(current.Timestamp-prev.Timestamp) < 5000 // Within 5 seconds
}
// Helper function to calculate moving average
func (m MonitoringLogic) calculateMovingAverage(metrics []MetricData, window int) float64 {
if len(metrics) == 0 {
return 0
}
start := 0
if len(metrics) > window {
start = len(metrics) - window
}
sum := 0.0
count := 0
for i := start; i < len(metrics); i++ {
if metrics[i].RequestCount > 0 {
sum += float64(metrics[i].LatencySum) / float64(metrics[i].RequestCount)
count++
}
}
if count == 0 {
return 0
}
return sum / float64(count)
}
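The alert severity rule in the removed MonitoringLogic above boils down to a small pure function; a standalone sketch (the threshold semantics are taken from the code, the function itself is illustrative):
package main

import "fmt"

// classify mirrors the removed logic: exceeding the threshold yields a
// warning, exceeding 150% of it escalates to critical.
func classify(avgLatencyMS, thresholdMS float64) string {
	if avgLatencyMS <= thresholdMS {
		return "ok"
	}
	if avgLatencyMS > thresholdMS*1.5 {
		return "critical"
	}
	return "warning"
}

func main() {
	fmt.Println(classify(90, 100))  // ok
	fmt.Println(classify(120, 100)) // warning
	fmt.Println(classify(180, 100)) // critical
}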

411
internal/simulation/monitoring_test.go

@ -1,411 +0,0 @@
package simulation
import (
"testing"
)
func TestMonitoringLogic_BasicPassthrough(t *testing.T) {
logic := MonitoringLogic{}
props := map[string]any{
"tool": "Prometheus",
"alertMetric": "latency",
"thresholdValue": 100.0,
"thresholdUnit": "ms",
}
requests := []*Request{
{ID: "1", Type: "GET", LatencyMS: 50, Path: []string{}},
{ID: "2", Type: "POST", LatencyMS: 75, Path: []string{}},
}
output, healthy := logic.Tick(props, requests, 1)
if !healthy {
t.Error("Expected monitoring to be healthy")
}
if len(output) != 2 {
t.Errorf("Expected 2 requests to pass through monitoring, got %d", len(output))
}
// Verify minimal latency overhead was added
for i, req := range output {
originalLatency := requests[i].LatencyMS
if req.LatencyMS <= originalLatency {
t.Errorf("Expected monitoring overhead to be added to latency")
}
if req.LatencyMS > originalLatency+5 {
t.Errorf("Expected minimal monitoring overhead, got %d ms added", req.LatencyMS-originalLatency)
}
if len(req.Path) == 0 || req.Path[len(req.Path)-1] != "monitored" {
t.Error("Expected path to be updated with 'monitored'")
}
}
}
func TestMonitoringLogic_MetricsCollection(t *testing.T) {
logic := MonitoringLogic{}
props := map[string]any{
"tool": "Datadog",
"alertMetric": "latency",
"thresholdValue": 100.0,
"thresholdUnit": "ms",
}
requests := []*Request{
{ID: "1", Type: "GET", LatencyMS: 50},
{ID: "2", Type: "POST", LatencyMS: 150},
{ID: "3", Type: "GET", LatencyMS: 75},
}
_, healthy := logic.Tick(props, requests, 1)
if !healthy {
t.Error("Expected monitoring to be healthy")
}
// Check that metrics were collected
metrics, ok := props["_metrics"].([]MetricData)
if !ok {
t.Error("Expected metrics to be collected in props")
}
if len(metrics) != 1 {
t.Errorf("Expected 1 metric data point, got %d", len(metrics))
}
metric := metrics[0]
if metric.RequestCount != 3 {
t.Errorf("Expected 3 requests counted, got %d", metric.RequestCount)
}
if metric.LatencySum != 275 { // 50 + 150 + 75
t.Errorf("Expected latency sum of 275, got %d", metric.LatencySum)
}
// Check current latency calculation
currentLatency, ok := props["_currentLatency"].(float64)
if !ok {
t.Error("Expected current latency to be calculated")
}
if currentLatency < 90 || currentLatency > 95 {
t.Errorf("Expected average latency around 91.67, got %f", currentLatency)
}
}
func TestMonitoringLogic_LatencyAlert(t *testing.T) {
logic := MonitoringLogic{}
props := map[string]any{
"tool": "Prometheus",
"alertMetric": "latency",
"thresholdValue": 80.0,
"thresholdUnit": "ms",
}
// Send requests that exceed latency threshold
requests := []*Request{
{ID: "1", Type: "GET", LatencyMS: 100},
{ID: "2", Type: "POST", LatencyMS: 120},
}
_, healthy := logic.Tick(props, requests, 1)
if !healthy {
t.Error("Expected monitoring to be healthy despite alerts")
}
// Check that alert was generated
alerts, ok := props["_alerts"].([]AlertEvent)
if !ok {
t.Error("Expected alerts to be stored in props")
}
if len(alerts) != 1 {
t.Errorf("Expected 1 alert to be generated, got %d", len(alerts))
}
alert := alerts[0]
if alert.MetricType != "latency" {
t.Errorf("Expected latency alert, got %s", alert.MetricType)
}
if alert.Threshold != 80.0 {
t.Errorf("Expected threshold of 80, got %f", alert.Threshold)
}
if alert.Value < 80.0 {
t.Errorf("Expected alert value to exceed threshold, got %f", alert.Value)
}
if alert.Severity != "warning" {
t.Errorf("Expected warning severity, got %s", alert.Severity)
}
}
func TestMonitoringLogic_ErrorRateAlert(t *testing.T) {
logic := MonitoringLogic{}
props := map[string]any{
"tool": "Prometheus",
"alertMetric": "error_rate",
"thresholdValue": 20.0, // 20% error rate threshold
"thresholdUnit": "percent",
}
// Send mix of normal and high-latency (error) requests
requests := []*Request{
{ID: "1", Type: "GET", LatencyMS: 100}, // normal
{ID: "2", Type: "POST", LatencyMS: 1200}, // error (>1000ms)
{ID: "3", Type: "GET", LatencyMS: 200}, // normal
{ID: "4", Type: "POST", LatencyMS: 1500}, // error
}
_, healthy := logic.Tick(props, requests, 1)
if !healthy {
t.Error("Expected monitoring to be healthy")
}
// Check that error rate alert was generated (50% error rate > 20% threshold)
alerts, ok := props["_alerts"].([]AlertEvent)
if !ok {
t.Error("Expected alerts to be stored in props")
}
if len(alerts) != 1 {
t.Errorf("Expected 1 alert to be generated, got %d", len(alerts))
}
alert := alerts[0]
if alert.MetricType != "error_rate" {
t.Errorf("Expected error_rate alert, got %s", alert.MetricType)
}
if alert.Value != 50.0 { // 2 errors out of 4 requests = 50%
t.Errorf("Expected 50%% error rate, got %f", alert.Value)
}
}
func TestMonitoringLogic_QueueSizeAlert(t *testing.T) {
logic := MonitoringLogic{}
props := map[string]any{
"tool": "Prometheus",
"alertMetric": "queue_size",
"thresholdValue": 5.0,
"thresholdUnit": "requests",
}
// Send more requests than threshold
requests := make([]*Request, 8)
for i := range requests {
requests[i] = &Request{ID: string(rune('1' + i)), Type: "GET", LatencyMS: 50}
}
_, healthy := logic.Tick(props, requests, 1)
if !healthy {
t.Error("Expected monitoring to be healthy with queue size alert")
}
// Check that queue size alert was generated
alerts, ok := props["_alerts"].([]AlertEvent)
if !ok {
t.Error("Expected alerts to be stored in props")
}
if len(alerts) != 1 {
t.Errorf("Expected 1 alert to be generated, got %d", len(alerts))
}
alert := alerts[0]
if alert.MetricType != "queue_size" {
t.Errorf("Expected queue_size alert, got %s", alert.MetricType)
}
if alert.Value != 8.0 {
t.Errorf("Expected queue size of 8, got %f", alert.Value)
}
}
func TestMonitoringLogic_CriticalAlert(t *testing.T) {
logic := MonitoringLogic{}
props := map[string]any{
"tool": "Prometheus",
"alertMetric": "latency",
"thresholdValue": 100.0,
"thresholdUnit": "ms",
}
// Send requests with very high latency (150% of threshold)
requests := []*Request{
{ID: "1", Type: "GET", LatencyMS: 180}, // 180 > 150 (1.5 * 100)
{ID: "2", Type: "POST", LatencyMS: 200},
}
_, healthy := logic.Tick(props, requests, 1)
if !healthy {
t.Error("Expected monitoring to be healthy")
}
alerts, ok := props["_alerts"].([]AlertEvent)
if !ok {
t.Error("Expected alerts to be stored in props")
}
if len(alerts) != 1 {
t.Errorf("Expected 1 alert to be generated, got %d", len(alerts))
}
alert := alerts[0]
if alert.Severity != "critical" {
t.Errorf("Expected critical severity for high threshold breach, got %s", alert.Severity)
}
}
func TestMonitoringLogic_DuplicateAlertSuppression(t *testing.T) {
logic := MonitoringLogic{}
props := map[string]any{
"tool": "Prometheus",
"alertMetric": "latency",
"thresholdValue": 80.0,
"thresholdUnit": "ms",
}
requests := []*Request{
{ID: "1", Type: "GET", LatencyMS: 100},
}
// First tick - should generate alert
logic.Tick(props, requests, 1)
alerts, _ := props["_alerts"].([]AlertEvent)
if len(alerts) != 1 {
t.Errorf("Expected 1 alert after first tick, got %d", len(alerts))
}
// Second tick immediately after - should suppress duplicate
logic.Tick(props, requests, 2)
alerts, _ = props["_alerts"].([]AlertEvent)
if len(alerts) != 1 {
t.Errorf("Expected duplicate alert to be suppressed, got %d alerts", len(alerts))
}
}
func TestMonitoringLogic_DefaultValues(t *testing.T) {
logic := MonitoringLogic{}
// Empty props should use defaults
props := map[string]any{}
requests := []*Request{{ID: "1", Type: "GET", LatencyMS: 50, Path: []string{}}}
output, healthy := logic.Tick(props, requests, 1)
if !healthy {
t.Error("Expected monitoring to be healthy with default values")
}
if len(output) != 1 {
t.Errorf("Expected 1 request to pass through, got %d", len(output))
}
// Should have reasonable default monitoring overhead
if output[0].LatencyMS <= 50 || output[0].LatencyMS > 55 {
t.Errorf("Expected default monitoring overhead, got %dms total", output[0].LatencyMS)
}
}
func TestMonitoringLogic_ToolSpecificOverhead(t *testing.T) {
logic := MonitoringLogic{}
// Test Prometheus (lower overhead)
propsPrometheus := map[string]any{
"tool": "Prometheus",
}
// Test Datadog (higher overhead)
propsDatadog := map[string]any{
"tool": "Datadog",
}
request := []*Request{{ID: "1", Type: "GET", LatencyMS: 50, Path: []string{}}}
prometheusOutput, _ := logic.Tick(propsPrometheus, request, 1)
datadogOutput, _ := logic.Tick(propsDatadog, request, 1)
prometheusOverhead := prometheusOutput[0].LatencyMS - 50
datadogOverhead := datadogOutput[0].LatencyMS - 50
if datadogOverhead <= prometheusOverhead {
t.Errorf("Expected Datadog (%dms) to have higher overhead than Prometheus (%dms)",
datadogOverhead, prometheusOverhead)
}
}
func TestMonitoringLogic_UnhealthyWithManyAlerts(t *testing.T) {
logic := MonitoringLogic{}
props := map[string]any{
"tool": "Prometheus",
"alertMetric": "latency",
"thresholdValue": 50.0,
"thresholdUnit": "ms",
}
// Manually create many recent critical alerts to simulate an unhealthy state
currentTime := 10000 // 10 seconds
recentAlerts := []AlertEvent{
{Timestamp: currentTime - 1000, MetricType: "latency", Severity: "critical", Value: 200},
{Timestamp: currentTime - 2000, MetricType: "latency", Severity: "critical", Value: 180},
{Timestamp: currentTime - 3000, MetricType: "latency", Severity: "critical", Value: 190},
{Timestamp: currentTime - 4000, MetricType: "latency", Severity: "critical", Value: 170},
{Timestamp: currentTime - 5000, MetricType: "latency", Severity: "critical", Value: 160},
{Timestamp: currentTime - 6000, MetricType: "latency", Severity: "critical", Value: 150},
}
// Set up the props with existing critical alerts
props["_alerts"] = recentAlerts
// Make a request that would trigger another alert (low latency to avoid triggering new alert)
requests := []*Request{{ID: "1", Type: "GET", LatencyMS: 40}}
// This tick should recognize the existing critical alerts and mark system as unhealthy
_, healthy := logic.Tick(props, requests, 100) // tick 100 = 10000ms
if healthy {
t.Error("Expected monitoring to be unhealthy due to many recent critical alerts")
}
}
func TestMonitoringLogic_MetricsHistoryLimit(t *testing.T) {
logic := MonitoringLogic{}
props := map[string]any{
"tool": "Prometheus",
}
request := []*Request{{ID: "1", Type: "GET", LatencyMS: 50}}
// Generate more than 10 metric data points
for i := 0; i < 15; i++ {
logic.Tick(props, request, i)
}
metrics, ok := props["_metrics"].([]MetricData)
if !ok {
t.Error("Expected metrics to be stored")
}
if len(metrics) != 10 {
t.Errorf("Expected metrics history to be limited to 10, got %d", len(metrics))
}
}

55
internal/simulation/testdata/cache_design.json vendored

@ -1,55 +0,0 @@
{
"nodes": [
{
"id": "webserver",
"type": "webserver",
"position": { "x": 0, "y": 0 },
"props": {
"label": "Web Server",
"rpsCapacity": 100
}
},
{
"id": "cache",
"type": "cache",
"position": { "x": 100, "y": 0 },
"props": {
"label": "Redis Cache",
"cacheTTL": 300000,
"maxEntries": 1000,
"evictionPolicy": "LRU"
}
},
{
"id": "database",
"type": "database",
"position": { "x": 200, "y": 0 },
"props": {
"label": "Primary DB",
"replication": 2,
"maxRPS": 500,
"baseLatencyMs": 20
}
}
],
"connections": [
{
"source": "webserver",
"target": "cache",
"label": "Cache Lookup",
"direction": "forward",
"protocol": "Redis",
"tls": false,
"capacity": 1000
},
{
"source": "cache",
"target": "database",
"label": "Cache Miss",
"direction": "forward",
"protocol": "TCP",
"tls": true,
"capacity": 1000
}
]
}

35
internal/simulation/testdata/database_design.json vendored

@ -1,35 +0,0 @@
{
"nodes": [
{
"id": "webserver",
"type": "webserver",
"position": { "x": 0, "y": 0 },
"props": {
"label": "Web Server",
"rpsCapacity": 100
}
},
{
"id": "database",
"type": "database",
"position": { "x": 100, "y": 0 },
"props": {
"label": "Primary DB",
"replication": 2,
"maxRPS": 500,
"baseLatencyMs": 15
}
}
],
"connections": [
{
"source": "webserver",
"target": "database",
"label": "DB Queries",
"direction": "forward",
"protocol": "TCP",
"tls": true,
"capacity": 1000
}
]
}

188
internal/simulation/testdata/datapipeline_design.json vendored

@ -1,188 +0,0 @@
{
"nodes": [
{
"id": "data-source",
"type": "webserver",
"position": { "x": 100, "y": 200 },
"props": {
"label": "Data Ingestion API",
"rpsCapacity": 500
}
},
{
"id": "raw-data-queue",
"type": "messageQueue",
"position": { "x": 300, "y": 200 },
"props": {
"label": "Raw Data Queue",
"queueCapacity": 10000,
"retentionSeconds": 3600,
"processingRate": 200
}
},
{
"id": "etl-pipeline-1",
"type": "data pipeline",
"position": { "x": 500, "y": 150 },
"props": {
"label": "Data Cleansing Pipeline",
"batchSize": 100,
"transformation": "validate"
}
},
{
"id": "etl-pipeline-2",
"type": "data pipeline",
"position": { "x": 500, "y": 250 },
"props": {
"label": "Data Transformation Pipeline",
"batchSize": 50,
"transformation": "aggregate"
}
},
{
"id": "ml-pipeline",
"type": "data pipeline",
"position": { "x": 700, "y": 150 },
"props": {
"label": "ML Feature Pipeline",
"batchSize": 200,
"transformation": "enrich"
}
},
{
"id": "analytics-pipeline",
"type": "data pipeline",
"position": { "x": 700, "y": 250 },
"props": {
"label": "Analytics Pipeline",
"batchSize": 500,
"transformation": "join"
}
},
{
"id": "cache-1",
"type": "cache",
"position": { "x": 900, "y": 150 },
"props": {
"label": "Feature Cache",
"cacheTTL": 300,
"maxEntries": 50000,
"evictionPolicy": "LRU"
}
},
{
"id": "data-warehouse",
"type": "database",
"position": { "x": 900, "y": 250 },
"props": {
"label": "Data Warehouse",
"replication": 3,
"maxRPS": 1000,
"baseLatencyMs": 50
}
},
{
"id": "monitoring-1",
"type": "monitoring/alerting",
"position": { "x": 500, "y": 350 },
"props": {
"label": "Pipeline Monitor",
"tool": "Datadog",
"alertMetric": "latency",
"thresholdValue": 1000,
"thresholdUnit": "ms"
}
},
{
"id": "compression-pipeline",
"type": "data pipeline",
"position": { "x": 300, "y": 350 },
"props": {
"label": "Data Compression",
"batchSize": 1000,
"transformation": "compress"
}
}
],
"connections": [
{
"source": "data-source",
"target": "raw-data-queue",
"label": "Raw Data Stream",
"protocol": "http"
},
{
"source": "raw-data-queue",
"target": "etl-pipeline-1",
"label": "Data Validation",
"protocol": "tcp"
},
{
"source": "raw-data-queue",
"target": "etl-pipeline-2",
"label": "Data Transformation",
"protocol": "tcp"
},
{
"source": "etl-pipeline-1",
"target": "ml-pipeline",
"label": "Clean Data",
"protocol": "tcp"
},
{
"source": "etl-pipeline-2",
"target": "analytics-pipeline",
"label": "Transformed Data",
"protocol": "tcp"
},
{
"source": "ml-pipeline",
"target": "cache-1",
"label": "ML Features",
"protocol": "tcp"
},
{
"source": "analytics-pipeline",
"target": "data-warehouse",
"label": "Analytics Data",
"protocol": "tcp"
},
{
"source": "etl-pipeline-1",
"target": "monitoring-1",
"label": "Pipeline Metrics",
"protocol": "http"
},
{
"source": "etl-pipeline-2",
"target": "monitoring-1",
"label": "Pipeline Metrics",
"protocol": "http"
},
{
"source": "ml-pipeline",
"target": "monitoring-1",
"label": "Pipeline Metrics",
"protocol": "http"
},
{
"source": "analytics-pipeline",
"target": "monitoring-1",
"label": "Pipeline Metrics",
"protocol": "http"
},
{
"source": "raw-data-queue",
"target": "compression-pipeline",
"label": "Archive Stream",
"protocol": "tcp"
},
{
"source": "compression-pipeline",
"target": "data-warehouse",
"label": "Compressed Archive",
"protocol": "tcp"
}
]
}

53
internal/simulation/testdata/messagequeue_design.json vendored

@ -1,53 +0,0 @@
{
"nodes": [
{
"id": "producer",
"type": "webserver",
"position": { "x": 0, "y": 0 },
"props": {
"label": "Message Producer",
"rpsCapacity": 50
}
},
{
"id": "messagequeue",
"type": "messageQueue",
"position": { "x": 100, "y": 0 },
"props": {
"label": "Event Queue",
"queueCapacity": 1000,
"retentionSeconds": 3600,
"processingRate": 100
}
},
{
"id": "consumer",
"type": "webserver",
"position": { "x": 200, "y": 0 },
"props": {
"label": "Message Consumer",
"rpsCapacity": 80
}
}
],
"connections": [
{
"source": "producer",
"target": "messagequeue",
"label": "Publish Messages",
"direction": "forward",
"protocol": "AMQP",
"tls": false,
"capacity": 1000
},
{
"source": "messagequeue",
"target": "consumer",
"label": "Consume Messages",
"direction": "forward",
"protocol": "AMQP",
"tls": false,
"capacity": 1000
}
]
}

96
internal/simulation/testdata/microservice_design.json vendored

@ -1,96 +0,0 @@
{
"nodes": [
{
"id": "webserver-1",
"type": "webserver",
"position": { "x": 100, "y": 200 },
"props": {
"label": "API Gateway",
"rpsCapacity": 200
}
},
{
"id": "lb-1",
"type": "loadbalancer",
"position": { "x": 300, "y": 200 },
"props": {
"label": "API Gateway",
"algorithm": "round-robin"
}
},
{
"id": "microservice-1",
"type": "microservice",
"position": { "x": 500, "y": 150 },
"props": {
"label": "User Service",
"instanceCount": 3,
"cpu": 4,
"ramGb": 8,
"rpsCapacity": 100,
"monthlyUsd": 150,
"scalingStrategy": "auto",
"apiVersion": "v2"
}
},
{
"id": "microservice-2",
"type": "microservice",
"position": { "x": 500, "y": 250 },
"props": {
"label": "Order Service",
"instanceCount": 2,
"cpu": 2,
"ramGb": 4,
"rpsCapacity": 80,
"monthlyUsd": 90,
"scalingStrategy": "manual",
"apiVersion": "v1"
}
},
{
"id": "db-1",
"type": "database",
"position": { "x": 700, "y": 200 },
"props": {
"label": "PostgreSQL",
"replication": 2,
"maxRPS": 500,
"baseLatencyMs": 15
}
}
],
"connections": [
{
"source": "webserver-1",
"target": "lb-1",
"label": "HTTPS Requests",
"protocol": "https",
"tls": true
},
{
"source": "lb-1",
"target": "microservice-1",
"label": "User API",
"protocol": "http"
},
{
"source": "lb-1",
"target": "microservice-2",
"label": "Order API",
"protocol": "http"
},
{
"source": "microservice-1",
"target": "db-1",
"label": "User Queries",
"protocol": "tcp"
},
{
"source": "microservice-2",
"target": "db-1",
"label": "Order Queries",
"protocol": "tcp"
}
]
}

127
internal/simulation/testdata/monitoring_design.json vendored

@ -1,127 +0,0 @@
{
"nodes": [
{
"id": "webserver-1",
"type": "webserver",
"position": { "x": 100, "y": 200 },
"props": {
"label": "Web Server",
"rpsCapacity": 100
}
},
{
"id": "monitor-1",
"type": "monitoring/alerting",
"position": { "x": 300, "y": 200 },
"props": {
"label": "Prometheus Monitor",
"tool": "Prometheus",
"alertMetric": "latency",
"thresholdValue": 80,
"thresholdUnit": "ms"
}
},
{
"id": "lb-1",
"type": "loadbalancer",
"position": { "x": 500, "y": 200 },
"props": {
"label": "Load Balancer",
"algorithm": "round-robin"
}
},
{
"id": "microservice-1",
"type": "microservice",
"position": { "x": 700, "y": 150 },
"props": {
"label": "User Service",
"instanceCount": 2,
"cpu": 2,
"ramGb": 4,
"rpsCapacity": 50,
"scalingStrategy": "auto"
}
},
{
"id": "microservice-2",
"type": "microservice",
"position": { "x": 700, "y": 250 },
"props": {
"label": "Order Service",
"instanceCount": 1,
"cpu": 1,
"ramGb": 2,
"rpsCapacity": 30,
"scalingStrategy": "manual"
}
},
{
"id": "monitor-2",
"type": "monitoring/alerting",
"position": { "x": 900, "y": 200 },
"props": {
"label": "Error Rate Monitor",
"tool": "Datadog",
"alertMetric": "error_rate",
"thresholdValue": 5,
"thresholdUnit": "percent"
}
},
{
"id": "db-1",
"type": "database",
"position": { "x": 1100, "y": 200 },
"props": {
"label": "PostgreSQL",
"replication": 2,
"maxRPS": 200,
"baseLatencyMs": 15
}
}
],
"connections": [
{
"source": "webserver-1",
"target": "monitor-1",
"label": "HTTP Requests",
"protocol": "http"
},
{
"source": "monitor-1",
"target": "lb-1",
"label": "Monitored Requests",
"protocol": "http"
},
{
"source": "lb-1",
"target": "microservice-1",
"label": "User API",
"protocol": "http"
},
{
"source": "lb-1",
"target": "microservice-2",
"label": "Order API",
"protocol": "http"
},
{
"source": "microservice-1",
"target": "monitor-2",
"label": "Service Metrics",
"protocol": "http"
},
{
"source": "microservice-2",
"target": "monitor-2",
"label": "Service Metrics",
"protocol": "http"
},
{
"source": "monitor-2",
"target": "db-1",
"label": "Database Queries",
"protocol": "tcp"
}
]
}

2
internal/simulation/testdata/simple_design.json vendored

@ -16,7 +16,7 @@
"props": { "props": {
"label": "Web Server", "label": "Web Server",
"instanceSize": "medium", "instanceSize": "medium",
"rpsCapacity": 5, "capacityRPS": 5,
"baseLatencyMs": 50, "baseLatencyMs": 50,
"penaltyPerRPS": 10 "penaltyPerRPS": 10
} }

164
internal/simulation/testdata/thirdpartyservice_design.json vendored

@ -1,164 +0,0 @@
{
"nodes": [
{
"id": "webserver-1",
"type": "webserver",
"position": { "x": 100, "y": 200 },
"props": {
"label": "E-commerce API",
"rpsCapacity": 200
}
},
{
"id": "microservice-1",
"type": "microservice",
"position": { "x": 300, "y": 200 },
"props": {
"label": "Payment Service",
"instanceCount": 2,
"cpu": 4,
"ramGb": 8,
"rpsCapacity": 100,
"scalingStrategy": "auto"
}
},
{
"id": "stripe-service",
"type": "third party service",
"position": { "x": 500, "y": 150 },
"props": {
"label": "Stripe Payments",
"provider": "Stripe",
"latency": 180
}
},
{
"id": "twilio-service",
"type": "third party service",
"position": { "x": 500, "y": 250 },
"props": {
"label": "SMS Notifications",
"provider": "Twilio",
"latency": 250
}
},
{
"id": "microservice-2",
"type": "microservice",
"position": { "x": 300, "y": 350 },
"props": {
"label": "Notification Service",
"instanceCount": 1,
"cpu": 2,
"ramGb": 4,
"rpsCapacity": 50,
"scalingStrategy": "manual"
}
},
{
"id": "sendgrid-service",
"type": "third party service",
"position": { "x": 500, "y": 350 },
"props": {
"label": "Email Service",
"provider": "SendGrid",
"latency": 200
}
},
{
"id": "slack-service",
"type": "third party service",
"position": { "x": 500, "y": 450 },
"props": {
"label": "Slack Alerts",
"provider": "Slack",
"latency": 300
}
},
{
"id": "monitor-1",
"type": "monitoring/alerting",
"position": { "x": 700, "y": 200 },
"props": {
"label": "System Monitor",
"tool": "Datadog",
"alertMetric": "latency",
"thresholdValue": 500,
"thresholdUnit": "ms"
}
},
{
"id": "db-1",
"type": "database",
"position": { "x": 700, "y": 350 },
"props": {
"label": "Transaction DB",
"replication": 2,
"maxRPS": 300,
"baseLatencyMs": 20
}
}
],
"connections": [
{
"source": "webserver-1",
"target": "microservice-1",
"label": "Payment Requests",
"protocol": "https"
},
{
"source": "microservice-1",
"target": "stripe-service",
"label": "Process Payment",
"protocol": "https"
},
{
"source": "microservice-1",
"target": "twilio-service",
"label": "SMS Confirmation",
"protocol": "https"
},
{
"source": "webserver-1",
"target": "microservice-2",
"label": "Notification Requests",
"protocol": "https"
},
{
"source": "microservice-2",
"target": "sendgrid-service",
"label": "Send Email",
"protocol": "https"
},
{
"source": "microservice-2",
"target": "slack-service",
"label": "Admin Alerts",
"protocol": "https"
},
{
"source": "stripe-service",
"target": "monitor-1",
"label": "Payment Metrics",
"protocol": "http"
},
{
"source": "twilio-service",
"target": "monitor-1",
"label": "SMS Metrics",
"protocol": "http"
},
{
"source": "sendgrid-service",
"target": "monitor-1",
"label": "Email Metrics",
"protocol": "http"
},
{
"source": "monitor-1",
"target": "db-1",
"label": "Store Metrics",
"protocol": "tcp"
}
]
}

219
internal/simulation/thirdpartyservice.go

@ -1,219 +0,0 @@
package simulation
import (
"math/rand"
)
type ThirdPartyServiceLogic struct{}
type ServiceStatus struct {
IsUp bool
LastCheck int
FailureCount int
SuccessCount int
RateLimitHits int
}
func (t ThirdPartyServiceLogic) Tick(props map[string]any, queue []*Request, tick int) ([]*Request, bool) {
// Extract third-party service properties
provider := AsString(props["provider"])
if provider == "" {
provider = "Generic" // default provider
}
baseLatency := int(AsFloat64(props["latency"]))
if baseLatency == 0 {
baseLatency = 200 // default 200ms latency
}
// Get service status from props (persistent state)
status, ok := props["_serviceStatus"].(ServiceStatus)
if !ok {
status = ServiceStatus{
IsUp: true,
LastCheck: tick,
FailureCount: 0,
SuccessCount: 0,
RateLimitHits: 0,
}
}
currentTime := tick * 100 // Convert tick to milliseconds
// Simulate service availability and characteristics based on provider
reliability := t.getProviderReliability(provider)
rateLimitRPS := t.getProviderRateLimit(provider)
latencyVariance := t.getProviderLatencyVariance(provider)
// Check if service is down and should recover
if !status.IsUp {
// Services typically recover after some time
if currentTime-status.LastCheck > 30000 { // 30 seconds downtime
status.IsUp = true
status.FailureCount = 0
}
}
// Apply rate limiting - third-party services often have strict limits
requestsThisTick := len(queue)
if requestsThisTick > rateLimitRPS {
status.RateLimitHits++
// Only process up to rate limit
queue = queue[:rateLimitRPS]
}
output := []*Request{}
for _, req := range queue {
reqCopy := *req
// Simulate service availability
if !status.IsUp {
// Service is down - simulate timeout/error
reqCopy.LatencyMS += 10000 // 10 second timeout
reqCopy.Path = append(reqCopy.Path, "third-party-timeout")
status.FailureCount++
} else {
// Service is up - calculate response time
serviceLatency := t.calculateServiceLatency(provider, baseLatency, latencyVariance)
// Random failure based on reliability
if rand.Float64() > reliability {
// Service call failed
serviceLatency += 5000 // 5 second timeout on failure
reqCopy.Path = append(reqCopy.Path, "third-party-failed")
status.FailureCount++
// If too many failures, mark service as down
if status.FailureCount > 5 {
status.IsUp = false
status.LastCheck = currentTime
}
} else {
// Successful service call
reqCopy.Path = append(reqCopy.Path, "third-party-success")
status.SuccessCount++
// Reset failure count on successful calls
if status.FailureCount > 0 {
status.FailureCount--
}
}
reqCopy.LatencyMS += serviceLatency
}
output = append(output, &reqCopy)
}
// Update persistent state
props["_serviceStatus"] = status
// Health check: service is healthy if external service is up and not excessively rate limited
// Allow some rate limiting but not too much
maxRateLimitHits := 10 // Allow up to 10 rate limit hits before considering unhealthy
healthy := status.IsUp && status.RateLimitHits < maxRateLimitHits
return output, healthy
}
// getProviderReliability returns the reliability percentage for different providers
func (t ThirdPartyServiceLogic) getProviderReliability(provider string) float64 {
switch provider {
case "Stripe":
return 0.999 // 99.9% uptime
case "Twilio":
return 0.998 // 99.8% uptime
case "SendGrid":
return 0.997 // 99.7% uptime
case "AWS":
return 0.9995 // 99.95% uptime
case "Google":
return 0.9999 // 99.99% uptime
case "Slack":
return 0.995 // 99.5% uptime
case "GitHub":
return 0.996 // 99.6% uptime
case "Shopify":
return 0.998 // 99.8% uptime
default:
return 0.99 // 99% uptime for generic services
}
}
// getProviderRateLimit returns the rate limit (requests per tick) for different providers
func (t ThirdPartyServiceLogic) getProviderRateLimit(provider string) int {
switch provider {
case "Stripe":
return 100 // 100 requests per second (per tick in our sim)
case "Twilio":
return 50 // More restrictive
case "SendGrid":
return 200 // Email is typically higher volume
case "AWS":
return 1000 // Very high limits
case "Google":
return 500 // High but controlled
case "Slack":
return 30 // Very restrictive for chat APIs
case "GitHub":
return 60 // GitHub API limits
case "Shopify":
return 80 // E-commerce API limits
default:
return 100 // Default rate limit
}
}
// getProviderLatencyVariance returns the latency variance factor for different providers
func (t ThirdPartyServiceLogic) getProviderLatencyVariance(provider string) float64 {
switch provider {
case "Stripe":
return 0.3 // Low variance, consistent performance
case "Twilio":
return 0.5 // Moderate variance
case "SendGrid":
return 0.4 // Email services are fairly consistent
case "AWS":
return 0.2 // Very consistent
case "Google":
return 0.25 // Very consistent
case "Slack":
return 0.6 // Chat services can be variable
case "GitHub":
return 0.4 // Moderate variance
case "Shopify":
return 0.5 // E-commerce can be variable under load
default:
return 0.5 // Default variance
}
}
// calculateServiceLatency computes the actual latency including variance
func (t ThirdPartyServiceLogic) calculateServiceLatency(provider string, baseLatency int, variance float64) int {
// Add random variance to base latency
varianceMs := float64(baseLatency) * variance
randomVariance := (rand.Float64() - 0.5) * 2 * varianceMs // -variance to +variance
finalLatency := float64(baseLatency) + randomVariance
// Ensure minimum latency (can't be negative or too low)
if finalLatency < 10 {
finalLatency = 10
}
// Add provider-specific baseline adjustments
switch provider {
case "AWS", "Google":
// Cloud providers are typically fast
finalLatency *= 0.8
case "Slack":
// Chat APIs can be slower
finalLatency *= 1.2
case "Twilio":
// Telecom APIs have processing overhead
finalLatency *= 1.1
}
return int(finalLatency)
}
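The latency calculation in the removed ThirdPartyServiceLogic above reads as a small formula: base latency plus a uniform random jitter of ±variance, clamped to a 10ms floor, then scaled by a provider factor. A standalone sketch, with the provider multiplier passed in explicitly instead of looked up:
package main

import (
	"fmt"
	"math/rand"
)

// serviceLatency reproduces the shape of the removed calculation:
// base ± (base*variance) jitter, a 10ms floor, then a provider multiplier.
func serviceLatency(baseMS int, variance, providerFactor float64) int {
	jitter := (rand.Float64() - 0.5) * 2 * float64(baseMS) * variance // -variance..+variance
	latency := float64(baseMS) + jitter
	if latency < 10 {
		latency = 10 // never report an unrealistically low latency
	}
	return int(latency * providerFactor)
}

func main() {
	// e.g. Stripe-like settings: 150ms base, 0.3 variance, no provider adjustment
	fmt.Println(serviceLatency(150, 0.3, 1.0))
}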

382
internal/simulation/thirdpartyservice_test.go

@ -1,382 +0,0 @@
package simulation
import (
"testing"
)
func TestThirdPartyServiceLogic_BasicProcessing(t *testing.T) {
logic := ThirdPartyServiceLogic{}
props := map[string]any{
"provider": "Stripe",
"latency": 150.0,
}
requests := []*Request{
{ID: "1", Type: "POST", LatencyMS: 50, Path: []string{}},
{ID: "2", Type: "GET", LatencyMS: 30, Path: []string{}},
}
output, healthy := logic.Tick(props, requests, 1)
if !healthy {
t.Error("Expected third party service to be healthy")
}
if len(output) != 2 {
t.Errorf("Expected 2 processed requests, got %d", len(output))
}
// Verify latency was added (should be around base latency with some variance)
for i, req := range output {
originalLatency := requests[i].LatencyMS
if req.LatencyMS <= originalLatency {
t.Errorf("Expected third party service latency to be added")
}
// Check that path was updated
if len(req.Path) == 0 {
t.Error("Expected path to be updated")
}
lastPathElement := req.Path[len(req.Path)-1]
if lastPathElement != "third-party-success" && lastPathElement != "third-party-failed" {
t.Errorf("Expected path to indicate success or failure, got %s", lastPathElement)
}
}
}
func TestThirdPartyServiceLogic_ProviderCharacteristics(t *testing.T) {
logic := ThirdPartyServiceLogic{}
providers := []string{"Stripe", "AWS", "Slack", "Twilio"}
for _, provider := range providers {
t.Run(provider, func(t *testing.T) {
props := map[string]any{
"provider": provider,
"latency": 100.0,
}
requests := []*Request{{ID: "1", Type: "POST", LatencyMS: 0, Path: []string{}}}
output, healthy := logic.Tick(props, requests, 1)
if !healthy {
t.Errorf("Expected %s service to be healthy", provider)
}
if len(output) != 1 {
t.Errorf("Expected 1 processed request for %s", provider)
}
// Verify latency characteristics
addedLatency := output[0].LatencyMS
if addedLatency <= 0 {
t.Errorf("Expected %s to add latency", provider)
}
// AWS and Google should be faster than Slack
if provider == "AWS" && addedLatency > 200 {
t.Errorf("Expected AWS to have lower latency, got %dms", addedLatency)
}
})
}
}
func TestThirdPartyServiceLogic_RateLimiting(t *testing.T) {
logic := ThirdPartyServiceLogic{}
props := map[string]any{
"provider": "Slack", // Has low rate limit (30 RPS)
"latency": 100.0,
}
// Send more requests than rate limit
requests := make([]*Request, 50) // More than Slack's 30 RPS limit
for i := range requests {
requests[i] = &Request{ID: string(rune('1' + i)), Type: "POST", LatencyMS: 0}
}
output, healthy := logic.Tick(props, requests, 1)
// Should only process up to rate limit
if len(output) != 30 {
t.Errorf("Expected 30 processed requests due to Slack rate limit, got %d", len(output))
}
// Service should still be healthy with rate limiting
if !healthy {
t.Error("Expected service to be healthy despite rate limiting")
}
// Check that rate limit hits were recorded
status, ok := props["_serviceStatus"].(ServiceStatus)
if !ok {
t.Error("Expected service status to be recorded")
}
if status.RateLimitHits != 1 {
t.Errorf("Expected 1 rate limit hit, got %d", status.RateLimitHits)
}
}
func TestThirdPartyServiceLogic_ServiceFailure(t *testing.T) {
logic := ThirdPartyServiceLogic{}
props := map[string]any{
"provider": "Generic",
"latency": 100.0,
}
// Set up service as already having failures
status := ServiceStatus{
IsUp: false,
LastCheck: 0,
FailureCount: 6,
}
props["_serviceStatus"] = status
requests := []*Request{{ID: "1", Type: "POST", LatencyMS: 50, Path: []string{}}}
output, healthy := logic.Tick(props, requests, 1)
if healthy {
t.Error("Expected service to be unhealthy when external service is down")
}
if len(output) != 1 {
t.Error("Expected request to be processed even when service is down")
}
// Should have very high latency due to timeout
if output[0].LatencyMS < 5000 {
t.Errorf("Expected high latency for service failure, got %dms", output[0].LatencyMS)
}
// Check path indicates timeout
lastPath := output[0].Path[len(output[0].Path)-1]
if lastPath != "third-party-timeout" {
t.Errorf("Expected timeout path, got %s", lastPath)
}
}
func TestThirdPartyServiceLogic_ServiceRecovery(t *testing.T) {
logic := ThirdPartyServiceLogic{}
props := map[string]any{
"provider": "Stripe",
"latency": 100.0,
}
// Set up service as down but with old timestamp (should recover)
status := ServiceStatus{
IsUp: false,
LastCheck: 0, // Very old timestamp
FailureCount: 3,
}
props["_serviceStatus"] = status
requests := []*Request{{ID: "1", Type: "POST", LatencyMS: 50, Path: []string{}}}
// Run with current tick that's more than 30 seconds later
_, healthy := logic.Tick(props, requests, 400) // 40 seconds later
if !healthy {
t.Error("Expected service to be healthy after recovery")
}
// Check that service recovered
updatedStatus, ok := props["_serviceStatus"].(ServiceStatus)
if !ok {
t.Error("Expected updated service status")
}
if !updatedStatus.IsUp {
t.Error("Expected service to have recovered")
}
if updatedStatus.FailureCount != 0 {
t.Error("Expected failure count to be reset on recovery")
}
}
func TestThirdPartyServiceLogic_ReliabilityDifferences(t *testing.T) {
logic := ThirdPartyServiceLogic{}
// Test different reliability levels
testCases := []struct {
provider string
expectedReliability float64
}{
{"AWS", 0.9995},
{"Google", 0.9999},
{"Stripe", 0.999},
{"Slack", 0.995},
{"Generic", 0.99},
}
for _, tc := range testCases {
reliability := logic.getProviderReliability(tc.provider)
if reliability != tc.expectedReliability {
t.Errorf("Expected %s reliability %.4f, got %.4f",
tc.provider, tc.expectedReliability, reliability)
}
}
}
func TestThirdPartyServiceLogic_RateLimitDifferences(t *testing.T) {
logic := ThirdPartyServiceLogic{}
// Test different rate limits
testCases := []struct {
provider string
expectedLimit int
}{
{"AWS", 1000},
{"Stripe", 100},
{"Slack", 30},
{"SendGrid", 200},
{"Twilio", 50},
}
for _, tc := range testCases {
rateLimit := logic.getProviderRateLimit(tc.provider)
if rateLimit != tc.expectedLimit {
t.Errorf("Expected %s rate limit %d, got %d",
tc.provider, tc.expectedLimit, rateLimit)
}
}
}
func TestThirdPartyServiceLogic_LatencyVariance(t *testing.T) {
logic := ThirdPartyServiceLogic{}
props := map[string]any{
"provider": "Stripe",
"latency": 100.0,
}
requests := []*Request{{ID: "1", Type: "POST", LatencyMS: 0, Path: []string{}}}
latencies := []int{}
// Run multiple times to observe variance
for i := 0; i < 10; i++ {
output, _ := logic.Tick(props, requests, i)
latencies = append(latencies, output[0].LatencyMS)
}
// Check that we have variance (not all latencies are the same)
allSame := true
firstLatency := latencies[0]
for _, latency := range latencies[1:] {
if latency != firstLatency {
allSame = false
break
}
}
if allSame {
t.Error("Expected latency variance, but all latencies were the same")
}
// All latencies should be reasonable (between 50ms and 300ms for Stripe)
for _, latency := range latencies {
if latency < 50 || latency > 300 {
t.Errorf("Expected reasonable latency for Stripe, got %dms", latency)
}
}
}
func TestThirdPartyServiceLogic_DefaultValues(t *testing.T) {
logic := ThirdPartyServiceLogic{}
// Empty props should use defaults
props := map[string]any{}
requests := []*Request{{ID: "1", Type: "POST", LatencyMS: 0, Path: []string{}}}
output, healthy := logic.Tick(props, requests, 1)
if !healthy {
t.Error("Expected service to be healthy with default values")
}
if len(output) != 1 {
t.Error("Expected 1 processed request with defaults")
}
// Should have reasonable default latency (around 200ms base)
if output[0].LatencyMS < 100 || output[0].LatencyMS > 400 {
t.Errorf("Expected reasonable default latency, got %dms", output[0].LatencyMS)
}
}
func TestThirdPartyServiceLogic_SuccessCountTracking(t *testing.T) {
logic := ThirdPartyServiceLogic{}
props := map[string]any{
"provider": "AWS", // High reliability
"latency": 50.0,
}
requests := []*Request{{ID: "1", Type: "POST", LatencyMS: 0, Path: []string{}}}
// Run multiple successful requests
for i := 0; i < 5; i++ {
logic.Tick(props, requests, i)
}
status, ok := props["_serviceStatus"].(ServiceStatus)
if !ok {
t.Error("Expected service status to be tracked")
}
// Should have accumulated success count
if status.SuccessCount == 0 {
t.Error("Expected success count to be tracked")
}
// Should be healthy
if !status.IsUp {
t.Error("Expected service to remain up with successful calls")
}
}
func TestThirdPartyServiceLogic_FailureRecovery(t *testing.T) {
logic := ThirdPartyServiceLogic{}
props := map[string]any{
"provider": "Generic",
"latency": 100.0,
}
// Set up service with some failures but still up
status := ServiceStatus{
IsUp: true,
FailureCount: 3,
SuccessCount: 0,
}
props["_serviceStatus"] = status
requests := []*Request{{ID: "1", Type: "POST", LatencyMS: 0, Path: []string{}}}
// Simulate a successful call (with high probability for Generic service)
// We'll run this multiple times to ensure we get at least one success
successFound := false
for i := 0; i < 10 && !successFound; i++ {
output, _ := logic.Tick(props, requests, i)
if len(output[0].Path) > 0 && output[0].Path[len(output[0].Path)-1] == "third-party-success" {
successFound = true
}
}
if successFound {
updatedStatus, _ := props["_serviceStatus"].(ServiceStatus)
// Failure count should have decreased
if updatedStatus.FailureCount >= 3 {
t.Error("Expected failure count to decrease after successful call")
}
}
}

18
internal/simulation/user.go

@ -1,18 +0,0 @@
package simulation
// UserLogic represents the behavior of user components in the simulation.
// User components serve as traffic sources and don't process requests themselves.
// Traffic generation is handled by the simulation engine at the entry point.
type UserLogic struct{}
// Tick implements the NodeLogic interface for User components.
// User components don't process requests - they just pass them through.
// The simulation engine handles traffic generation at entry points.
func (u UserLogic) Tick(props map[string]any, queue []*Request, tick int) ([]*Request, bool) {
// User components just pass through any requests they receive
// In practice, User components are typically entry points so they
// receive requests from the simulation engine itself
return queue, true
}
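Every component in this package exposes the same Tick signature; a minimal sketch of the interface this suggests, assuming it sits in the simulation package where Request is declared (the NodeLogic name is an assumption here, only the method signature is taken from the code):
package simulation

// NodeLogic is the contract this sketch assumes UserLogic, WebServerLogic,
// MonitoringLogic and the other *Logic types satisfy.
type NodeLogic interface {
	Tick(props map[string]any, queue []*Request, tick int) ([]*Request, bool)
}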

22
internal/simulation/webserver.go

@ -6,29 +6,21 @@ type WebServerLogic struct {
 }
 func (l WebServerLogic) Tick(props map[string]any, queue []*Request, tick int) ([]*Request, bool) {
-	maxRPS := int(AsFloat64(props["rpsCapacity"]))
+	maxRPS := int(AsFloat64(props["capacityRPS"]))
 	toProcess := queue
 	if len(queue) > maxRPS {
 		toProcess = queue[:maxRPS]
 	}
-	// Get base latency for web server operations
-	baseLatencyMs := int(AsFloat64(props["baseLatencyMs"]))
-	if baseLatencyMs == 0 {
-		baseLatencyMs = 20 // default 20ms for web server processing
-	}
 	var output []*Request
 	for _, req := range toProcess {
-		// Create a copy of the request to preserve existing latency
-		reqCopy := *req
-		// Add web server processing latency
-		reqCopy.LatencyMS += baseLatencyMs
-		reqCopy.Path = append(reqCopy.Path, "webserver-processed")
-		output = append(output, &reqCopy)
+		output = append(output, &Request{
+			ID:        req.ID,
+			Timestamp: req.Timestamp,
+			Origin:    req.Origin,
+			Type:      req.Type,
+		})
 	}
 	return output, true
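Both the old and new bodies above keep the same per-tick capacity cap; a standalone sketch of just that capping step, with string IDs standing in for *Request values:
package main

import "fmt"

// capToCapacity drops anything beyond the configured per-tick capacity,
// mirroring the queue[:maxRPS] slice both versions of Tick perform.
func capToCapacity(queue []string, maxRPS int) []string {
	if len(queue) > maxRPS {
		return queue[:maxRPS]
	}
	return queue
}

func main() {
	q := []string{"r1", "r2", "r3", "r4", "r5", "r6", "r7"}
	fmt.Println(capToCapacity(q, 5)) // [r1 r2 r3 r4 r5]
}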

93
router/handlers/chat.go

@ -1,93 +0,0 @@
package handlers
import (
"context"
"encoding/json"
"fmt"
"log"
"net/http"
"os"
"github.com/gorilla/websocket"
claude "github.com/potproject/claude-sdk-go"
)
var upgrader = websocket.Upgrader{
CheckOrigin: func(r *http.Request) bool {
return true
},
}
type MessageReceived struct {
Message string `json:"message"`
DesignPayload string `json:"designPayload"`
}
func Messages(w http.ResponseWriter, r *http.Request) {
conn, err := upgrader.Upgrade(w, r, nil)
if err != nil {
log.Printf("WebSocket upgrade failed: %v", err)
return
}
defer conn.Close()
client := claude.NewClient(os.Getenv("CLAUDE_API_KEY"))
for {
messageType, message, err := conn.ReadMessage()
if err != nil {
if websocket.IsUnexpectedCloseError(err, websocket.CloseGoingAway, websocket.CloseAbnormalClosure) {
log.Printf("WebSocket error: %v", err)
}
break
}
var messageReceived MessageReceived
err = json.Unmarshal(message, &messageReceived)
if err != nil {
fmt.Printf("error unmarshalling response: %v", err)
continue
}
if messageReceived.Message == "" {
messageReceived.Message = "<user did not send text>"
}
// Note: messageReceived.Message is already properly parsed from JSON, no need to overwrite it
prompt := fmt.Sprintf("You are a tutor that helps people learn system design. You will be given a JSON payload that looks like %s. The nodes are the components a user can put into their design and the connections will tell you how they are connected. The level name identifies what problem they are working on as well as a difficulty level. Each level has an easy, medium or hard setting. Also in the payload, there is a list of components that a user can use to build their design. Your hints and responses should only refer to these components and not refer to things that the user cannot use. Always refer to the nodes by their type. Please craft your response as if you're talking to the user. And do not reference the payload as \"payload\" but as their design. Also, please do not show the payload in your response. Do not refer to components as node-0 or whatever. Always refer to the type of component they are. Always assume that the source of traffic for any system is a user. The user component will not be visible in teh payload. Also make sure you use html to format your answer. Do not over format your response. Only use p tags. Format lists using proper lists html. Anytime the user sends a different payload back to you, make note of what is correct. Never give the actual answer, only helpful hints. If the available components do not allow the user to feasibly solve the system design problem, you should mention it and then tell them what exactly is missing from the list.", messageReceived.DesignPayload)
m := claude.RequestBodyMessages{
Model: "claude-3-7-sonnet-20250219",
MaxTokens: 1024,
SystemTypeText: []claude.RequestBodySystemTypeText{
claude.UseSystemCacheEphemeral(prompt),
},
Messages: []claude.RequestBodyMessagesMessages{
{
Role: claude.MessagesRoleUser,
ContentTypeText: []claude.RequestBodyMessagesMessagesContentTypeText{
{
Text: messageReceived.Message,
CacheControl: claude.UseCacheEphemeral(),
},
},
},
},
}
ctx := context.Background()
res, err := client.CreateMessages(ctx, m)
if err != nil {
fmt.Printf("error creating messages: %v", err)
}
// Echo the message back to client
err = conn.WriteMessage(messageType, []byte(res.Content[0].Text))
if err != nil {
log.Printf("Write error: %v", err)
break
}
}
log.Println("Client disconnected")
}
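For reference, the handler above unmarshals each WebSocket frame into MessageReceived; a standalone sketch of a conforming client payload (the example message text is invented, the two JSON field names come from the struct tags above):
package main

import (
	"encoding/json"
	"fmt"
)

// messageReceived mirrors the MessageReceived struct in chat.go.
type messageReceived struct {
	Message       string `json:"message"`
	DesignPayload string `json:"designPayload"`
}

func main() {
	b, _ := json.Marshal(messageReceived{
		Message:       "How should I reduce p95 latency?",
		DesignPayload: `{"nodes":[],"connections":[]}`,
	})
	fmt.Println(string(b))
}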

38
router/handlers/game.go

@ -1,10 +1,10 @@
 package handlers
 import (
-	"encoding/json"
-	"fmt"
 	"html/template"
 	"net/http"
+	"net/url"
+	"strings"
 	"systemdesigngame/internal/auth"
 	"systemdesigngame/internal/level"
 )
@ -14,35 +14,33 @@ type PlayHandler struct {
 }
 func (h *PlayHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
-	levelId := r.PathValue("levelId")
-	username := r.Context().Value(auth.UserLoginKey).(string)
-	avatar := r.Context().Value(auth.UserAvatarKey).(string)
-	lvl, err := level.GetLevelByID(levelId)
+	levelName := strings.TrimPrefix(r.URL.Path, "/play/")
+	levelName, err := url.PathUnescape(levelName)
 	if err != nil {
-		http.Error(w, "Level not found: "+err.Error(), http.StatusNotFound)
+		http.Error(w, "Invalid level name", http.StatusBadRequest)
 		return
 	}
-	levelPayload, err := json.Marshal(lvl)
+	username := r.Context().Value(auth.UserLoginKey).(string)
+	avatar := r.Context().Value(auth.UserAvatarKey).(string)
+	lvl, err := level.GetLevel(strings.ToLower(levelName), level.DifficultyEasy)
 	if err != nil {
-		fmt.Printf("error marshaling level: %v", err)
-		return
+		http.Error(w, "Level not found: "+err.Error(), http.StatusNotFound)
 	}
 	allLevels := level.AllLevels()
 	data := struct {
-		LevelPayload template.JS
-		Levels       []level.Level
-		Level        *level.Level
-		Avatar       string
-		Username     string
+		Levels   []level.Level
+		Level    *level.Level
+		Avatar   string
+		Username string
 	}{
-		LevelPayload: template.JS(levelPayload),
-		Levels:       allLevels,
-		Level:        lvl,
-		Avatar:       avatar,
-		Username:     username,
+		Levels:   allLevels,
+		Level:    lvl,
+		Avatar:   avatar,
+		Username: username,
 	}
 	h.Tmpl.ExecuteTemplate(w, "game.html", data)

133
router/handlers/results.go

@ -3,145 +3,20 @@ package handlers
 import (
 	"html/template"
 	"net/http"
-	"strconv"
-	"strings"
 )
 type ResultHandler struct {
 	Tmpl *template.Template
 }
-type SuccessData struct {
-	LevelName     string
-	Score         int
-	TargetRPS     int
-	AchievedRPS   int
-	TargetLatency int
-	ActualLatency float64
-	Availability  float64
-	Feedback      []string
-	LevelID       string
-}
-type FailureData struct {
-	LevelName     string
-	Reason        string
-	TargetRPS     int
-	AchievedRPS   int
-	TargetLatency int
-	ActualLatency float64
-	TargetAvail   float64
-	ActualAvail   float64
-	FailedReqs    []string
-	LevelID       string
-}
 func (r *ResultHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
-	// Default to success page for backward compatibility
-	data := SuccessData{
-		LevelName:     "Demo Level",
-		Score:         85,
-		TargetRPS:     10000,
-		AchievedRPS:   10417,
-		TargetLatency: 200,
-		ActualLatency: 87,
-		Availability:  99.9,
-		Feedback:      []string{"All requirements met successfully!"},
-		LevelID:       "demo",
-	}
+	data := struct {
+		Title string
+	}{
+		Title: "Title",
+	}
 	if err := r.Tmpl.ExecuteTemplate(w, "success.html", data); err != nil {
 		http.Error(w, "Template Error", http.StatusInternalServerError)
 	}
 }
type SuccessHandler struct {
Tmpl *template.Template
}
func (h *SuccessHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
data := SuccessData{
LevelName: req.URL.Query().Get("level"),
Score: parseInt(req.URL.Query().Get("score"), 85),
TargetRPS: parseInt(req.URL.Query().Get("targetRPS"), 10000),
AchievedRPS: parseInt(req.URL.Query().Get("achievedRPS"), 10417),
TargetLatency: parseInt(req.URL.Query().Get("targetLatency"), 200),
ActualLatency: parseFloat(req.URL.Query().Get("actualLatency"), 87),
Availability: parseFloat(req.URL.Query().Get("availability"), 99.9),
Feedback: parseStringSlice(req.URL.Query().Get("feedback")),
LevelID: req.URL.Query().Get("levelId"),
}
if data.LevelName == "" {
data.LevelName = "System Design Challenge"
}
if len(data.Feedback) == 0 {
data.Feedback = []string{"All requirements met successfully!"}
}
if err := h.Tmpl.ExecuteTemplate(w, "success.html", data); err != nil {
http.Error(w, "Template Error", http.StatusInternalServerError)
}
}
type FailureHandler struct {
Tmpl *template.Template
}
func (h *FailureHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
data := FailureData{
LevelName: req.URL.Query().Get("level"),
Reason: req.URL.Query().Get("reason"),
TargetRPS: parseInt(req.URL.Query().Get("targetRPS"), 10000),
AchievedRPS: parseInt(req.URL.Query().Get("achievedRPS"), 2847),
TargetLatency: parseInt(req.URL.Query().Get("targetLatency"), 200),
ActualLatency: parseFloat(req.URL.Query().Get("actualLatency"), 1247),
TargetAvail: parseFloat(req.URL.Query().Get("targetAvail"), 99.9),
ActualAvail: parseFloat(req.URL.Query().Get("actualAvail"), 87.3),
FailedReqs: parseStringSlice(req.URL.Query().Get("failedReqs")),
LevelID: req.URL.Query().Get("levelId"),
}
if data.LevelName == "" {
data.LevelName = "System Design Challenge"
}
if data.Reason == "" {
data.Reason = "performance"
}
if len(data.FailedReqs) == 0 {
data.FailedReqs = []string{"Latency exceeded target", "Availability below requirement"}
}
if err := h.Tmpl.ExecuteTemplate(w, "failure.html", data); err != nil {
http.Error(w, "Template Error", http.StatusInternalServerError)
}
}
// Helper functions
func parseInt(s string, defaultValue int) int {
if s == "" {
return defaultValue
}
if val, err := strconv.Atoi(s); err == nil {
return val
}
return defaultValue
}
func parseFloat(s string, defaultValue float64) float64 {
if s == "" {
return defaultValue
}
if val, err := strconv.ParseFloat(s, 64); err == nil {
return val
}
return defaultValue
}
func parseStringSlice(s string) []string {
if s == "" {
return []string{}
}
// Split by pipe character for multiple values
return strings.Split(s, "|")
}

552
router/handlers/simulation.go

@ -2,25 +2,17 @@ package handlers
 import (
 	"encoding/json"
-	"fmt"
 	"net/http"
-	"strings"
 	"systemdesigngame/internal/design"
-	"systemdesigngame/internal/level"
-	"systemdesigngame/internal/simulation"
 )
 type SimulationHandler struct{}
 type SimulationResponse struct {
 	Success  bool                   `json:"success"`
 	Metrics  map[string]interface{} `json:"metrics,omitempty"`
 	Timeline []interface{}          `json:"timeline,omitempty"`
-	Passed    bool     `json:"passed"`
-	Score     int      `json:"score,omitempty"`
-	Feedback  []string `json:"feedback,omitempty"`
-	LevelName string   `json:"levelName,omitempty"`
-	Error     string   `json:"error,omitempty"`
+	Error    string                 `json:"error,omitempty"`
 }
 func (h *SimulationHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
@ -29,531 +21,27 @@ func (h *SimulationHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 		return
 	}
-	var requestBody struct {
-		Design  design.Design `json:"design"`
-		LevelID string        `json:"levelId,omitempty"`
-	}
-	if err := json.NewDecoder(r.Body).Decode(&requestBody); err != nil {
-		// Try to decode as just design for backward compatibility
-		r.Body.Close()
-		var design design.Design
-		if err2 := json.NewDecoder(r.Body).Decode(&design); err2 != nil {
-			http.Error(w, "Invalid request JSON: "+err.Error(), http.StatusBadRequest)
-			return
-		}
-		requestBody.Design = design
-	}
-	// Extract the design for processing
-	design := requestBody.Design
-	// Run the actual simulation
-	engine := simulation.NewEngineFromDesign(design, 100)
-	if engine == nil {
-		response := SimulationResponse{
-			Success: false,
-			Error:   "Failed to create simulation engine - no valid components found",
-		}
-		w.Header().Set("Content-Type", "application/json")
-		json.NewEncoder(w).Encode(response)
+	var design design.Design
+	if err := json.NewDecoder(r.Body).Decode(&design); err != nil {
+		http.Error(w, "Invalid design JSON: "+err.Error(), http.StatusBadRequest)
 		return
 	}
-	// Set simulation parameters
-	defaultRPS := 50
-	targetRPS := defaultRPS
-	if requestBody.LevelID != "" {
-		if lvl, err := level.GetLevelByID(requestBody.LevelID); err == nil {
-			targetRPS = lvl.TargetRPS
-		}
+	// For now, return a mock successful response but eventually, we want to go to the results page(s)
+	response := SimulationResponse{
+		Success: true,
+		Metrics: map[string]interface{}{
+			"throughput":   250,
+			"latency_p95":  85,
+			"cost_monthly": 120,
+			"availability": 99.5,
+		},
+		Timeline: []interface{}{}, // Will contain TickSnapshots later
 	}
-	engine.RPS = targetRPS
-	// Find entry node by analyzing topology
-	entryNode := findEntryNode(design)
-	if entryNode == "" {
-		response := SimulationResponse{
-			Success: false,
-			Error:   "No entry point found - design must include a component with no incoming connections (webserver, microservice, load balancer, etc.)",
-		}
-		w.Header().Set("Content-Type", "application/json")
-		json.NewEncoder(w).Encode(response)
+	w.Header().Set("Content-Type", "application/json")
+	if err := json.NewEncoder(w).Encode(response); err != nil {
+		http.Error(w, "Failed to encode response", http.StatusInternalServerError)
 		return
 	}
} }
engine.EntryNode = entryNode
// Run simulation for 60 ticks (6 seconds at 100ms per tick)
snapshots := engine.Run(60, 100)
// Calculate metrics from snapshots
metrics := calculateMetrics(snapshots, design)
// Convert snapshots to interface{} for JSON serialization
timeline := make([]interface{}, len(snapshots))
for i, snapshot := range snapshots {
timeline[i] = snapshot
}
// Perform level validation if level info provided
var passed bool
var score int
var feedback []string
var levelName string
if requestBody.LevelID != "" {
if lvl, err := level.GetLevelByID(requestBody.LevelID); err == nil {
levelName = lvl.Name
passed, score, feedback = validateLevel(lvl, design, metrics)
} else {
feedback = []string{"Warning: Level not found, simulation ran without validation"}
}
}
// Build redirect URL based on success/failure
var redirectURL string
if passed {
// Success page
redirectURL = buildSuccessURL(levelName, score, metrics, feedback, requestBody.LevelID)
} else {
// Failure page
redirectURL = buildFailureURL(levelName, metrics, feedback, requestBody.LevelID)
}
// Redirect to appropriate result page
http.Redirect(w, r, redirectURL, http.StatusSeeOther)
}
// calculateMetrics computes key performance metrics from simulation snapshots
func calculateMetrics(snapshots []*simulation.TickSnapshot, design design.Design) map[string]interface{} {
if len(snapshots) == 0 {
return map[string]interface{}{
"throughput": 0,
"latency_avg": 0,
"cost_monthly": 0,
"availability": 0,
}
}
totalRequests := 0
totalLatency := 0
totalHealthy := 0
totalNodes := 0
// Build map of outgoing connections to identify final destinations
outgoingConnections := make(map[string]bool)
for _, conn := range design.Connections {
outgoingConnections[conn.Source] = true
}
// Calculate aggregate metrics across all snapshots
for _, snapshot := range snapshots {
// Count total requests processed in this tick
for nodeID, requests := range snapshot.Emitted {
// Only count requests from final destination nodes (no outgoing connections)
if !outgoingConnections[nodeID] {
totalRequests += len(requests)
for _, req := range requests {
totalLatency += req.LatencyMS
}
}
}
// Count healthy vs total nodes
for _, healthy := range snapshot.NodeHealth {
totalNodes++
if healthy {
totalHealthy++
}
}
}
// Calculate throughput (requests per second)
// snapshots represent 6 seconds of simulation (60 ticks * 100ms)
simulationSeconds := float64(len(snapshots)) * 0.1 // 100ms per tick
throughput := float64(totalRequests) / simulationSeconds
// Calculate average latency
avgLatency := 0.0
if totalRequests > 0 {
avgLatency = float64(totalLatency) / float64(totalRequests)
}
// Calculate availability percentage
availability := 0.0
if totalNodes > 0 {
availability = (float64(totalHealthy) / float64(totalNodes)) * 100
}
// Calculate monthly cost based on component specifications
monthlyCost := calculateRealMonthlyCost(design.Nodes)
return map[string]interface{}{
"throughput": int(throughput),
"latency_avg": avgLatency,
"cost_monthly": int(monthlyCost),
"availability": availability,
}
}
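// Worked example of the arithmetic above (made-up numbers): 60 snapshots at 100 ms per tick
// cover 6 s of simulated time, so 1,500 requests emitted by exit nodes give 1500 / 6 = 250 RPS;
// an aggregate latency of 127,500 ms across those requests averages 85 ms; and 570 healthy
// node-ticks out of 600 give an availability of 95.0%.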
// findEntryNode analyzes the design topology to find the best entry point
func findEntryNode(design design.Design) string {
// Build map of incoming connections
incomingCount := make(map[string]int)
// Initialize all nodes with 0 incoming connections
for _, node := range design.Nodes {
incomingCount[node.ID] = 0
}
// Count incoming connections for each node
for _, conn := range design.Connections {
incomingCount[conn.Target]++
}
// Find nodes with no incoming connections (potential entry points)
var entryPoints []string
for nodeID, count := range incomingCount {
if count == 0 {
entryPoints = append(entryPoints, nodeID)
}
}
// If multiple entry points exist, prefer certain types
if len(entryPoints) > 1 {
return preferredEntryPoint(design.Nodes, entryPoints)
} else if len(entryPoints) == 1 {
return entryPoints[0]
}
return "" // No entry point found
}
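// Simplified, self-contained illustration of the zero-in-degree rule used above. The node IDs
// and wiring are made up, and plain strings stand in for the real design types.
func exampleEntryPoint() string {
	nodes := []string{"cdn-1", "web-1", "db-1"}
	edges := [][2]string{{"cdn-1", "web-1"}, {"web-1", "db-1"}} // {source, target}
	incoming := make(map[string]int, len(nodes))
	for _, n := range nodes {
		incoming[n] = 0
	}
	for _, e := range edges {
		incoming[e[1]]++
	}
	for _, n := range nodes {
		if incoming[n] == 0 {
			return n // "cdn-1": the only node with no incoming connections
		}
	}
	return ""
}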
// preferredEntryPoint selects the best entry point from candidates based on component type
func preferredEntryPoint(nodes []design.Node, candidateIDs []string) string {
// Priority order for entry points (most logical first)
priority := []string{
"webserver",
"microservice",
"loadBalancer", // Could be edge load balancer
"cdn", // Edge CDN
"data pipeline", // Data ingestion entry
"messageQueue", // For event-driven architectures
}
// Create lookup for candidate nodes
candidates := make(map[string]design.Node)
for _, node := range nodes {
for _, id := range candidateIDs {
if node.ID == id {
candidates[id] = node
break
}
}
}
// Find highest priority candidate
for _, nodeType := range priority {
for id, node := range candidates {
if node.Type == nodeType {
return id
}
}
}
// If no preferred type, return first candidate
if len(candidateIDs) > 0 {
return candidateIDs[0]
}
return ""
}
// validateLevel checks if the design and simulation results meet level requirements
func validateLevel(lvl *level.Level, design design.Design, metrics map[string]interface{}) (bool, int, []string) {
var feedback []string
var failedRequirements []string
var passedRequirements []string
// Extract metrics
avgLatency := int(metrics["latency_avg"].(float64))
availability := metrics["availability"].(float64)
monthlyCost := metrics["cost_monthly"].(int)
// Check latency requirement (using avg latency as approximation for P95)
if avgLatency <= lvl.MaxP95LatencyMs {
passedRequirements = append(passedRequirements, "Latency requirement met")
} else {
failedRequirements = append(failedRequirements,
fmt.Sprintf("Latency: %dms (max allowed: %dms)", avgLatency, lvl.MaxP95LatencyMs))
}
// Check availability requirement
if availability >= lvl.RequiredAvailabilityPct {
passedRequirements = append(passedRequirements, "Availability requirement met")
} else {
failedRequirements = append(failedRequirements,
fmt.Sprintf("Availability: %.1f%% (required: %.1f%%)", availability, lvl.RequiredAvailabilityPct))
}
// Check cost requirement
if monthlyCost <= lvl.MaxMonthlyUSD {
passedRequirements = append(passedRequirements, "Cost requirement met")
} else {
failedRequirements = append(failedRequirements,
fmt.Sprintf("Cost: $%d/month (max allowed: $%d/month)", monthlyCost, lvl.MaxMonthlyUSD))
}
// Check component requirements
componentFeedback := validateComponentRequirements(lvl, design)
if len(componentFeedback.Failed) > 0 {
failedRequirements = append(failedRequirements, componentFeedback.Failed...)
}
if len(componentFeedback.Passed) > 0 {
passedRequirements = append(passedRequirements, componentFeedback.Passed...)
}
// Determine if passed
passed := len(failedRequirements) == 0
// Calculate score (0-100)
score := calculateScore(len(passedRequirements), len(failedRequirements), metrics)
// Build feedback
if passed {
feedback = append(feedback, "Level completed successfully!")
feedback = append(feedback, "")
feedback = append(feedback, passedRequirements...)
} else {
feedback = append(feedback, "Level failed - requirements not met:")
feedback = append(feedback, "")
feedback = append(feedback, failedRequirements...)
if len(passedRequirements) > 0 {
feedback = append(feedback, "")
feedback = append(feedback, "Requirements passed:")
feedback = append(feedback, passedRequirements...)
}
}
return passed, score, feedback
}
type ComponentValidationResult struct {
Passed []string
Failed []string
}
// validateComponentRequirements checks mustInclude, mustNotInclude, etc.
func validateComponentRequirements(lvl *level.Level, design design.Design) ComponentValidationResult {
result := ComponentValidationResult{}
// Build map of component types in design
componentTypes := make(map[string]int)
for _, node := range design.Nodes {
componentTypes[node.Type]++
}
// Check mustInclude requirements
for _, required := range lvl.MustInclude {
if count, exists := componentTypes[required]; exists && count > 0 {
result.Passed = append(result.Passed, fmt.Sprintf("Required component '%s' included", required))
} else {
result.Failed = append(result.Failed, fmt.Sprintf("Missing required component: '%s'", required))
}
}
// Check mustNotInclude requirements
for _, forbidden := range lvl.MustNotInclude {
if count, exists := componentTypes[forbidden]; exists && count > 0 {
result.Failed = append(result.Failed, fmt.Sprintf("Forbidden component used: '%s'", forbidden))
}
}
// Check minReplicas requirements
for component, minCount := range lvl.MinReplicas {
if count, exists := componentTypes[component]; exists && count >= minCount {
result.Passed = append(result.Passed, fmt.Sprintf("Sufficient '%s' replicas (%d)", component, count))
} else {
actualCount := 0
if exists {
actualCount = count
}
result.Failed = append(result.Failed,
fmt.Sprintf("Insufficient '%s' replicas: %d (minimum: %d)", component, actualCount, minCount))
}
}
return result
}
// calculateScore computes a score from 0-100 based on performance
func calculateScore(passedCount, failedCount int, metrics map[string]interface{}) int {
if failedCount > 0 {
// Failed level - score based on how many requirements passed
return (passedCount * 100) / (passedCount + failedCount)
}
// Passed level - bonus points for performance
baseScore := 70 // Base score for passing
// Performance bonuses (up to 30 points)
performanceBonus := 0
// Throughput bonus (higher throughput = better)
if throughput, ok := metrics["throughput"].(int); ok && throughput > 0 {
performanceBonus += min(10, throughput/100) // 1 point per 100 RPS, max 10
}
// Availability bonus (higher availability = better)
if availability, ok := metrics["availability"].(float64); ok {
if availability >= 99.9 {
performanceBonus += 10
} else if availability >= 99.5 {
performanceBonus += 5
}
}
// Cost efficiency bonus (lower cost = better)
if cost, ok := metrics["cost_monthly"].(int); ok && cost > 0 {
if cost <= 50 {
performanceBonus += 10
} else if cost <= 100 {
performanceBonus += 5
}
}
return min(100, baseScore+performanceBonus)
}
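// Worked scoring example (illustrative metrics, framed as a hypothetical *_test.go case):
// a passing run with 250 RPS throughput (+2), 99.5% availability (+5) and $120/month cost (+0)
// scores 70 + 7 = 77.
func TestCalculateScoreExample(t *testing.T) {
	metrics := map[string]interface{}{
		"throughput":   250,
		"availability": 99.5,
		"cost_monthly": 120,
	}
	if got := calculateScore(3, 0, metrics); got != 77 {
		t.Errorf("calculateScore = %d, want 77", got)
	}
}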
// calculateRealMonthlyCost computes monthly cost based on actual component specifications
func calculateRealMonthlyCost(nodes []design.Node) float64 {
totalCost := 0.0
for _, node := range nodes {
switch node.Type {
case "user":
// User components don't cost anything
continue
case "microservice":
if monthlyUsd, ok := node.Props["monthlyUsd"].(float64); ok {
if instanceCount, ok := node.Props["instanceCount"].(float64); ok {
totalCost += monthlyUsd * instanceCount
}
}
case "webserver":
if monthlyCost, ok := node.Props["monthlyCostUsd"].(float64); ok {
totalCost += monthlyCost
}
default:
// Default cost for other components (cache, database, load balancer, etc.)
totalCost += 20 // $20/month baseline
}
}
return totalCost
}
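// Worked cost example with made-up props: one webserver with monthlyCostUsd = 25, one
// microservice with monthlyUsd = 15 and instanceCount = 3 (3 × $15 = $45), plus a cache and a
// database at the $20 baseline each, totals 25 + 45 + 20 + 20 = $110/month; user nodes add $0.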
// Helper function
func min(a, b int) int {
if a < b {
return a
}
return b
}
// buildSuccessURL creates a URL for the success page with simulation results
func buildSuccessURL(levelName string, score int, metrics map[string]interface{}, feedback []string, levelID string) string {
baseURL := "/success"
// Get level data if available
var targetRPS, targetLatency int
var targetAvail float64
if levelID != "" {
if lvl, err := level.GetLevelByID(levelID); err == nil {
targetRPS = lvl.TargetRPS
targetLatency = lvl.MaxP95LatencyMs
targetAvail = lvl.RequiredAvailabilityPct
}
}
// Use defaults if level not found
if targetRPS == 0 {
targetRPS = 10000
}
if targetLatency == 0 {
targetLatency = 200
}
if targetAvail == 0 {
targetAvail = 99.9
}
params := []string{
fmt.Sprintf("level=%s", levelName),
fmt.Sprintf("score=%d", score),
fmt.Sprintf("targetRPS=%d", targetRPS),
fmt.Sprintf("achievedRPS=%d", metrics["throughput"].(int)),
fmt.Sprintf("targetLatency=%d", targetLatency),
fmt.Sprintf("actualLatency=%.1f", metrics["latency_avg"].(float64)),
fmt.Sprintf("availability=%.1f", metrics["availability"].(float64)),
fmt.Sprintf("levelId=%s", levelID),
}
// Add feedback as pipe-separated values (URL-escaped so spaces and '%' survive the redirect)
if len(feedback) > 0 {
feedbackStr := strings.Join(feedback, "|")
params = append(params, fmt.Sprintf("feedback=%s", url.QueryEscape(feedbackStr)))
}
return baseURL + "?" + strings.Join(params, "&")
}
// buildFailureURL creates a URL for the failure page with simulation results
func buildFailureURL(levelName string, metrics map[string]interface{}, feedback []string, levelID string) string {
baseURL := "/failure"
// Get level data if available
var targetRPS, targetLatency int
var targetAvail float64
if levelID != "" {
if lvl, err := level.GetLevelByID(levelID); err == nil {
targetRPS = lvl.TargetRPS
targetLatency = lvl.MaxP95LatencyMs
targetAvail = lvl.RequiredAvailabilityPct
}
}
// Use defaults if level not found
if targetRPS == 0 {
targetRPS = 10000
}
if targetLatency == 0 {
targetLatency = 200
}
if targetAvail == 0 {
targetAvail = 99.9
}
params := []string{
fmt.Sprintf("level=%s", levelName),
fmt.Sprintf("reason=performance"),
fmt.Sprintf("targetRPS=%d", targetRPS),
fmt.Sprintf("achievedRPS=%d", metrics["throughput"].(int)),
fmt.Sprintf("targetLatency=%d", targetLatency),
fmt.Sprintf("actualLatency=%.1f", metrics["latency_avg"].(float64)),
fmt.Sprintf("targetAvail=%.1f", targetAvail),
fmt.Sprintf("actualAvail=%.1f", metrics["availability"].(float64)),
fmt.Sprintf("levelId=%s", levelID),
}
// Add failed requirements as pipe-separated values (URL-escaped)
if len(feedback) > 0 {
failedReqsStr := strings.Join(feedback, "|")
params = append(params, fmt.Sprintf("failedReqs=%s", url.QueryEscape(failedReqsStr)))
}
return baseURL + "?" + strings.Join(params, "&")
} }

5
router/router.go

@ -14,13 +14,10 @@ func SetupRoutes(tmpl *template.Template) *http.ServeMux {
mux.Handle("/", &handlers.HomeHandler{Tmpl: tmpl}) mux.Handle("/", &handlers.HomeHandler{Tmpl: tmpl})
mux.Handle("/mode", auth.RequireAuth(&handlers.PlayHandler{Tmpl: tmpl})) mux.Handle("/mode", auth.RequireAuth(&handlers.PlayHandler{Tmpl: tmpl}))
mux.Handle("/play/{levelId}", auth.RequireAuth(&handlers.PlayHandler{Tmpl: tmpl})) mux.Handle("/play/", auth.RequireAuth(&handlers.PlayHandler{Tmpl: tmpl}))
mux.Handle("/simulate", auth.RequireAuth(&handlers.SimulationHandler{})) mux.Handle("/simulate", auth.RequireAuth(&handlers.SimulationHandler{}))
mux.Handle("/success", auth.RequireAuth(&handlers.SuccessHandler{Tmpl: tmpl}))
mux.Handle("/failure", auth.RequireAuth(&handlers.FailureHandler{Tmpl: tmpl}))
mux.HandleFunc("/login", auth.LoginHandler) mux.HandleFunc("/login", auth.LoginHandler)
mux.HandleFunc("/callback", auth.CallbackHandler) mux.HandleFunc("/callback", auth.CallbackHandler)
mux.HandleFunc("/ws", handlers.Messages)
return mux return mux
} }

319
static/app.js

@ -12,23 +12,6 @@ import './plugins/datapipeline.js';
import './plugins/monitorAlerting.js'; import './plugins/monitorAlerting.js';
import './plugins/thirdPartyService.js'; import './plugins/thirdPartyService.js';
import { PluginRegistry } from './pluginRegistry.js'; import { PluginRegistry } from './pluginRegistry.js';
import { initializeObservers } from './observers.js';
import {
CommandInvoker,
SwitchToResourcesTabCommand,
SwitchToDesignTabCommand,
ToggleArrowModeCommand,
StartChatCommand,
SendChatMessageCommand,
HandleDragStartCommand,
HandleDragEndCommand,
DropComponentCommand,
RunSimulationCommand,
HandleCanvasClickCommand,
SaveNodePropertiesCommand,
DeleteSelectionCommand
} from './commands.js';
import { CanvasStateMachine } from './states/CanvasStateMachine.js';
export class CanvasApp { export class CanvasApp {
constructor() { constructor() {
@ -55,105 +38,182 @@ export class CanvasApp {
this.computeGroup = document.getElementById('compute-group'); this.computeGroup = document.getElementById('compute-group');
this.lbGroup = document.getElementById('lb-group'); this.lbGroup = document.getElementById('lb-group');
this.mqGroup = document.getElementById('mq-group'); this.mqGroup = document.getElementById('mq-group');
this.startChatBtn = document.getElementById('start-chat');
this.chatElement = document.getElementById('chat-box');
this.chatTextField = document.getElementById('chat-message-box');
this.chatMessages = document.getElementById('messages');
this.chatLoadingIndicator = document.getElementById('loading-indicator');
this.level = window.levelData;
this.ws = null;
this.plugins = PluginRegistry.getAll()
this.createDesignBtn = document.getElementById('create-design-button');
this.learnMoreBtn = document.getElementById('learn-more-button');
this.tabs = document.getElementsByClassName('tabinput');
this._reconnectDelay = 1000;
this._maxReconnectDelay = 15000;
this._reconnectTimer = null;
// Initialize observer system (alongside existing event handling)
const observers = initializeObservers(this.nodePropsPanel, this.propsSaveBtn);
this.propertiesPanelSubject = observers.propertiesPanel;
this.nodeSelectionSubject = observers.nodeSelection;
this.connectionModeSubject = observers.connectionMode;
// Initialize command system
this.commandInvoker = new CommandInvoker(this);
// Initialize state machine
this.stateMachine = new CanvasStateMachine(this);
this.initEventHandlers(); this.initEventHandlers();
} }
initEventHandlers() { initEventHandlers() {
const requirementstab = this.tabs[0];
const designtab = this.tabs[1];
const resourcestab = this.tabs[2];
this.learnMoreBtn.addEventListener('click', () => {
this.commandInvoker.execute(new SwitchToResourcesTabCommand());
});
this.createDesignBtn.addEventListener('click', () => {
this.commandInvoker.execute(new SwitchToDesignTabCommand());
});
this.arrowToolBtn.addEventListener('click', () => { this.arrowToolBtn.addEventListener('click', () => {
this.commandInvoker.execute(new ToggleArrowModeCommand()); this.arrowMode = !this.arrowMode;
}); if (this.arrowMode) {
this.startChatBtn.addEventListener('click', () => { this.arrowToolBtn.classList.add('active');
this.commandInvoker.execute(new StartChatCommand()); this.hidePropsPanel();
}); } else {
this.chatTextField.addEventListener('keydown', (e) => { this.arrowToolBtn.classList.remove('active');
if (e.key === 'Enter' && !e.shiftKey) { if (this.connectionStart) {
e.preventDefault(); this.connectionStart.group.classList.remove('selected');
console.log('you sent a message'); this.connectionStart = null;
const message = this.chatTextField.value;
if (message.trim()) {
this.commandInvoker.execute(new SendChatMessageCommand(message));
} }
} }
}); });
this.sidebar.addEventListener('dragstart', (e) => { this.sidebar.addEventListener('dragstart', (e) => {
this.commandInvoker.execute(new HandleDragStartCommand(e)); const type = e.target.getAttribute('data-type');
const plugin = PluginRegistry.get(type);
if (!plugin) return;
e.dataTransfer.setData('text/plain', type)
}); });
this.sidebar.addEventListener('dragend', (e) => { this.sidebar.addEventListener('dragend', (e) => {
this.commandInvoker.execute(new HandleDragEndCommand(e)); if (e.target.classList.contains('component-icon')) {
e.target.classList.remove('dragging');
}
}); });
this.canvasContainer.addEventListener('dragover', (e) => e.preventDefault()); this.canvasContainer.addEventListener('dragover', (e) => e.preventDefault());
this.canvasContainer.addEventListener('drop', (e) => { this.canvasContainer.addEventListener('drop', (e) => {
this.commandInvoker.execute(new DropComponentCommand(e)); const type = e.dataTransfer.getData('text/plain');
const plugin = PluginRegistry.get(type);
if (!plugin) return;
const pt = this.canvas.createSVGPoint();
pt.x = e.clientX;
pt.y = e.clientY;
const svgP = pt.matrixTransform(this.canvas.getScreenCTM().inverse());
const x = svgP.x - this.componentSize.width / 2;
const y = svgP.y - this.componentSize.height / 2;
const props = generateDefaultProps(plugin);
const node = new ComponentNode(type, x, y, this, props);
node.x = x;
node.y = y;
}); });
this.runButton.addEventListener('click', () => { this.runButton.addEventListener('click', () => {
this.commandInvoker.execute(new RunSimulationCommand()); const designData = this.exportDesign();
console.log(JSON.stringify(designData))
}); });
this.canvas.addEventListener('click', (e) => { this.canvas.addEventListener('click', () => {
this.commandInvoker.execute(new HandleCanvasClickCommand(e)); if (this.connectionStart) {
this.connectionStart.group.classList.remove('selected');
this.connectionStart = null;
}
this.hidePropsPanel();
this.clearSelection();
}); });
this.propsSaveBtn.addEventListener('click', () => { this.propsSaveBtn.addEventListener('click', () => {
this.commandInvoker.execute(new SaveNodePropertiesCommand()); if (!this.activeNode) return;
});
// Prevent props panel from closing when clicking inside it const node = this.activeNode;
this.nodePropsPanel.addEventListener('click', (e) => { const panel = this.nodePropsPanel;
e.stopPropagation(); const plugin = PluginRegistry.get(node.type);
if (!plugin || !plugin.props) {
this.hidePropsPanel();
return;
}
// Loop through plugin-defined props and update the node
for (const prop of plugin.props) {
const input = panel.querySelector(`[name='${prop.name}']`);
if (!input) continue;
let value;
if (prop.type === 'number') {
value = parseFloat(input.value);
if (isNaN(value)) value = prop.default ?? 0;
} else {
value = input.value;
}
node.props[prop.name] = value;
if (prop.name === 'label') {
node.updateLabel(value);
}
}
this.hidePropsPanel();
}); });
document.addEventListener('keydown', (e) => { document.addEventListener('keydown', (e) => {
this.commandInvoker.execute(new DeleteSelectionCommand(e.key)); if (e.key === 'Backspace' || e.key === 'Delete') {
if (this.selectedConnection) {
this.canvas.removeChild(this.selectedConnection.line);
this.canvas.removeChild(this.selectedConnection.text);
const index = this.connections.indexOf(this.selectedConnection);
if (index !== -1) this.connections.splice(index, 1);
this.selectedConnection = null;
} else if (this.selectedNode) {
this.canvas.removeChild(this.selectedNode.group);
this.placedComponents = this.placedComponents.filter(n => n !== this.selectedNode);
this.connections = this.connections.filter(conn => {
if (conn.start === this.selectedNode || conn.end === this.selectedNode) {
this.canvas.removeChild(conn.line);
this.canvas.removeChild(conn.text);
return false;
}
return true;
});
this.selectedNode = null;
this.activeNode = null;
this.hidePropsPanel();
}
}
}); });
} }
showPropsPanel(node) {
this.activeNode = node;
const plugin = PluginRegistry.get(node.type);
const panel = this.nodePropsPanel;
if (!plugin || this.arrowMode) {
this.hidePropsPanel();
return;
}
const bbox = node.group.getBBox();
const ctm = node.group.getCTM();
const screenX = ctm.e + bbox.x;
const screenY = ctm.f + bbox.y + bbox.height;
panel.style.left = (screenX + this.canvasContainer.getBoundingClientRect().left) + 'px';
panel.style.top = (screenY + this.canvasContainer.getBoundingClientRect().top) + 'px';
// Hide all groups first
const allGroups = panel.querySelectorAll('.prop-group, #label-group, #compute-group, #lb-group');
allGroups.forEach(g => g.style.display = 'none');
const shownGroups = new Set();
for (const prop of plugin.props) {
const group = panel.querySelector(`[data-group='${prop.group}']`);
const input = panel.querySelector(`[name='${prop.name}']`);
// Show group once
if (group && !shownGroups.has(group)) {
group.style.display = 'block';
shownGroups.add(group);
}
// Set value
if (input) {
input.value = node.props[prop.name] ?? prop.default;
}
}
this.propsSaveBtn.disabled = false;
panel.style.display = 'block';
}
hidePropsPanel() {
this.nodePropsPanel.style.display = 'none';
this.propsSaveBtn.disabled = true;
this.activeNode = null;
}
updateConnectionsFor(movedNode) { updateConnectionsFor(movedNode) {
this.connections.forEach(conn => { this.connections.forEach(conn => {
@ -163,10 +223,22 @@ export class CanvasApp {
}); });
} }
clearSelection() {
if (this.selectedConnection) {
this.selectedConnection.deselect();
this.selectedConnection = null;
}
if (this.selectedNode) {
this.selectedNode.deselect();
this.selectedNode = null;
this.hidePropsPanel();
}
}
exportDesign() { exportDesign() {
const nodes = this.placedComponents const nodes = this.placedComponents
.filter(n => n.type !== 'user')
.map(n => { .map(n => {
const plugin = PluginRegistry.get(n.type); const plugin = PluginRegistry.get(n.type);
const result = { const result = {
@ -193,87 +265,6 @@ export class CanvasApp {
capacity: c.capacity || 1000 capacity: c.capacity || 1000
})); }));
return { return { nodes, connections };
nodes,
connections,
level: this.level,
availableComponents: this.plugins
};
}
getLevelInfo() {
// Try to extract level info from URL path like /play/url-shortener
const pathParts = window.location.pathname.split('/');
if (pathParts.length >= 3 && pathParts[1] === 'play') {
const levelId = decodeURIComponent(pathParts[2]);
return {
levelId: levelId
};
}
return {};
}
// showResults function removed - now handled by redirect to success/failure pages
showError(errorMessage) {
alert(`Simulation Error:\n\n${errorMessage}\n\nPlease check your design and try again.`);
}
_initWebSocket() {
const scheme = location.protocol === "https:" ? "wss://" : "ws://";
this.ws = new WebSocket(scheme + location.host + "/ws");
this.ws.onopen = () => {
console.log("WebSocket connected");
// Reset reconnection delay on successful connection
this._reconnectDelay = 1000;
this.ws.send(JSON.stringify({
'designPayload': JSON.stringify(this.exportDesign()),
'message': ''
}));
};
this.ws.onmessage = (e) => {
this.chatLoadingIndicator.style.display = 'none';
this.chatTextField.disabled = false;
this.chatTextField.focus();
const message = document.createElement('p');
message.innerHTML = e.data;
message.className = "other";
this.chatMessages.insertBefore(message, this.chatLoadingIndicator);
};
this.ws.onerror = (err) => {
console.log("ws error:", err);
this._scheduleReconnect();
};
this.ws.onclose = () => {
console.log("WebSocket closed, scheduling reconnect...");
this.ws = null;
this._scheduleReconnect();
};
}
_scheduleReconnect() {
if (this._stopped) return;
if (this._reconnectTimer) {
clearTimeout(this._reconnectTimer)
this._reconnectTimer = null;
}
const jitter = this._reconnectDelay * (Math.random() * 0.4 - 0.2);
const delay = Math.max(250, Math.min(this._maxReconnectDelay, this._reconnectDelay + jitter));
console.log(`Reconnecting websocket in ${delay}ms...`)
this._reconnectTimer = setTimeout(() => {
this._reconnectTimer = null;
this._initWebSocket();
}, delay);
this._reconnectDelay = Math.min(this._maxReconnectDelay, Math.round(this._reconnectDelay * 1.8));
} }
} }

306
static/canvas.html

@ -1,306 +0,0 @@
{{ define "canvas" }}
<div id="canvas-wrapper">
<input class="tabinput" type="radio" id="tab1" name="tab" checked>
<input class="tabinput" type="radio" id="tab2" name="tab">
<!-- <input class="tabinput" type="radio" id="tab3" name="tab"> -->
<div class="tabs">
<div class="tab-labels">
<label for="tab1">Requirements</label>
<label for="tab2">Design</label>
<!-- <label for="tab3">Resources</label> -->
</div>
<!-- Requirements -->
<div id="content1" class="tab-content">
{{ if .Level.InterviewerRequirements }}
<div class="requirements-section">
<h3>Interviewer Requirements</h3>
<ul class="requirements-list">
{{ range .Level.InterviewerRequirements }}
<li class="requirement-item">{{ . }}</li>
{{ end }}
</ul>
</div>
{{ end }}
{{ if .Level.FunctionalRequirements }}
<div class="requirements-section">
<h3>Functional Requirements</h3>
<ul class="requirements-list">
{{ range .Level.FunctionalRequirements }}
<li class="requirement-item">{{ . }}</li>
{{ end }}
</ul>
</div>
{{ end }}
{{ if .Level.NonFunctionalRequirements }}
<div class="requirements-section">
<h3>Non-Functional Requirements</h3>
<ul class="requirements-list">
{{ range .Level.NonFunctionalRequirements }}
<li class="requirement-item">{{ . }}</li>
{{ end }}
</ul>
</div>
{{ end }}
<div class="continue-section">
<button class="continue-button" id="create-design-button">Create your design</button>
<button class="continue-button" id="learn-more-button">Learn more</button>
</div>
</div>
<!-- Design-->
<div id="content2" class="tab-content">
<div id="sidebar">
<div class="component-icon" draggable="true" data-type="user">
user
</div>
<div class="component-icon" draggable="true" data-type="loadBalancer">
load balancer
</div>
<div class="component-icon" draggable="true" data-type="webserver">
webserver
</div>
<div class="component-icon" draggable="true" data-type="database">
database
</div>
<div class="component-icon" draggable="true" data-type="cache">
cache
</div>
<div class="component-icon" draggable="true" data-type="messageQueue">
message queue
</div>
<div class="component-icon" draggable="true" data-type="cdn">
CDN
</div>
<div class="component-icon" draggable="true" data-type="microservice">
microservice node
</div>
<div class="component-icon" draggable="true" data-type="data pipeline">
data pipeline
</div>
<div class="component-icon" draggable="true" data-type="monitoring/alerting">
monitoring/alerting
</div>
<div class="component-icon" draggable="true" data-type="third party service">
third-party service
</div>
</div>
<div id="canvas-container">
<div id="connection-modal" style="display: none;" class="modal">
<div class="modal-content">
<h3>Create Connection</h3>
<label>
Label:
<input type="text" id="connection-label" value="Read traffic">
</label>
<label>
Protocol:
<select id="connection-protocol">
<option>HTTP</option>
<option>HTTPS</option>
<option>Database</option>
<option>Redis</option>
</select>
</label>
<label style="margin-top: 10px;">
<input type="checkbox" id="connection-tls">
Enable TLS (encryption)
</label>
<label for="connection-capacity">Capacity Limit (RPS):</label>
<input type="number" id="connection-capacity" value="1000" min="1" />
<div class="modal-actions">
<button id="connection-save">Save</button>
<button id="connection-cancel">Cancel</button>
</div>
</div>
</div>
<div id="canvas-toolbar">
<button id="arrow-tool-btn" class="toolbar-btn">Arrow Tool</button>
</div>
<svg id="canvas">
<defs>
<marker id="arrowhead-start" markerWidth="10" markerHeight="7" refX="0" refY="3.5" orient="auto"
markerUnits="strokeWidth">
<path d="M10 0 L0 3.5 L10 7" fill="#ccc" />
</marker>
<marker id="arrowhead-end" markerWidth="10" markerHeight="7" refX="10" refY="3.5" orient="auto"
markerUnits="strokeWidth">
<path d="M0 0 L10 3.5 L0 7" fill="#ccc" />
</marker>
</defs>
</svg>
<div id="node-props-panel">
<h3>node properties</h3>
<div id="label-group" data-group="label-group">
<label>label:</label>
<input type="text" name="label" />
</div>
<div id="db-group" class="prop-group" data-group="db-group">
<label>replication factor:<input type="number" name="replication" min="1" step="1" /></label>
</div>
<div id="cache-group" class="prop-group" data-group="cache-group">
<label>cache ttl (secs):<input type="number" name="cacheTTL" min="0" step="60" /></label>
<label>Max Entries: <input name="maxEntries" type="number" /></label>
<label>Eviction Policy:
<select name="evictionPolicy">
<option value="LRU">LRU (Least Recently Used)</option>
<option value="LFU">LFU (Least Frequently Used)</option>
</select>
</label>
</div>
<div id="compute-group" data-group="compute-group" class="prop-group">
<label>RPS Capacity:</label>
<input type="number" name="rpsCapacity" min="1" />
<label>Base Latency (ms):</label>
<input type="number" name="baseLatencyMs" min="1" />
</div>
<div id="lb-group" data-group="lb-group" class="prop-group">
<label>Algorithm</label>
<select name="algorithm">
<option value="round-robin">Round Robin</option>
<option value="least-connection">Least Connection</option>
</select>
</div>
<div id="mq-group" data-group="mq-group" class="prop-group">
<label>Queue Capacity (max messages that can be held in queue)</label>
<input type="number" name="queueCapacity" min="1" />
<label>Retention Time (seconds)</label>
<input type="number" name="retentionSeconds" min="1" />
</div>
<div id="cdn-group" data-group="cdn-group" class="prop-group">
<label>TTL (seconds)</label>
<input type="number" name="ttl" min="1" />
<label>Geo Replication</label>
<select name="geoReplication">
<option value="global">Global</option>
<option value="regional">Regional</option>
<option value="custom">Custom</option>
</select>
<label>Caching Strategy</label>
<select name="cachingStrategy">
<option value="cache-first">Cache First</option>
<option value="network-first">Network First</option>
<option value="stale-while-revalidate">Stale While Revalidate</option>
</select>
<label>Compression</label>
<select name="compression">
<option value="brotli">Brotli</option>
<option value="gzip">Gzip</option>
<option value="none">None</option>
</select>
<label>HTTP/2 Support</label>
<select name="http2">
<option value="enabled">Enabled</option>
<option value="disabled">Disabled</option>
</select>
</div>
<div id="microservice-group" data-group="microservice-group" class="prop-group">
<label>
Instance Count:
<input type="number" name="instanceCount" value="3" min="1" />
</label>
<label>
CPU (vCPUs):
<input type="number" name="cpu" value="2" min="1" />
</label>
<label>
RAM (GB):
<input type="number" name="ramGb" value="4" min="1" />
</label>
<label>
RPS Capacity:
<input type="number" name="rpsCapacity" value="150" min="1" />
</label>
<label>
Scaling Strategy:
<select name="scalingStrategy">
<option value="auto" selected>Auto</option>
<option value="manual">Manual</option>
</select>
</label>
</div>
<div id="datapipeline-group" data-group="pipeline-group" class="prop-group">
<label>Batch Size</label>
<input type="number" name="batchSize" min="1" />
<label>Schedule</label>
<select name="schedule">
<option value="realtime">Real-time</option>
<option value="hourly">Hourly</option>
<option value="daily">Daily</option>
<option value="weekly">Weekly</option>
</select>
<label>Transformations</label>
<select name="transformations">
<option value="normalize">Normalize</option>
<option value="dedupe">Dedupe</option>
<option value="filter">Filter</option>
<option value="enrich">Enrich</option>
<option value="aggregate">Aggregate</option>
</select>
<label>Destination</label>
<input type="text" name="destination" placeholder="e.g. data warehouse" />
</div>
<div id="monitor-group" data-group="monitor-group" class="prop-group">
<label>Monitoring Tool</label>
<select name="tool">
<option value="Prometheus">Prometheus</option>
<option value="Datadog">Datadog</option>
<option value="New Relic">New Relic</option>
<option value="Grafana Cloud">Grafana Cloud</option>
</select>
<label>Alert Threshold (%)</label>
<input type="number" name="alertThreshold" min="0" max="100" />
</div>
<div id="third-party-group" data-group="third-party-group" class="prop-group">
<label>Provider</label>
<input type="text" name="provider" />
<label>Latency (ms)</label>
<input type="number" name="latency" min="0" />
</div>
<!-- PUT NEW COMPONENTS BEFORE THIS BUTTON -->
<button id="node-props-save" disabled>save</button>
</div>
<div id="bottom-panel">
<button id="run-button" disabled>Test Design</button>
</div>
</div>
</div>
<!-- Metrics-->
<!-- <div id="content3" class="tab-content">This is Tab 3 content.</div> -->
</div>
</div>
</div>
</div>
{{ end }}

12
static/challenges.html

@ -1,12 +0,0 @@
{{ define "challenges" }}
<div id="challenge-container">
<h2 class="sidebar-title">Challenges</h2>
<ul class="challenge-list">
{{range .Levels}}
<li class="challenge-item {{if eq .ID $.Level.ID}}active{{end}}">
<div class="challenge-name"><a href="/play/{{.ID}}">{{.Name}}</a></div>
</li>
{{end}}
</ul>
</div>
{{ end }}

41
static/chat.html

@ -1,41 +0,0 @@
{{ define "chat" }}
<label for="chat-checkbox">
<div aria-label="Send message" id="start-chat">
<svg class="chat-bubble" width="32" height="32" viewBox="0 0 64 64" xmlns="http://www.w3.org/2000/svg"
fill="none" stroke="white" stroke-width="4">
<path
d="M4 12C4 7.58 8.03 4 13 4h38c4.97 0 9 3.58 9 8v24c0 4.42-4.03 8-9 8H22l-12 12v-12H13c-4.97 0-9-3.58-9-8V12z" />
</svg>
</div>
</label>
<input type="checkbox" name="chat-checkbox" id="chat-checkbox" class="chat-checkbox" />
<div class="chat" id="chat-box">
<div id="chat-header">
<p class="chat-title">System Design Assistant</p>
<p class="powered-by">Powered by AI</p>
</div>
<section id="messages">
<div class="loading-indicator" id="loading-indicator">
<div class="loading-dots">
<div class="loading-dot"></div>
<div class="loading-dot"></div>
<div class="loading-dot"></div>
</div>
<span>loading...</span>
</div>
</section>
<footer>
<textarea name="chat-message" placeholder="Type your message here..." disabled id="chat-message-box"></textarea>
<button aria-label="Send message">
<svg xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 24 24" stroke-width="1.5"
stroke="currentColor">
<path stroke-linecap="round" stroke-linejoin="round"
d="M6 12 3.269 3.125A59.769 59.769 0 0 1 21.485 12 59.768 59.768 0 0 1 3.27 20.875L5.999 12Zm0 0h7.5" />
</svg>
</button>
</footer>
</div>
{{ end }}

389
static/commands.js

@ -1,389 +0,0 @@
/**
* Command Pattern Implementation
*
* This system encapsulates user actions as command objects, making the codebase
* more maintainable and providing a foundation for features like undo/redo.
*/
import { PluginRegistry } from './pluginRegistry.js';
import { generateDefaultProps } from './utils.js';
import { ComponentNode } from './node.js';
// Base Command interface
export class Command {
/**
* Execute the command
* @param {CanvasApp} app - The application context
*/
execute(app) {
throw new Error('Command must implement execute() method');
}
/**
* Optional: Undo the command (for future undo/redo system)
* @param {CanvasApp} app - The application context
*/
undo(app) {
// Optional implementation - most commands won't need this initially
}
/**
* Optional: Get command description for logging/debugging
*/
getDescription() {
return this.constructor.name;
}
}
// Command Invoker - manages command execution and history
export class CommandInvoker {
constructor(app) {
this.app = app;
this.history = [];
this.maxHistorySize = 100; // Prevent memory leaks
}
/**
* Execute a command and add it to history
* @param {Command} command
*/
execute(command) {
try {
command.execute(this.app);
// Add to history (for future undo system)
this.history.push(command);
if (this.history.length > this.maxHistorySize) {
this.history.shift();
}
} catch (error) {
throw error;
}
}
/**
* Future: Undo last command
*/
undo() {
if (this.history.length === 0) return;
const command = this.history.pop();
if (command.undo) {
command.undo(this.app);
}
}
/**
* Get command history for debugging
*/
getHistory() {
return this.history.map(cmd => cmd.getDescription());
}
}
// =============================================================================
// TAB NAVIGATION COMMANDS
// =============================================================================
export class SwitchToResourcesTabCommand extends Command {
execute(app) {
const requirementstab = app.tabs[1];
const resourcestab = app.tabs[2];
requirementstab.checked = false;
resourcestab.checked = true;
}
}
export class SwitchToDesignTabCommand extends Command {
execute(app) {
const requirementstab = app.tabs[0]; // tabs[0] = Requirements, tabs[1] = Design
const designtab = app.tabs[1];
requirementstab.checked = false;
designtab.checked = true;
}
}
// =============================================================================
// TOOL COMMANDS
// =============================================================================
export class ToggleArrowModeCommand extends Command {
execute(app) {
app.arrowMode = !app.arrowMode;
if (app.arrowMode) {
app.arrowToolBtn.classList.add('active');
// Use observer to notify that arrow mode is enabled (will hide props panel)
app.connectionModeSubject.notifyConnectionModeChanged(true);
} else {
app.arrowToolBtn.classList.remove('active');
if (app.connectionStart) {
app.connectionStart.group.classList.remove('selected');
app.connectionStart = null;
}
// Use observer to notify that arrow mode is disabled
app.connectionModeSubject.notifyConnectionModeChanged(false);
}
}
}
// =============================================================================
// CHAT COMMANDS
// =============================================================================
export class StartChatCommand extends Command {
execute(app) {
const scheme = location.protocol === "https:" ? "wss://" : "ws://";
app.ws = new WebSocket(scheme + location.host + "/ws");
app.ws.onopen = () => {
app.ws.send(JSON.stringify({
'designPayload': JSON.stringify(app.exportDesign()),
'message': ''
}));
};
app.ws.onmessage = (e) => {
app.chatLoadingIndicator.style.display = 'none';
app.chatTextField.disabled = false;
app.chatTextField.focus();
const message = document.createElement('p');
message.innerHTML = e.data;
message.className = "other";
app.chatMessages.insertBefore(message, app.chatLoadingIndicator);
};
app.ws.onerror = (err) => {
console.log("ws error:", err);
app._scheduleReconnect();
};
app.ws.onclose = () => {
console.log("leaving chat...");
app.ws = null;
app._scheduleReconnect();
};
}
}
export class SendChatMessageCommand extends Command {
constructor(message) {
super();
this.message = message;
}
execute(app) {
const messageElement = document.createElement('p');
messageElement.innerHTML = this.message;
messageElement.className = "me";
app.chatMessages.insertBefore(messageElement, app.chatLoadingIndicator);
app.ws.send(JSON.stringify({
'message': this.message,
'designPayload': JSON.stringify(app.exportDesign()),
}));
app.chatTextField.value = '';
app.chatLoadingIndicator.style.display = 'block';
}
}
// =============================================================================
// DRAG & DROP COMMANDS
// =============================================================================
export class HandleDragStartCommand extends Command {
constructor(event) {
super();
this.event = event;
}
execute(app) {
const type = this.event.target.getAttribute('data-type');
const plugin = PluginRegistry.get(type);
if (!plugin) return;
this.event.dataTransfer.setData('text/plain', type);
}
}
export class HandleDragEndCommand extends Command {
constructor(event) {
super();
this.event = event;
}
execute(app) {
if (this.event.target.classList.contains('component-icon')) {
this.event.target.classList.remove('dragging');
}
}
}
export class DropComponentCommand extends Command {
constructor(event) {
super();
this.event = event;
}
execute(app) {
const type = this.event.dataTransfer.getData('text/plain');
const plugin = PluginRegistry.get(type);
if (!plugin) return;
const pt = app.canvas.createSVGPoint();
pt.x = this.event.clientX;
pt.y = this.event.clientY;
const svgP = pt.matrixTransform(app.canvas.getScreenCTM().inverse());
const x = svgP.x - app.componentSize.width / 2;
const y = svgP.y - app.componentSize.height / 2;
const props = generateDefaultProps(plugin);
const node = new ComponentNode(type, x, y, app, props);
node.x = x;
node.y = y;
}
}
// =============================================================================
// SIMULATION COMMANDS
// =============================================================================
export class RunSimulationCommand extends Command {
async execute(app) {
const designData = app.exportDesign();
// Try to get level info from URL or page context
const levelInfo = app.getLevelInfo();
const requestBody = {
design: designData,
...levelInfo
};
console.log('Sending design to simulation:', JSON.stringify(requestBody));
// Disable button and show loading state
app.runButton.disabled = true;
app.runButton.textContent = 'Running Simulation...';
try {
const response = await fetch('/simulate', {
method: 'POST',
headers: {
'Content-Type': 'application/json',
},
body: JSON.stringify(requestBody)
});
if (!response.ok) {
throw new Error(`HTTP ${response.status}: ${response.statusText}`);
}
// Check if response is a redirect (status 303)
if (response.redirected || response.status === 303) {
// Follow the redirect to the result page
window.location.href = response.url;
return;
}
// If we get here, something went wrong - the server should always redirect
console.error('Unexpected response from server - expected redirect but got:', response.status);
app.showError('Unexpected server response. Please try again.');
} catch (error) {
console.error('Network error:', error);
app.showError('Failed to run simulation: ' + error.message);
} finally {
// Re-enable button
app.runButton.disabled = false;
app.runButton.textContent = 'Test Design';
}
}
}
// =============================================================================
// CANVAS INTERACTION COMMANDS
// =============================================================================
export class HandleCanvasClickCommand extends Command {
constructor(event) {
super();
this.event = event;
}
execute(app) {
// Delegate to current state
app.stateMachine.handleCanvasClick(this.event);
}
}
export class SaveNodePropertiesCommand extends Command {
execute(app) {
if (!app.activeNode) return;
const node = app.activeNode;
const panel = app.nodePropsPanel;
const plugin = PluginRegistry.get(node.type);
if (!plugin || !plugin.props) {
return;
}
// Loop through plugin-defined props and update the node
for (const prop of plugin.props) {
const input = panel.querySelector(`[name='${prop.name}']`);
if (!input) continue;
let value;
if (prop.type === 'number') {
value = parseFloat(input.value);
if (isNaN(value)) value = prop.default ?? 0;
} else {
value = input.value;
}
node.props[prop.name] = value;
if (prop.name === 'label') {
node.updateLabel(value);
}
}
}
}
export class DeleteSelectionCommand extends Command {
constructor(key) {
super();
this.key = key;
}
execute(app) {
if (this.key === 'Backspace' || this.key === 'Delete') {
if (app.selectedConnection) {
app.canvas.removeChild(app.selectedConnection.line);
app.canvas.removeChild(app.selectedConnection.text);
const index = app.connections.indexOf(app.selectedConnection);
if (index !== -1) app.connections.splice(index, 1);
app.selectedConnection = null;
} else if (app.selectedNode) {
app.canvas.removeChild(app.selectedNode.group);
app.placedComponents = app.placedComponents.filter(n => n !== app.selectedNode);
app.connections = app.connections.filter(conn => {
if (conn.start === app.selectedNode || conn.end === app.selectedNode) {
app.canvas.removeChild(conn.line);
app.canvas.removeChild(conn.text);
return false;
}
return true;
});
app.selectedNode = null;
app.activeNode = null;
}
}
}
}

20
static/connection.js

@ -41,15 +41,7 @@ export class Connection {
this.selected = false; this.selected = false;
this.line.addEventListener('click', (e) => { this.line.addEventListener('click', (e) => {
e.stopPropagation(); e.stopPropagation();
// Clear node selection via observer this.app.clearSelection();
if (this.app.selectedNode) {
this.app.nodeSelectionSubject.notifyNodeDeselected(this.app.selectedNode);
this.app.selectedNode = null;
}
// Clear any previously selected connection
if (this.app.selectedConnection) {
this.app.selectedConnection.deselect();
}
this.select(); this.select();
}); });
@ -125,15 +117,7 @@ export class Connection {
} }
select() { select() {
// Clear node selection via observer this.app.clearSelection();
if (this.app.selectedNode) {
this.app.nodeSelectionSubject.notifyNodeDeselected(this.app.selectedNode);
this.app.selectedNode = null;
}
// Clear any previously selected connection
if (this.app.selectedConnection) {
this.app.selectedConnection.deselect();
}
this.selected = true; this.selected = true;
this.line.setAttribute('stroke', '#007bff'); this.line.setAttribute('stroke', '#007bff');
this.line.setAttribute('stroke-width', 3); this.line.setAttribute('stroke-width', 3);

13
static/difficulty-select.html

@ -607,6 +607,19 @@
window.location.href = '/game-modes'; window.location.href = '/game-modes';
} }
}); });
// Add hover sound effects (you'd need actual audio files)
document.querySelectorAll('.difficulty-card').forEach(card => {
card.addEventListener('mouseenter', () => {
// Play hover sound
console.log('🔊 Hover sound effect');
});
card.addEventListener('click', () => {
// Play click sound
console.log('🔊 Click sound effect');
});
});
</script> </script>
</body> </body>
</html> </html>

54
static/failure.html

@ -125,12 +125,6 @@
50% { opacity: 0.8; } 50% { opacity: 0.8; }
} }
.header-logo-container {
display: flex;
align-items: center;
gap: 12px;
}
.header-text { .header-text {
font-size: 24px; font-size: 24px;
margin: 0; margin: 0;
@ -138,19 +132,6 @@
transition: all 3s ease-in-out; transition: all 3s ease-in-out;
} }
.beta-pill {
background: linear-gradient(45deg, #ff6b35, #f7931e);
color: white;
padding: 4px 8px;
border-radius: 12px;
font-size: 0.7rem;
font-weight: 700;
text-transform: uppercase;
letter-spacing: 0.5px;
box-shadow: 0 2px 4px rgba(0, 0, 0, 0.3);
transition: all 3s ease-in-out;
}
.recovering .header-text { .recovering .header-text {
text-shadow: 0 0 10px rgba(0, 255, 136, 0.8) text-shadow: 0 0 10px rgba(0, 255, 136, 0.8)
} }
@ -655,43 +636,34 @@
<div class="screen-overlay"></div> <div class="screen-overlay"></div>
<div class="header"> <div class="header">
<div class="header-logo-container"> <h1 class="header-text">System Design Game</h1>
<h1 class="header-text">System Design Game</h1>
<div class="beta-pill">BETA</div>
</div>
</div> </div>
<div class="failure-container"> <div class="failure-container">
<h1 class="failure-title glitch" data-text="SYSTEM OVERLOAD" id="failureTitle">SYSTEM OVERLOAD</h1> <h1 class="failure-title glitch" data-text="SYSTEM OVERLOAD" id="failureTitle">SYSTEM OVERLOAD</h1>
<p class="failure-subtitle" id="failureSubtitle">{{.LevelName}} - Your architecture couldn't handle the load</p> <p class="failure-subtitle" id="failureSubtitle">Your architecture couldn't handle the load</p>
<div class="failure-details"> <div class="failure-details">
<div class="failure-reason" id="failureReason"> <div class="failure-reason" id="failureReason">
Critical system failure detected. Your design exceeded operational limits. Critical system failure detected. Your design exceeded operational limits.
{{if .FailedReqs}}
<br><br>Failed requirements:
{{range .FailedReqs}}
<br>• {{.}}
{{end}}
{{end}}
</div> </div>
<div class="failure-metrics"> <div class="failure-metrics">
<div class="metric-item"> <div class="metric-item">
<div class="metric-label">Target RPS</div> <div class="metric-label">Target RPS</div>
<div class="metric-value">{{.TargetRPS | printf "%d"}}</div> <div class="metric-value">10,000</div>
</div> </div>
<div class="metric-item"> <div class="metric-item">
<div class="metric-label">Achieved RPS</div> <div class="metric-label">Achieved RPS</div>
<div class="metric-value exceeded">{{.AchievedRPS | printf "%d"}}</div> <div class="metric-value exceeded">2,847</div>
</div> </div>
<div class="metric-item"> <div class="metric-item">
<div class="metric-label">Max Latency</div> <div class="metric-label">Max Latency</div>
<div class="metric-value">{{.TargetLatency}}ms</div> <div class="metric-value">200ms</div>
</div> </div>
<div class="metric-item"> <div class="metric-item">
<div class="metric-label">Actual Latency</div> <div class="metric-label">Actual Latency</div>
<div class="metric-value exceeded">{{.ActualLatency | printf "%.0f"}}ms</div> <div class="metric-value exceeded">1,247ms</div>
</div> </div>
</div> </div>
@ -700,9 +672,6 @@
<div class="error-line">[ERROR] Load balancer timeout after 30s</div> <div class="error-line">[ERROR] Load balancer timeout after 30s</div>
<div class="error-line">[ERROR] Cache miss ratio: 89%</div> <div class="error-line">[ERROR] Cache miss ratio: 89%</div>
<div class="error-line">[FATAL] System unresponsive - shutting down</div> <div class="error-line">[FATAL] System unresponsive - shutting down</div>
{{range .FailedReqs}}
<div class="error-line">[ERROR] {{.}}</div>
{{end}}
</div> </div>
</div> </div>
@ -741,20 +710,11 @@
// Hide recovery message after transition // Hide recovery message after transition
setTimeout(() => { setTimeout(() => {
if (typeof recoveryMessage !== 'undefined') { recoveryMessage.classList.remove('show');
recoveryMessage.classList.remove('show');
}
}, 3000); }, 3000);
}, 1000); }, 1000);
}, 15000); }, 15000);
// Add retry function
function retryLevel() {
const levelId = '{{.LevelID}}';
const retryUrl = levelId ? `/play/${levelId}?retry=true` : '/game?retry=true';
window.location.href = retryUrl;
}
// Add some random glitch effects (only during failure state) // Add some random glitch effects (only during failure state)
function addRandomGlitch() { function addRandomGlitch() {
if (!document.body.classList.contains('recovering')) { if (!document.body.classList.contains('recovering')) {

4
static/game-mode.css

@ -409,8 +409,6 @@ body {
color: var(--color-text-muted); color: var(--color-text-muted);
} }
/* === RESPONSIVE === */ /* === RESPONSIVE === */
@media (max-width: 1024px) { @media (max-width: 1024px) {
.game-mode-grid { .game-mode-grid {
@ -439,4 +437,4 @@ body {
.stats-grid { .stats-grid {
grid-template-columns: 1fr; grid-template-columns: 1fr;
} }
} }

380
static/game.html

@ -1,27 +1,375 @@
<!DOCTYPE html> <!DOCTYPE html>
<html lang="en"> <html lang="en">
<head>
<head>
<meta charset="UTF-8"> <meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0"> <meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>System Design Game</title> <title>System Design Game</title>
<link rel="stylesheet" type="text/css" href="/static/style.css" /> <link rel="stylesheet" type="text/css" href="/static/style.css" />
</head> </head>
<body>
<body>
<div id="page-container"> <div id="page-container">
{{ template "header" . }} <div id="sd-header">
<div id="main-content"> <h1 class="header-text">System Design Game</h1>
{{ template "challenges" . }} {{ if and .Username .Avatar }}
<div class="userbox">
<img src="{{ .Avatar }}" class="avatar" />
<span class="username">{{ .Username }}</span>
</div>
{{ else }}
<a href="/login" id="github-login-btn">
<img src="https://cdn.jsdelivr.net/gh/devicons/devicon/icons/github/github-original.svg" alt="GitHub Logo">
Login with GitHub
</a>
{{ end }}
</div>
<div id="main-content">
<div id="challenge-container">
<h2 class="sidebar-title">Challenges</h2>
<ul class="challenge-list">
{{range .Levels}}
<li class="challenge-item {{if and (eq .Name $.Level.Name) (eq .Difficulty $.Level.Difficulty)}}active{{end}}">
<div class="challenge-name">{{.Name}}</div>
<div class="challenge-difficulty {{.Difficulty}}">{{.Difficulty}}</div>
</li>
{{end}}
</ul>
</div>
<div id="canvas-wrapper">
<input type="radio" id="tab1" name="tab" checked>
<input type="radio" id="tab2" name="tab">
<input type="radio" id="tab3" name="tab">
<div class="tabs">
<div class="tab-labels">
<label for="tab1">Requirements</label>
<label for="tab2">Design</label>
<label for="tab3">Metrics</label>
</div>
<!-- Requirements -->
<div id="content1" class="tab-content">
{{ if .Level.InterviewerRequirements }}
<div class="requirements-section">
<h3>Interviewer Requirements</h3>
<ul class="requirements-list">
{{ range .Level.InterviewerRequirements }}
<li class="requirement-item">{{ . }}</li>
{{ end }}
</ul>
</div>
{{ end }}
{{ if .Level.FunctionalRequirements }}
<div class="requirements-section">
<h3>Functional Requirements</h3>
<ul class="requirements-list">
{{ range .Level.FunctionalRequirements }}
<li class="requirement-item">{{ . }}</li>
{{ end }}
</ul>
</div>
{{ end }}
{{ if .Level.NonFunctionalRequirements }}
<div class="requirements-section">
<h3>Non-Functional Requirements</h3>
<ul class="requirements-list">
{{ range .Level.NonFunctionalRequirements }}
<li class="requirement-item">{{ . }}</li>
{{ end }}
</ul>
</div>
{{ end }}
</div>
<!-- Design-->
<div id="content2" class="tab-content">
<div id="sidebar">
<div class="component-icon" draggable="true" data-type="user">
user
</div>
<div class="component-icon" draggable="true" data-type="loadBalancer">
load balancer
</div>
<div class="component-icon" draggable="true" data-type="webserver">
webserver
</div>
<div class="component-icon" draggable="true" data-type="database">
database
</div>
<div class="component-icon" draggable="true" data-type="cache">
cache
</div>
<div class="component-icon" draggable="true" data-type="messageQueue">
message queue
</div>
<div class="component-icon" draggable="true" data-type="cdn">
CDN
</div>
<div class="component-icon" draggable="true" data-type="microservice">
microservice node
</div>
<div class="component-icon" draggable="true" data-type="data pipeline">
data pipeline
</div>
<div class="component-icon" draggable="true" data-type="monitoring/alerting">
monitoring/alerting
</div>
<div class="component-icon" draggable="true" data-type="third party service">
third-party service
</div>
</div>
<div id="canvas-container">
<div id="connection-modal" style="display: none;" class="modal">
<div class="modal-content">
<h3>Create Connection</h3>
<label>
Label:
<input type="text" id="connection-label" value="Read traffic">
</label>
<label>
Protocol:
<select id="connection-protocol">
<option>HTTP</option>
<option>HTTPS</option>
<option>gRPC</option>
<option>WebSocket</option>
<option>GraphQL</option>
<option>Kafka</option>
<option>AMQP</option>
<option>MQTT</option>
<option>SQL</option>
<option>NoSQL</option>
<option>Redis</option>
<option>TLS</option>
</select>
</label>
<label style="margin-top: 10px;">
<input type="checkbox" id="connection-tls">
Enable TLS (encryption)
</label>
<label for="connection-capacity">Capacity Limit (RPS):</label>
<input type="number" id="connection-capacity" value="1000" min="1" />
<div class="modal-actions">
<button id="connection-save">Save</button>
<button id="connection-cancel">Cancel</button>
</div>
</div>
</div>
<div id="canvas-toolbar">
<button id="arrow-tool-btn" class="toolbar-btn">Arrow Tool</button>
</div>
<div id="info-panel">
<div id="constraints-panel">
<div class="panel-title">level constraints</div>
<div class="panel-metric"><span class="label">🎯 target rps:</span> <span id="constraint-rps">{{.Level.TargetRPS}}</span></div>
<div class="panel-metric"><span class="label"> max p95 latency:</span> <span id="constraint-latency">{{.Level.MaxP95LatencyMs}}ms</span></div>
<div class="panel-metric"><span class="label">💸 max cost:</span> <span id="constraint-cost">${{.Level.MaxMonthlyUSD}}</span></div>
<div class="panel-metric"><span class="label">🔒 availability:</span> <span id="constraint-availability">{{printf "%.2f" .Level.RequiredAvailabilityPct}}%</span></div>
</div>
</div>
<svg id="canvas">
<defs>
<marker id="arrowhead-start" markerWidth="10" markerHeight="7" refX="0" refY="3.5"
orient="auto" markerUnits="strokeWidth">
<path d="M10 0 L0 3.5 L10 7" fill="#ccc" />
</marker>
<marker id="arrowhead-end" markerWidth="10" markerHeight="7" refX="10" refY="3.5"
orient="auto" markerUnits="strokeWidth">
<path d="M0 0 L10 3.5 L0 7" fill="#ccc" />
</marker>
</defs>
</svg>
<div id="node-props-panel">
<h3>node properties</h3>
<div id="label-group" data-group="label-group">
<label>label:</label>
<input type="text" name="label" />
</div>
<div id="db-group" class="prop-group" data-group="db-group">
<label>replication factor:<input type="number" name="replication" min="1" step="1" /></label>
</div>
<div id="cache-group" class="prop-group" data-group="cache-group">
<label>cache ttl (secs):<input type="number" name="cacheTTL" min="0" step="60" /></label>
<label>Max Entries: <input name="maxEntries" type="number" /></label>
<label>Eviction Policy:
<select name="evictionPolicy">
<option value="LRU">LRU</option>
<option value="LFU">LFU</option>
<option value="Random">Random</option>
</select>
</label>
</div>
<div id="compute-group" data-group="compute-group" class="prop-group">
<label>CPU Cores:</label>
<input type="number" name="cpu" min="1" />
{{ template "canvas" . }} <label>RAM (GB):</label>
<input type="number" name="ramGb" min="1" />
{{ template "chat" . }} <label>RPS Capacity:</label>
<script> <input type="number" name="rpsCapacity" min="1" />
window.levelData = {{.LevelPayload}};
</script>
<script type="module" src="/static/index.js"></script>
</body>
<label>Monthly Cost (USD):</label>
<input type="number" name="monthlyCostUsd" min="0" />
</div>
<div id="lb-group" data-group="lb-group" class="prop-group">
<label>Algorithm</label>
<select name="algorithm">
<option value="round-robin">Round Robin</option>
<option value="least-connections">Least Connections</option>
</select>
</div>
<div id="mq-group" data-group="mq-group" class="prop-group">
<label>Queue Capacity (max messages that can be held in the queue)</label>
<input type="number" name="queueCapacity" min="1" />
<label>Retention Time (seconds)</label>
<input type="number" name="retentionSeconds" min="1" />
</div>
<div id="cdn-group" data-group="cdn-group" class="prop-group">
<label>TTL (seconds)</label>
<input type="number" name="ttl" min="1" />
<label>Geo Replication</label>
<select name="geoReplication">
<option value="global">Global</option>
<option value="regional">Regional</option>
<option value="custom">Custom</option>
</select>
<label>Caching Strategy</label>
<select name="cachingStrategy">
<option value="cache-first">Cache First</option>
<option value="network-first">Network First</option>
<option value="stale-while-revalidate">Stale While Revalidate</option>
</select>
<label>Compression</label>
<select name="compression">
<option value="brotli">Brotli</option>
<option value="gzip">Gzip</option>
<option value="none">None</option>
</select>
<label>HTTP/2 Support</label>
<select name="http2">
<option value="enabled">Enabled</option>
<option value="disabled">Disabled</option>
</select>
</div>
<div id="microservice-group" data-group="microservice-group" class="prop-group">
<label>
Instance Count:
<input type="number" name="instanceCount" value="3" min="1" />
</label>
<label>
CPU (vCPUs):
<input type="number" name="cpu" value="2" min="1" />
</label>
<label>
RAM (GB):
<input type="number" name="ramGb" value="4" min="1" />
</label>
<label>
RPS Capacity:
<input type="number" name="rpsCapacity" value="150" min="1" />
</label>
<label>
Monthly Cost (USD):
<input type="number" name="monthlyUsd" value="18" min="0" step="1" />
</label>
<label>
Scaling Strategy:
<select name="scalingStrategy">
<option value="auto" selected>Auto</option>
<option value="manual">Manual</option>
</select>
</label>
<label>
API Version:
<input type="text" name="apiVersion" value="v1" />
</label>
</div>
<div id="datapipeline-group" data-group="pipeline-group" class="prop-group">
<label>Batch Size</label>
<input type="number" name="batchSize" min="1" />
<label>Schedule</label>
<select name="schedule">
<option value="realtime">Real-time</option>
<option value="hourly">Hourly</option>
<option value="daily">Daily</option>
<option value="weekly">Weekly</option>
</select>
<label>Transformations</label>
<select name="transformations">
<option value="normalize">Normalize</option>
<option value="dedupe">Dedupe</option>
<option value="filter">Filter</option>
<option value="enrich">Enrich</option>
<option value="aggregate">Aggregate</option>
</select>
<label>Destination</label>
<input type="text" name="destination" placeholder="e.g. data warehouse" />
</div>
<div id="monitor-group" data-group="monitor-group" class="prop-group">
<label>Monitoring Tool</label>
<select name="tool">
<option value="Prometheus">Prometheus</option>
<option value="Datadog">Datadog</option>
<option value="New Relic">New Relic</option>
<option value="Grafana Cloud">Grafana Cloud</option>
</select>
<label>Alert Threshold (%)</label>
<input type="number" name="alertThreshold" min="0" max="100" />
</div>
<div id="third-party-group" data-group="third-party-group" class="prop-group">
<label>Provider</label>
<input type="text" name="provider" />
<label>Latency (ms)</label>
<input type="number" name="latency" min="0" />
</div>
<!-- PUT NEW COMPONENTS BEFORE THIS BUTTON -->
<button id="node-props-save" disabled>save</button>
</div>
<div id="bottom-panel">
<button id="run-button" disabled>Test Design</button>
</div>
</div>
</div>
<!-- Metrics-->
<div id="content3" class="tab-content">This is Tab 3 content.</div>
</div>
</div>
</div>
</div>
<script type="module" src="/static/index.js"></script>
</body>
</html> </html>
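Note: the #connection-modal markup added above is consumed by Connection.setupModal (static/connection.js); the exact binding is not visible in this part of the compare, so the following is only a sketch. It assumes setupModal caches the elements by the IDs used above, matching the fields that ConnectionState.createConnection assigns later in this diff (Connection.modal, labelInput, protocolInput, tlsCheckbox, capacityInput).

// Hypothetical sketch only -- the element IDs come from the modal markup above and the
// field names from ConnectionState.createConnection further down; the body is an assumption.
export class Connection {
    static setupModal(app) {
        Connection.modal = document.getElementById('connection-modal');
        Connection.labelInput = document.getElementById('connection-label');
        Connection.protocolInput = document.getElementById('connection-protocol');
        Connection.tlsCheckbox = document.getElementById('connection-tls');
        Connection.capacityInput = document.getElementById('connection-capacity');
        // Save/Cancel handlers would be wired here as well; omitted because they are
        // not visible in this part of the compare.
    }
}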

19
static/header.html

@ -1,19 +0,0 @@
{{ define "header" }}
<div id="sd-header">
<div class="header-logo-container">
<h1 class="header-text">System Design Game</h1>
<div class="beta-pill">BETA</div>
</div>
{{ if and .Username .Avatar }}
<div class="userbox">
<img src="{{ .Avatar }}" class="avatar" />
<span class="username">{{ .Username }}</span>
</div>
{{ else }}
<a href="/login" id="github-login-btn">
<img src="https://cdn.jsdelivr.net/gh/devicons/devicon/icons/github/github-original.svg" alt="GitHub Logo">
Login with GitHub
</a>
{{ end }}
</div>
{{ end }}

134
static/index.html

@ -45,12 +45,6 @@
align-items: center; align-items: center;
} }
.logo-container {
display: flex;
align-items: center;
gap: 12px;
}
.logo { .logo {
font-size: 1.5rem; font-size: 1.5rem;
font-weight: 700; font-weight: 700;
@ -58,17 +52,6 @@
text-shadow: 0 0 10px rgba(0, 255, 136, 0.5); text-shadow: 0 0 10px rgba(0, 255, 136, 0.5);
} }
.beta-pill {
background: linear-gradient(45deg, #ff6b35, #f7931e);
color: white;
padding: 4px 8px;
border-radius: 12px;
font-size: 0.7rem;
font-weight: 600;
text-transform: uppercase;
letter-spacing: 0.5px;
}
.nav-links { .nav-links {
display: flex; display: flex;
gap: 30px; gap: 30px;
@ -85,50 +68,6 @@
color: #00ff88; color: #00ff88;
} }
.header-actions {
display: flex;
align-items: center;
gap: 20px;
}
.login-button {
background: linear-gradient(135deg, #00ff88, #00cc6a);
color: #000;
padding: 8px 16px;
border-radius: 6px;
text-decoration: none;
font-weight: 600;
font-size: 0.9rem;
transition: all 0.3s ease;
text-transform: uppercase;
letter-spacing: 0.5px;
}
.login-button:hover {
transform: translateY(-2px);
box-shadow: 0 8px 25px rgba(0, 255, 136, 0.3);
color: #000;
}
.go-play-button {
background: linear-gradient(135deg, #00ff88, #00cc6a);
color: #000;
padding: 8px 16px;
border-radius: 6px;
text-decoration: none;
font-weight: 600;
font-size: 0.9rem;
transition: all 0.3s ease;
text-transform: uppercase;
letter-spacing: 0.5px;
}
.go-play-button:hover {
transform: translateY(-2px);
box-shadow: 0 8px 25px rgba(0, 255, 136, 0.3);
color: #000;
}
.beta-badge { .beta-badge {
background: linear-gradient(45deg, #ff6b35, #f7931e); background: linear-gradient(45deg, #ff6b35, #f7931e);
color: white; color: white;
@ -739,15 +678,6 @@
.nav-links { .nav-links {
display: none; display: none;
} }
.header-actions {
gap: 10px;
}
.login-button {
padding: 6px 12px;
font-size: 0.8rem;
}
} }
.success-message { .success-message {
@ -829,23 +759,14 @@
<body> <body>
<header> <header>
<nav class="container"> <nav class="container">
<div class="logo-container"> <div class="logo">System Design Game</div>
<div class="logo">System Design Game</div>
<div class="beta-pill">BETA</div>
</div>
<ul class="nav-links"> <ul class="nav-links">
<li><a href="#problem">Problem</a></li> <li><a href="#problem">Problem</a></li>
<li><a href="#solution">Solution</a></li> <li><a href="#solution">Solution</a></li>
<li><a href="#how">How It Works</a></li> <li><a href="#how">How It Works</a></li>
<li><a href="#faq">FAQ</a></li> <li><a href="#faq">FAQ</a></li>
</ul> </ul>
<div class="header-actions"> <div class="beta-badge">COMING SOON</div>
<!-- Logged out state -->
<a href="/login" class="login-button" id="login-button">Login</a>
<!-- Logged in state -->
<a href="/play/chat-app" class="go-play-button" id="go-play-button" style="display: none;">Go Play</a>
</div>
</nav> </nav>
</header> </header>
@ -859,18 +780,18 @@
</div> </div>
<div class="cta-form"> <div class="cta-form">
<h3>🚀 Get Updates</h3> <h3>🚀 Get Early Access</h3>
<form class="final-cta-form" id="finalBetaForm" action="https://gmail.us7.list-manage.com/subscribe/post?u=913ad95101d97bff0b1873301&amp;id=77dabc87db&amp;f_id=0070c9e4f0" method="post" target="_blank" novalidate> <form class="final-cta-form" id="finalBetaForm" action="https://gmail.us7.list-manage.com/subscribe/post?u=913ad95101d97bff0b1873301&amp;id=77dabc87db&amp;f_id=0070c9e4f0" method="post" target="_blank" novalidate>
<input type="email" name="EMAIL" id="mce-EMAIL" placeholder="Enter your email" required> <input type="email" name="EMAIL" id="mce-EMAIL" placeholder="Enter your email" required>
<div style="position: absolute; left: -5000px;" aria-hidden="true"> <div style="position: absolute; left: -5000px;" aria-hidden="true">
<input type="text" name="b_913ad95101d97bff0b1873301_77dabc87db" tabindex="-1" value=""> <input type="text" name="b_913ad95101d97bff0b1873301_77dabc87db" tabindex="-1" value="">
</div> </div>
<button type="submit">Get Updates</button> <button type="submit">Join Waitlist</button>
</form> </form>
<div class="success-message" id="successMessage"> <div class="success-message" id="successMessage">
✅ You're in! We'll keep you updated on our progress. ✅ You're in! We'll notify you when beta access is available.
</div> </div>
<p class="beta-info">🔥 Stay updated on our latest developments</p> <p class="beta-info">🔥 Be the first to know when we launch</p>
</div> </div>
</div> </div>
</div> </div>
@ -1053,7 +974,7 @@
<div class="faq-list"> <div class="faq-list">
<div class="faq-item"> <div class="faq-item">
<div class="faq-question">When will the beta be available?</div> <div class="faq-question">When will the beta be available?</div>
<div class="faq-answer">We're currently in development and aiming to launch in the coming months. Sign up for updates to be notified as soon as it's ready.</div> <div class="faq-answer">We're currently in development and aiming to launch the beta in the coming months. Sign up for the waitlist to be notified as soon as it's ready.</div>
</div> </div>
<div class="faq-item"> <div class="faq-item">
<div class="faq-question">Do I need to install anything to use the System Design Game?</div> <div class="faq-question">Do I need to install anything to use the System Design Game?</div>
@ -1078,13 +999,13 @@
<section class="final-cta"> <section class="final-cta">
<div class="container"> <div class="container">
<h2>Ready to Level Up Your System Design Skills?</h2> <h2>Ready to Level Up Your System Design Skills?</h2>
<p>Get updates and be the first to know about our latest platform developments.</p> <p>Join the waitlist and be the first to know when our interactive browser-based platform launches.</p>
<form class="final-cta-form" id="finalBetaForm" action="https://gmail.us7.list-manage.com/subscribe/post?u=913ad95101d97bff0b1873301&amp;id=77dabc87db&amp;f_id=0070c9e4f0" method="post" target="_blank" novalidate> <form class="final-cta-form" id="finalBetaForm" action="https://gmail.us7.list-manage.com/subscribe/post?u=913ad95101d97bff0b1873301&amp;id=77dabc87db&amp;f_id=0070c9e4f0" method="post" target="_blank" novalidate>
<input type="email" name="EMAIL" id="mce-EMAIL" placeholder="Enter your email" required> <input type="email" name="EMAIL" id="mce-EMAIL" placeholder="Enter your email" required>
<div style="position: absolute; left: -5000px;" aria-hidden="true"> <div style="position: absolute; left: -5000px;" aria-hidden="true">
<input type="text" name="b_913ad95101d97bff0b1873301_77dabc87db" tabindex="-1" value=""> <input type="text" name="b_913ad95101d97bff0b1873301_77dabc87db" tabindex="-1" value="">
</div> </div>
<button type="submit">Get Updates</button> <button type="submit">Join Waitlist</button>
</form> </form>
</div> </div>
</section> </section>
@ -1099,6 +1020,21 @@
</footer> </footer>
<script> <script>
// Form handling
document.getElementById('betaForm').addEventListener('submit', function(e) {
e.preventDefault();
const email = this.querySelector('input[type="email"]').value;
// Simulate API call
setTimeout(() => {
document.getElementById('successMessage').style.display = 'block';
this.style.display = 'none';
// Track conversion (in real app, send to analytics)
console.log('Beta signup:', email);
}, 500);
});
// Smooth scrolling // Smooth scrolling
document.querySelectorAll('a[href^="#"]').forEach(anchor => { document.querySelectorAll('a[href^="#"]').forEach(anchor => {
anchor.addEventListener('click', function (e) { anchor.addEventListener('click', function (e) {
@ -1112,28 +1048,6 @@
} }
}); });
}); });
// Check authentication state and toggle buttons
function checkAuthState() {
// Check if user is logged in (you can modify this logic based on your auth implementation)
const isLoggedIn = localStorage.getItem('isLoggedIn') === 'true' ||
document.cookie.includes('session=') ||
window.location.search.includes('logged_in=true');
const loginButton = document.getElementById('login-button');
const goPlayButton = document.getElementById('go-play-button');
if (isLoggedIn) {
loginButton.style.display = 'none';
goPlayButton.style.display = 'inline-block';
} else {
loginButton.style.display = 'inline-block';
goPlayButton.style.display = 'none';
}
}
// Check auth state on page load
document.addEventListener('DOMContentLoaded', checkAuthState);
</script> </script>
</body> </body>
</html> </html>

359
static/node.js

@ -2,186 +2,181 @@ import { Connection } from './connection.js';
import { generateNodeId, createSVGElement } from './utils.js'; import { generateNodeId, createSVGElement } from './utils.js';
export class ComponentNode { export class ComponentNode {
constructor(type, x, y, app, props = {}) { constructor(type, x, y, app, props = {}) {
this.id = generateNodeId(); this.id = generateNodeId();
this.type = type; this.type = type;
this.app = app; this.app = app;
this.props = { this.props = {
label: type, label: type,
replication: 1, replication: 1,
cacheTTL: 0, cacheTTL: 0,
instanceSize: 'medium', instanceSize: 'medium',
...props ...props
}; };
this.group = createSVGElement('g', { class: 'dropped', 'data-type': type }); this.group = createSVGElement('g', { class: 'dropped', 'data-type': type });
const rect = createSVGElement('rect', { const rect = createSVGElement('rect', {
x, x,
y, y,
width: 0, // will be updated after measuring text width: 0, // will be updated after measuring text
height: app.componentSize.height, height: app.componentSize.height,
fill: '#121212', fill: '#121212',
stroke: '#00ff88', stroke: '#00ff88',
'stroke-width': 1, 'stroke-width': 1,
rx: 4, rx: 4,
ry: 4 ry: 4
}); });
this.text = createSVGElement('text', { this.text = createSVGElement('text', {
x: x + app.componentSize.width / 2, x: x + app.componentSize.width / 2,
y: y + app.componentSize.height / 2 + 5, y: y + app.componentSize.height / 2 + 5,
'text-anchor': 'middle', 'text-anchor': 'middle',
'font-size': 16, 'font-size': 16,
fill: '#ccc' fill: '#ccc'
}); });
this.text.textContent = this.props.label; this.text.textContent = this.props.label;
// Temporarily add text to canvas to measure its width // Temporarily add text to canvas to measure its width
app.canvas.appendChild(this.text); app.canvas.appendChild(this.text);
const textWidth = this.text.getBBox().width; const textWidth = this.text.getBBox().width;
const padding = 20; const padding = 20;
const finalWidth = textWidth + padding; const finalWidth = textWidth + padding;
// Update rect width and center text // Update rect width and center text
rect.setAttribute('width', finalWidth); rect.setAttribute('width', finalWidth);
this.text.setAttribute('x', x + finalWidth / 2); this.text.setAttribute('x', x + finalWidth / 2);
// Clean up temporary text // Clean up temporary text
app.canvas.removeChild(this.text); app.canvas.removeChild(this.text);
this.group.appendChild(rect); this.group.appendChild(rect);
this.group.appendChild(this.text); this.group.appendChild(this.text);
this.group.__nodeObj = this; this.group.__nodeObj = this;
this.initDrag(); this.initDrag();
this.group.addEventListener('click', (e) => { this.group.addEventListener('click', (e) => {
e.stopPropagation(); e.stopPropagation();
if (app.arrowMode) { if (app.arrowMode) {
Connection.handleClick(this, app); Connection.handleClick(this, app);
} else { } else {
// Use observer to notify node selection app.clearSelection();
app.nodeSelectionSubject.notifyNodeSelected(this); this.select();
app.selectedNode = this; // Keep app state in sync for now }
} });
});
this.group.addEventListener('dblclick', (e) => {
this.group.addEventListener('dblclick', (e) => { e.stopPropagation();
e.stopPropagation(); if (!app.arrowMode) {
if (!app.arrowMode) { app.showPropsPanel(this);
// Use observer pattern instead of direct call }
//app.propertiesPanelSubject.notifyPropertiesPanelRequested(this); });
}
}); app.canvas.appendChild(this.group); // ✅ now correctly adding full group
app.placedComponents.push(this);
app.canvas.appendChild(this.group); // ✅ now correctly adding full group app.runButton.disabled = false;
app.placedComponents.push(this);
app.runButton.disabled = false; this.x = x;
this.y = y;
this.x = x; }
this.y = y;
} initDrag() {
let offsetX, offsetY;
initDrag() {
let offsetX, offsetY; const onMouseMove = (e) => {
const pt = this.app.canvas.createSVGPoint();
const onMouseMove = (e) => { pt.x = e.clientX;
const pt = this.app.canvas.createSVGPoint(); pt.y = e.clientY;
pt.x = e.clientX; const svgP = pt.matrixTransform(this.app.canvas.getScreenCTM().inverse());
pt.y = e.clientY;
const svgP = pt.matrixTransform(this.app.canvas.getScreenCTM().inverse()); const newX = svgP.x - offsetX;
const newY = svgP.y - offsetY;
const newX = svgP.x - offsetX;
const newY = svgP.y - offsetY; this.group.setAttribute('transform', `translate(${newX}, ${newY})`);
this.group.setAttribute('transform', `translate(${newX}, ${newY})`); this.x = newX;
this.y = newY;
this.x = newX;
this.y = newY; this.app.updateConnectionsFor(this);
};
this.app.updateConnectionsFor(this);
}; const onMouseUp = () => {
window.removeEventListener('mousemove', onMouseMove);
const onMouseUp = () => { window.removeEventListener('mouseup', onMouseUp);
window.removeEventListener('mousemove', onMouseMove); };
window.removeEventListener('mouseup', onMouseUp);
}; this.group.addEventListener('mousedown', (e) => {
e.preventDefault();
this.group.addEventListener('mousedown', (e) => { const pt = this.app.canvas.createSVGPoint();
e.preventDefault(); pt.x = e.clientX;
const pt = this.app.canvas.createSVGPoint(); pt.y = e.clientY;
pt.x = e.clientX; const svgP = pt.matrixTransform(this.app.canvas.getScreenCTM().inverse());
pt.y = e.clientY;
const svgP = pt.matrixTransform(this.app.canvas.getScreenCTM().inverse()); const ctm = this.group.getCTM();
offsetX = svgP.x - ctm.e;
const ctm = this.group.getCTM(); offsetY = svgP.y - ctm.f;
offsetX = svgP.x - ctm.e;
offsetY = svgP.y - ctm.f; window.addEventListener('mousemove', onMouseMove);
window.addEventListener('mouseup', onMouseUp);
window.addEventListener('mousemove', onMouseMove); });
window.addEventListener('mouseup', onMouseUp); }
});
} updateLabel(newLabel) {
this.props.label = newLabel;
updateLabel(newLabel) { this.text.textContent = newLabel;
this.props.label = newLabel; const textWidth = this.text.getBBox().width;
this.text.textContent = newLabel; const padding = 20;
const textWidth = this.text.getBBox().width; const finalWidth = textWidth + padding;
const padding = 20;
const finalWidth = textWidth + padding; this.group.querySelector('rect').setAttribute('width', finalWidth);
this.text.setAttribute('x', parseFloat(this.group.querySelector('rect').getAttribute('x')) + finalWidth / 2);
this.group.querySelector('rect').setAttribute('width', finalWidth);
this.text.setAttribute('x', parseFloat(this.group.querySelector('rect').getAttribute('x')) + finalWidth / 2); }
} getCenter() {
const bbox = this.group.getBBox();
getCenter() { const ctm = this.group.getCTM();
const bbox = this.group.getBBox(); const x = ctm.e + bbox.x + bbox.width / 2;
const ctm = this.group.getCTM(); const y = ctm.f + bbox.y + bbox.height / 2;
const x = ctm.e + bbox.x + bbox.width / 2; return { x, y };
const y = ctm.f + bbox.y + bbox.height / 2; }
return { x, y };
} select() {
this.app.clearSelection();
select() { this.group.classList.add('selected');
// Use observer to clear previous selection and select this node this.app.selectedNode = this;
if (this.app.selectedNode && this.app.selectedNode !== this) { }
this.app.nodeSelectionSubject.notifyNodeDeselected(this.app.selectedNode);
} deselect() {
this.group.classList.add('selected'); this.group.classList.remove('selected');
this.app.selectedNode = this; if (this.app.selectedNode === this) {
} this.app.selectedNode = null;
}
deselect() { }
this.group.classList.remove('selected');
if (this.app.selectedNode === this) { getConnectionPointToward(otherNode) {
this.app.selectedNode = null; const bbox = this.group.getBBox();
} const ctm = this.group.getCTM();
}
const centerX = ctm.e + bbox.x + bbox.width / 2;
getConnectionPointToward(otherNode) { const centerY = ctm.f + bbox.y + bbox.height / 2;
const bbox = this.group.getBBox();
const ctm = this.group.getCTM(); const otherCenter = otherNode.getCenter();
const centerX = ctm.e + bbox.x + bbox.width / 2; let edgeX = centerX;
const centerY = ctm.f + bbox.y + bbox.height / 2; let edgeY = centerY;
const otherCenter = otherNode.getCenter(); const dx = otherCenter.x - centerX;
const dy = otherCenter.y - centerY;
let edgeX = centerX;
let edgeY = centerY; if (Math.abs(dx) > Math.abs(dy)) {
edgeX += dx > 0 ? bbox.width / 2 : -bbox.width / 2;
const dx = otherCenter.x - centerX; } else {
const dy = otherCenter.y - centerY; edgeY += dy > 0 ? bbox.height / 2 : -bbox.height / 2;
}
if (Math.abs(dx) > Math.abs(dy)) {
edgeX += dx > 0 ? bbox.width / 2 : -bbox.width / 2; return { x: edgeX, y: edgeY };
} else { }
edgeY += dy > 0 ? bbox.height / 2 : -bbox.height / 2;
}
return { x: edgeX, y: edgeY };
}
} }
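Because the split view above interleaves the removed observer-based handlers (left column) with their replacements (right column), the new click wiring is easier to read pulled out on its own. The snippet below is reconstructed from the right-hand column of the ComponentNode constructor; nothing beyond what the diff already shows is added.

// Inside the ComponentNode constructor, as added by this change (right-hand column above).
this.group.addEventListener('click', (e) => {
    e.stopPropagation();
    if (app.arrowMode) {
        Connection.handleClick(this, app);   // arrow mode: clicks build connections
    } else {
        app.clearSelection();                // design mode: plain selection replaces the observer subjects
        this.select();
    }
});

this.group.addEventListener('dblclick', (e) => {
    e.stopPropagation();
    if (!app.arrowMode) {
        app.showPropsPanel(this);            // direct call replaces notifyPropertiesPanelRequested
    }
});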

411
static/observers.js

@ -1,411 +0,0 @@
/**
* Dedicated Observer Pattern Implementation
*
* Each observer is dedicated to a particular concern and is type-safe.
* This provides clean separation of concerns and maintainable event handling.
*/
import { PluginRegistry } from './pluginRegistry.js';
/**
* NodeSelectionObserver - Dedicated to node selection events only
*/
export class NodeSelectionObserver {
constructor() {
this.observers = [];
}
// Add a specific observer for node selection changes
addObserver(observer) {
if (typeof observer.onNodeSelected !== 'function' ||
typeof observer.onNodeDeselected !== 'function') {
throw new Error('Observer must implement onNodeSelected and onNodeDeselected methods');
}
this.observers.push(observer);
}
removeObserver(observer) {
const index = this.observers.indexOf(observer);
if (index !== -1) {
this.observers.splice(index, 1);
}
}
// Notify all observers about node selection
notifyNodeSelected(node) {
for (const observer of this.observers) {
observer.onNodeSelected(node);
}
}
// Notify all observers about node deselection
notifyNodeDeselected(node) {
for (const observer of this.observers) {
observer.onNodeDeselected(node);
}
}
}
/**
* PropertiesPanelObserver - Dedicated to properties panel events only
*/
export class PropertiesPanelObserver {
constructor() {
this.observers = [];
}
addObserver(observer) {
if (typeof observer.onPropertiesPanelRequested !== 'function') {
throw new Error('Observer must implement onPropertiesPanelRequested method');
}
this.observers.push(observer);
}
removeObserver(observer) {
const index = this.observers.indexOf(observer);
if (index !== -1) {
this.observers.splice(index, 1);
}
}
notifyPropertiesPanelRequested(node) {
for (const observer of this.observers) {
observer.onPropertiesPanelRequested(node);
}
}
notifyPropertiesPanelClosed(node) {
for (const observer of this.observers) {
if (observer.onPropertiesPanelClosed) {
observer.onPropertiesPanelClosed(node);
}
}
}
}
/**
* ConnectionModeObserver - Dedicated to connection/arrow mode events
*/
export class ConnectionModeObserver {
constructor() {
this.observers = [];
}
addObserver(observer) {
if (typeof observer.onConnectionModeChanged !== 'function') {
throw new Error('Observer must implement onConnectionModeChanged method');
}
this.observers.push(observer);
}
removeObserver(observer) {
const index = this.observers.indexOf(observer);
if (index !== -1) {
this.observers.splice(index, 1);
}
}
notifyConnectionModeChanged(isEnabled) {
for (const observer of this.observers) {
observer.onConnectionModeChanged(isEnabled);
}
}
}
/**
* Properties Panel Manager - Implements observer interfaces
*/
export class PropertiesPanelManager {
constructor(panelElement, saveButton) {
this.panel = panelElement;
this.saveButton = saveButton;
this.activeNode = null;
this.setupDOMEventListeners();
}
// Implement the observer interface for properties panel events
onPropertiesPanelRequested(node) {
const plugin = PluginRegistry.get(node.type);
if (!plugin) {
this.hidePanel();
return;
}
this.showPanel(node, plugin);
}
onPropertiesPanelClosed(node) {
if (this.activeNode === node) {
this.hidePanel();
}
}
// Implement the observer interface for connection mode events
onConnectionModeChanged(isEnabled) {
if (isEnabled && this.activeNode) {
this.hidePanel();
}
}
// Implement the observer interface for node selection events
onNodeSelected(node) {
// Properties panel doesn't need to do anything special when nodes are selected
// The double-click handler takes care of showing the panel
}
onNodeDeselected(node) {
// When a node is deselected, close the properties panel if it's for that node
if (this.activeNode === node) {
this.hidePanel();
}
}
setupDOMEventListeners() {
this.saveButton.addEventListener('click', () => {
this.saveProperties();
});
this.panel.addEventListener('click', (e) => {
e.stopPropagation();
});
}
showPanel(node, plugin) {
this.activeNode = node;
// Calculate position for optimal placement
const nodeRect = node.group.getBoundingClientRect();
const panelWidth = 220;
const panelHeight = 400;
let dialogX = nodeRect.right + 10;
let dialogY = nodeRect.top;
if (dialogX + panelWidth > window.innerWidth) {
dialogX = nodeRect.left - panelWidth - 10;
}
if (dialogY + panelHeight > window.innerHeight) {
dialogY = window.innerHeight - panelHeight - 10;
}
if (dialogY < 10) {
dialogY = 10;
}
this.panel.style.left = dialogX + 'px';
this.panel.style.top = dialogY + 'px';
// Hide all groups first
const allGroups = this.panel.querySelectorAll('.prop-group, #label-group, #compute-group, #lb-group');
allGroups.forEach(g => g.style.display = 'none');
const shownGroups = new Set();
// Set up properties based on plugin definition
for (const prop of plugin.props) {
const group = this.panel.querySelector(`[data-group='${prop.group}']`);
const input = this.panel.querySelector(`[name='${prop.name}']`);
// Show group once
if (group && !shownGroups.has(group)) {
group.style.display = 'block';
shownGroups.add(group);
}
// Set value
if (input) {
input.value = node.props[prop.name] ?? prop.default;
}
}
this.saveButton.disabled = false;
this.panel.style.display = 'block';
setTimeout(() => {
this.panel.classList.add('visible');
}, 10);
}
hidePanel() {
if (!this.activeNode) return;
this.panel.classList.remove('visible');
setTimeout(() => {
this.panel.style.display = 'none';
}, 200);
this.activeNode = null;
}
saveProperties() {
if (!this.activeNode) return;
const node = this.activeNode;
const panel = this.panel;
const plugin = PluginRegistry.get(node.type);
if (!plugin || !plugin.props) {
this.hidePanel();
return;
}
// Loop through plugin-defined props and update the node
for (const prop of plugin.props) {
const input = panel.querySelector(`[name='${prop.name}']`);
if (!input) continue;
let value;
if (prop.type === 'number') {
value = parseFloat(input.value);
if (isNaN(value)) value = prop.default ?? 0;
} else {
value = input.value;
}
node.props[prop.name] = value;
if (prop.name === 'label') {
node.updateLabel(value);
}
}
this.hidePanel();
}
}
/**
* Selection Manager - Implements node selection observer interface
*/
export class SelectionManager {
constructor() {
this.selectedNode = null;
this.selectedConnection = null;
}
// Implement the observer interface for node selection
onNodeSelected(node) {
this.clearSelection();
this.selectedNode = node;
node.select(); // Visual feedback
}
onNodeDeselected(node) {
if (this.selectedNode === node) {
node.deselect();
this.selectedNode = null;
}
}
clearSelection() {
if (this.selectedNode) {
this.selectedNode.deselect();
this.selectedNode = null;
}
if (this.selectedConnection) {
this.selectedConnection.deselect();
this.selectedConnection = null;
}
}
}
/**
* Initialize the observer system
*/
export function initializeObservers(nodePropsPanel, propsSaveBtn) {
// Create the specific observers (subjects)
const nodeSelectionSubject = new NodeSelectionObserver();
const propertiesPanelSubject = new PropertiesPanelObserver();
const connectionModeSubject = new ConnectionModeObserver();
// Create the specific observers (listeners)
const propertiesPanel = new PropertiesPanelManager(nodePropsPanel, propsSaveBtn);
const selectionManager = new SelectionManager();
// Wire them together - each observer registers for what it cares about
nodeSelectionSubject.addObserver(selectionManager);
nodeSelectionSubject.addObserver(propertiesPanel); // Properties panel cares about selection too
propertiesPanelSubject.addObserver(propertiesPanel);
connectionModeSubject.addObserver(propertiesPanel); // Panel hides when arrow mode enabled
// Return the subjects so the main app can notify them
return {
nodeSelection: nodeSelectionSubject,
propertiesPanel: propertiesPanelSubject,
connectionMode: connectionModeSubject
};
}
/**
* How the main CanvasApp would use these observers
*/
export class CanvasAppWithObservers {
constructor() {
// Initialize observers
const observers = initializeObservers();
this.nodeSelectionSubject = observers.nodeSelection;
this.propertiesPanelSubject = observers.propertiesPanel;
this.connectionModeSubject = observers.connectionMode;
this.selectedNode = null;
this.arrowMode = false;
this.setupEventListeners();
}
setupEventListeners() {
// Canvas click - clear selection
this.canvas.addEventListener('click', (e) => {
if (e.detail > 1) return; // Ignore double-clicks
if (this.selectedNode) {
this.nodeSelectionSubject.notifyNodeDeselected(this.selectedNode);
this.selectedNode = null;
}
});
// Arrow mode toggle
this.arrowToolBtn.addEventListener('click', () => {
this.arrowMode = !this.arrowMode;
this.connectionModeSubject.notifyConnectionModeChanged(this.arrowMode);
});
}
// When a node is double-clicked
onNodeDoubleClick(node) {
if (!this.arrowMode) {
this.propertiesPanelSubject.notifyPropertiesPanelRequested(node);
}
}
// When a node is single-clicked
onNodeSingleClick(node) {
if (this.selectedNode !== node) {
if (this.selectedNode) {
this.nodeSelectionSubject.notifyNodeDeselected(this.selectedNode);
}
this.selectedNode = node;
this.nodeSelectionSubject.notifyNodeSelected(node);
}
}
}
/**
* Key Benefits of This Approach:
*
* 1. TYPE SAFETY: Each observer has a specific interface
* 2. SINGLE RESPONSIBILITY: Each observer handles ONE concern
* 3. NO MAGIC STRINGS: No event type constants that can be mistyped
* 4. COMPILE-TIME CHECKING: TypeScript/IDE can validate observer interfaces
* 5. FOCUSED: PropertiesPanelObserver only knows about properties panels
* 6. TESTABLE: Each observer can be tested with mock implementations
*
* This approach is more verbose but much safer and clearer about
* what each component is responsible for.
*/

4
static/pluginRegistry.js

@ -8,8 +8,4 @@ export class PluginRegistry {
static get(type) { static get(type) {
return this.plugins[type]; return this.plugins[type];
} }
static getAll() {
return Object.keys(this.plugins)
}
} }
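For orientation, here is a minimal sketch of how this registry is used by the static/plugins/*.js files that follow. The register()/get() calls and the prop shape come straight from the diffs; the static plugins initializer is an assumption, since it is not shown here.

// Sketch of the PluginRegistry pattern, assuming a static map keyed by component type.
export class PluginRegistry {
    static plugins = {};                      // assumed initializer; not shown in the diff

    static register(type, definition) {
        this.plugins[type] = definition;      // definition = { label, props: [{ name, type, default, group }] }
    }

    static get(type) {
        return this.plugins[type];            // matches the get() kept by this change
    }
}

// Usage mirroring static/plugins/database.js after this change:
PluginRegistry.register('database', {
    label: 'Database',
    props: [
        { name: 'label', type: 'string', default: 'Database', group: 'label-group' },
        { name: 'replication', type: 'number', default: 1, group: 'db-group' }
    ]
});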

4
static/plugins/database.js

@ -5,8 +5,6 @@ PluginRegistry.register('database', {
label: 'Database', label: 'Database',
props: [ props: [
{ name: 'label', type: 'string', default: 'Database', group: 'label-group' }, { name: 'label', type: 'string', default: 'Database', group: 'label-group' },
{ name: 'replication', type: 'number', default: 1, group: 'db-group' }, { name: 'replication', type: 'number', default: 1, group: 'db-group' }
{ name: 'maxRPS', type: 'number', default: 1000, group: 'db-group' },
{ name: 'baseLatencyMs', type: 'number', default: 10, group: 'db-group' }
] ]
}); });

3
static/plugins/messageQueue.js

@ -6,7 +6,6 @@ PluginRegistry.register('messageQueue', {
props: [ props: [
{ name: 'label', type: 'string', default: 'MQ', group: 'label-group' }, { name: 'label', type: 'string', default: 'MQ', group: 'label-group' },
{ name: 'queueCapacity', type: 'number', default: 10000, group: 'mq-group' }, { name: 'queueCapacity', type: 'number', default: 10000, group: 'mq-group' },
{ name: 'retentionSeconds', type: 'number', default: 600, group: 'mq-group' }, { name: 'retentionSeconds', type: 'number', default: 600, group: 'mq-group' }
{ name: 'processingRate', type: 'number', default: 100, group: 'mq-group' }
] ]
}); });

4
static/plugins/microservice.js

@ -9,6 +9,8 @@ PluginRegistry.register('microservice', {
{ name: 'cpu', type: 'number', default: 2, group: 'microservice-group' }, { name: 'cpu', type: 'number', default: 2, group: 'microservice-group' },
{ name: 'ramGb', type: 'number', default: 4, group: 'microservice-group' }, { name: 'ramGb', type: 'number', default: 4, group: 'microservice-group' },
{ name: 'rpsCapacity', type: 'number', default: 150, group: 'microservice-group' }, { name: 'rpsCapacity', type: 'number', default: 150, group: 'microservice-group' },
{ name: 'scalingStrategy', type: 'string', default: 'auto', group: 'microservice-group' } { name: 'monthlyUsd', type: 'number', default: 18, group: 'microservice-group' },
{ name: 'scalingStrategy', type: 'string', default: 'auto', group: 'microservice-group' },
{ name: 'apiVersion', type: 'string', default: 'v1', group: 'microservice-group' }
] ]
}); });

4
static/plugins/monitorAlerting.js

@ -6,8 +6,6 @@ PluginRegistry.register('monitoring/alerting', {
props: [ props: [
{ name: 'label', type: 'string', default: 'monitor', group: 'label-group' }, { name: 'label', type: 'string', default: 'monitor', group: 'label-group' },
{ name: 'tool', type: 'string', default: 'Prometheus', group: 'monitor-group' }, { name: 'tool', type: 'string', default: 'Prometheus', group: 'monitor-group' },
{ name: 'alertMetric', type: 'string', default: 'latency', group: 'monitor-group' }, { name: 'alertThreshold', type: 'number', default: 80, group: 'monitor-group' }
{ name: 'thresholdValue', type: 'number', default: 80, group: 'monitor-group' },
{ name: 'thresholdUnit', type: 'string', default: 'ms', group: 'monitor-group' }
] ]
}); });

4
static/plugins/webserver.js

@ -5,7 +5,9 @@ PluginRegistry.register('webserver', {
label: 'Web Server', label: 'Web Server',
props: [ props: [
{ name: 'label', type: 'string', default: 'Web Server', group: 'label-group' }, { name: 'label', type: 'string', default: 'Web Server', group: 'label-group' },
{ name: 'cpu', type: 'number', default: 2, group: 'compute-group' },
{ name: 'ramGb', type: 'number', default: 4, group: 'compute-group' },
{ name: 'rpsCapacity', type: 'number', default: 200, group: 'compute-group' }, { name: 'rpsCapacity', type: 'number', default: 200, group: 'compute-group' },
{ name: 'baseLatencyMs', type: 'number', default: 20, group: 'compute-group' } { name: 'monthlyCostUsd', type: 'number', default: 20, group: 'compute-group' }
] ]
}); });

156
static/states/CanvasState.js

@ -1,156 +0,0 @@
/**
* Base Canvas State - Nystrom's State Pattern Implementation
*
* This abstract base class defines the interface that all canvas states must implement.
* Each state handles user interactions differently, eliminating the need for mode checking.
*/
export class CanvasState {
/**
* Called when entering this state
* @param {CanvasApp} app - The canvas application context
*/
enter(app) {
// Override in concrete states
}
/**
* Called when exiting this state
* @param {CanvasApp} app - The canvas application context
*/
exit(app) {
// Override in concrete states
}
/**
* Handle clicks on the canvas background
* @param {CanvasApp} app - The canvas application context
* @param {MouseEvent} event - The click event
*/
handleCanvasClick(app, event) {
// Default: clear selections
if (event.detail > 1) return; // Ignore double-clicks
// Clear any connection start
if (app.connectionStart) {
app.connectionStart.group.classList.remove('selected');
app.connectionStart = null;
}
// Clear node selection via observer
if (app.selectedNode) {
app.nodeSelectionSubject.notifyNodeDeselected(app.selectedNode);
app.selectedNode = null;
}
// Clear connection selection
if (app.selectedConnection) {
app.selectedConnection.deselect();
app.selectedConnection = null;
}
}
/**
* Handle single clicks on nodes
* @param {CanvasApp} app - The canvas application context
* @param {ComponentNode} node - The clicked node
* @param {MouseEvent} event - The click event
*/
handleNodeClick(app, node, event) {
// Override in concrete states
throw new Error(`${this.constructor.name} must implement handleNodeClick()`);
}
/**
* Handle double clicks on nodes
* @param {CanvasApp} app - The canvas application context
* @param {ComponentNode} node - The double-clicked node
*/
handleNodeDoubleClick(app, node) {
// Override in concrete states
throw new Error(`${this.constructor.name} must implement handleNodeDoubleClick()`);
}
/**
* Handle component drops from sidebar
* @param {CanvasApp} app - The canvas application context
* @param {DragEvent} event - The drop event
*/
async handleDrop(app, event) {
// Default implementation - most states allow dropping
const type = event.dataTransfer.getData('text/plain');
// Import PluginRegistry dynamically to avoid circular imports
const { PluginRegistry } = await import('../pluginRegistry.js');
const plugin = PluginRegistry.get(type);
if (!plugin) return;
const pt = app.canvas.createSVGPoint();
pt.x = event.clientX;
pt.y = event.clientY;
const svgP = pt.matrixTransform(app.canvas.getScreenCTM().inverse());
const x = svgP.x - app.componentSize.width / 2;
const y = svgP.y - app.componentSize.height / 2;
const { generateDefaultProps } = await import('../utils.js');
const { ComponentNode } = await import('../node.js');
const props = generateDefaultProps(plugin);
const node = new ComponentNode(type, x, y, app, props);
node.x = x;
node.y = y;
}
/**
* Handle keyboard events
* @param {CanvasApp} app - The canvas application context
* @param {KeyboardEvent} event - The keyboard event
*/
handleKeyDown(app, event) {
// Default: handle delete key
if (event.key === 'Backspace' || event.key === 'Delete') {
if (app.selectedConnection) {
app.canvas.removeChild(app.selectedConnection.line);
app.canvas.removeChild(app.selectedConnection.text);
const index = app.connections.indexOf(app.selectedConnection);
if (index !== -1) app.connections.splice(index, 1);
app.selectedConnection = null;
} else if (app.selectedNode) {
app.canvas.removeChild(app.selectedNode.group);
app.placedComponents = app.placedComponents.filter(n => n !== app.selectedNode);
app.connections = app.connections.filter(conn => {
if (conn.start === app.selectedNode || conn.end === app.selectedNode) {
app.canvas.removeChild(conn.line);
app.canvas.removeChild(conn.text);
return false;
}
return true;
});
app.selectedNode = null;
app.activeNode = null;
}
}
}
/**
* Get the display name of this state
*/
getStateName() {
return this.constructor.name.replace('State', '');
}
/**
* Get the cursor style for this state
*/
getCursor() {
return 'default';
}
/**
* Whether this state allows properties panel to open
*/
allowsPropertiesPanel() {
return true;
}
}
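handleDrop above relies on generateDefaultProps from ../utils.js, which is not part of this compare. A plausible sketch, assuming it simply folds each plugin prop's default into a props object:

// Assumed behavior of generateDefaultProps (static/utils.js is unchanged in this compare,
// so this body is an inference, not the project's actual code).
export function generateDefaultProps(plugin) {
    const props = {};
    for (const prop of plugin.props) {
        props[prop.name] = prop.default;      // e.g. { label: 'Database', replication: 1 }
    }
    return props;
}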

161
static/states/CanvasStateMachine.js

@ -1,161 +0,0 @@
/**
* Canvas State Machine - Manages state transitions for the canvas
*
* This class coordinates state changes and ensures proper enter/exit calls.
* It follows Nystrom's State Pattern implementation guidelines.
*/
import { DesignState } from './DesignState.js';
import { ConnectionState } from './ConnectionState.js';
export class CanvasStateMachine {
constructor(app) {
this.app = app;
this.currentState = null;
// Pre-create state instances for reuse
this.states = {
design: new DesignState(),
connection: new ConnectionState()
};
// Start in design state
this.changeState('design');
}
/**
* Change to a new state
* @param {string} stateName - Name of the state to change to
*/
changeState(stateName) {
const newState = this.states[stateName];
if (!newState) {
return;
}
if (this.currentState === newState) {
return;
}
// Exit current state
if (this.currentState) {
this.currentState.exit(this.app);
}
// Enter new state
const previousState = this.currentState;
this.currentState = newState;
this.currentState.enter(this.app);
// Notify any listeners about state change
this.onStateChanged(previousState, newState);
}
/**
* Toggle between design and connection states
*/
toggleConnectionMode() {
const currentStateName = this.getCurrentStateName();
if (currentStateName === 'design') {
this.changeState('connection');
} else {
this.changeState('design');
}
}
/**
* Get the current state name
*/
getCurrentStateName() {
return this.currentState ? this.currentState.getStateName().toLowerCase() : 'none';
}
/**
* Get the current state instance
*/
getCurrentState() {
return this.currentState;
}
/**
* Check if currently in a specific state
* @param {string} stateName
*/
isInState(stateName) {
return this.getCurrentStateName() === stateName.toLowerCase();
}
/**
* Delegate canvas click to current state
*/
handleCanvasClick(event) {
if (this.currentState) {
this.currentState.handleCanvasClick(this.app, event);
}
}
/**
* Delegate node click to current state
*/
handleNodeClick(node, event) {
if (this.currentState) {
this.currentState.handleNodeClick(this.app, node, event);
}
}
/**
* Delegate node double-click to current state
*/
handleNodeDoubleClick(node) {
if (this.currentState) {
this.currentState.handleNodeDoubleClick(this.app, node);
}
}
/**
* Delegate drop event to current state
*/
handleDrop(event) {
if (this.currentState) {
this.currentState.handleDrop(this.app, event);
}
}
/**
* Delegate keyboard event to current state
*/
handleKeyDown(event) {
if (this.currentState) {
this.currentState.handleKeyDown(this.app, event);
}
}
/**
* Called when state changes - override for custom behavior
*/
onStateChanged(previousState, newState) {
// Could emit events, update analytics, etc.
// Update any debug UI
if (this.app.debugStateDisplay) {
this.app.debugStateDisplay.textContent = `State: ${newState.getStateName()}`;
}
}
/**
* Get available states for debugging/UI
*/
getAvailableStates() {
return Object.keys(this.states);
}
/**
* Force change to design state (safe reset)
*/
resetToDesignState() {
this.changeState('design');
}
}
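Since this file is deleted by the change, here is a short hedged sketch of how a canvas app would have driven it, using only the constructor and delegate methods visible above; the specific listener wiring is illustrative, not taken from the diff.

// Illustrative wiring only; CanvasStateMachine's public methods are from the diff above,
// but the listener setup itself and the `app` instance are assumptions.
import { CanvasStateMachine } from './states/CanvasStateMachine.js';

const stateMachine = new CanvasStateMachine(app);           // app: the CanvasApp instance

app.canvas.addEventListener('click', (e) => stateMachine.handleCanvasClick(e));
app.canvas.addEventListener('drop', (e) => stateMachine.handleDrop(e));
window.addEventListener('keydown', (e) => stateMachine.handleKeyDown(e));
app.arrowToolBtn.addEventListener('click', () => stateMachine.toggleConnectionMode());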

113
static/states/ConnectionState.js

@ -1,113 +0,0 @@
/**
* Connection State - Arrow mode for connecting components
*
* In this state, users can:
* - Click nodes to start/end connections
* - See visual feedback for connection process
* - Cannot edit properties (properties panel disabled)
*/
import { CanvasState } from './CanvasState.js';
import { Connection } from '../connection.js';
export class ConnectionState extends CanvasState {
enter(app) {
super.enter(app);
// Update UI to reflect connection mode
app.arrowToolBtn.classList.add('active');
app.canvas.style.cursor = this.getCursor();
// Hide properties panel (connection mode disables editing)
if (app.selectedNode) {
app.propertiesPanelSubject.notifyPropertiesPanelClosed(app.selectedNode);
}
// Notify observers that connection mode is enabled
app.connectionModeSubject.notifyConnectionModeChanged(true);
}
exit(app) {
super.exit(app);
// Clear any pending connection
if (app.connectionStart) {
app.connectionStart.group.classList.remove('selected');
app.connectionStart = null;
}
}
handleNodeClick(app, node, event) {
event.stopPropagation();
// Clear any selected connection when starting a new connection
if (app.selectedConnection) {
app.selectedConnection.deselect();
app.selectedConnection = null;
}
if (!app.connectionStart) {
// First click - start connection
app.connectionStart = node;
node.group.classList.add('selected');
} else if (app.connectionStart === node) {
// Clicked same node - cancel connection
app.connectionStart.group.classList.remove('selected');
app.connectionStart = null;
} else {
// Second click - complete connection
this.createConnection(app, app.connectionStart, node);
app.connectionStart.group.classList.remove('selected');
app.connectionStart = null;
}
}
handleNodeDoubleClick(app, node) {
// In connection mode, double-click does nothing
// Properties panel is disabled in this state
}
handleCanvasClick(app, event) {
// Cancel any pending connection when clicking canvas
if (app.connectionStart) {
app.connectionStart.group.classList.remove('selected');
app.connectionStart = null;
}
// Don't clear node selections in connection mode
// Users should be able to see what's selected while connecting
}
/**
* Create a connection between two nodes
* @param {CanvasApp} app
* @param {ComponentNode} startNode
* @param {ComponentNode} endNode
*/
createConnection(app, startNode, endNode) {
// Set up pending connection for modal
app.pendingConnection = { start: startNode, end: endNode };
// Setup connection modal (reuse existing modal logic)
Connection.setupModal(app);
Connection.labelInput.value = 'Read traffic';
Connection.protocolInput.value = 'HTTP';
Connection.tlsCheckbox.checked = false;
Connection.capacityInput.value = '1000';
Connection.modal.style.display = 'block';
}
getCursor() {
return 'crosshair';
}
allowsPropertiesPanel() {
return false; // Disable properties panel in connection mode
}
getStateName() {
return 'Connection';
}
}

79
static/states/DesignState.js

@ -1,79 +0,0 @@
/**
* Design State - Default canvas interaction mode
*
* In this state, users can:
* - Place components from sidebar
* - Select/deselect components
* - Edit component properties
* - Delete components
*/
import { CanvasState } from './CanvasState.js';
export class DesignState extends CanvasState {
enter(app) {
super.enter(app);
// Update UI to reflect design mode
app.arrowToolBtn.classList.remove('active');
app.canvas.style.cursor = this.getCursor();
// Clear any connection state
if (app.connectionStart) {
app.connectionStart.group.classList.remove('selected');
app.connectionStart = null;
}
// Notify observers that connection mode is disabled
app.connectionModeSubject.notifyConnectionModeChanged(false);
}
handleNodeClick(app, node, event) {
event.stopPropagation();
// Clear any selected connection when clicking a node
if (app.selectedConnection) {
app.selectedConnection.deselect();
app.selectedConnection = null;
}
// Clear previous node selection and select this node
if (app.selectedNode && app.selectedNode !== node) {
app.nodeSelectionSubject.notifyNodeDeselected(app.selectedNode);
}
// Select the clicked node
node.select();
app.nodeSelectionSubject.notifyNodeSelected(node);
}
handleNodeDoubleClick(app, node) {
// Show properties panel for the node
app.propertiesPanelSubject.notifyPropertiesPanelRequested(node);
}
handleCanvasClick(app, event) {
// Don't hide props panel if clicking on it
if (!app.nodePropsPanel.contains(event.target)) {
// Use observer to notify that properties panel should be closed
if (app.selectedNode) {
app.propertiesPanelSubject.notifyPropertiesPanelClosed(app.selectedNode);
}
}
// Use base implementation for other clearing logic
super.handleCanvasClick(app, event);
}
getCursor() {
return 'default';
}
allowsPropertiesPanel() {
return true;
}
getStateName() {
return 'Design';
}
}

831
static/style.css

@ -48,8 +48,8 @@ body {
flex-direction: row; flex-direction: row;
min-height: 100vh; min-height: 100vh;
background: radial-gradient(circle at 30% 50%, rgba(0, 255, 136, 0.1), transparent 50%), background: radial-gradient(circle at 30% 50%, rgba(0, 255, 136, 0.1), transparent 50%),
radial-gradient(circle at 70% 80%, rgba(255, 107, 53, 0.1), transparent 50%), radial-gradient(circle at 70% 80%, rgba(255, 107, 53, 0.1), transparent 50%),
var(--color-bg-body) var(--color-bg-body)
} }
/* === LAYOUT === */ /* === LAYOUT === */
@ -68,29 +68,13 @@ body {
display: flex; display: flex;
align-items: center; align-items: center;
justify-content: space-between; justify-content: space-between;
}
.header-text {
font-size: 24px;
margin: 0;
text-shadow: 0 0 10px rgba(0, 255, 136, 0.8);
.header-logo-container {
display: flex;
align-items: center;
gap: 12px;
}
.header-text {
font-size: 24px;
margin: 0;
text-shadow: 0 0 10px rgba(0, 255, 136, 0.8);
}
.beta-pill {
background: linear-gradient(45deg, #ff6b35, #f7931e);
color: white;
padding: 4px 8px;
border-radius: 12px;
font-size: 0.7rem;
font-weight: 600;
text-transform: uppercase;
letter-spacing: 0.5px;
}
} }
#main-content { #main-content {
@ -107,17 +91,17 @@ body {
flex-wrap: wrap; flex-wrap: wrap;
flex-direction: row; flex-direction: row;
gap: var(--component-gap); gap: var(--component-gap);
}
.sidebar-title { .sidebar-title {
color: #8b949e; color: #8b949e;
font-size: 14px; font-size: 14px;
text-transform: uppercase; text-transform: uppercase;
letter-spacing: 1px; letter-spacing: 1px;
margin-bottom: 15px; margin-bottom: 15px;
padding-bottom: 8px; padding-bottom: 8px;
padding-left: 8px; padding-left: 8px;
border-bottom: 1px solid #303638; border-bottom: 1px solid #303638;
}
} }
/* === COMPONENT ICONS === */ /* === COMPONENT ICONS === */
@ -134,33 +118,23 @@ body {
font-size: 16px; font-size: 16px;
color: var(--color-text-primary); color: var(--color-text-primary);
transition: background-color 0.1s ease; transition: background-color 0.1s ease;
}
&:hover { .component-icon:hover,
background-color: var(--color-bg-hover); #arrow-tool:hover {
border-color: var(--color-border-accent); background-color: var(--color-bg-hover);
} border-color: var(--color-border-accent);
}
&:active {
cursor: grabbing;
}
&.dragging .tooltip {
display: none;
}
&:hover .tooltip { .component-icon:active,
visibility: visible; #arrow-tool:active {
opacity: 1; cursor: grabbing;
z-index: 1000;
}
} }
#arrow-tool { #arrow-tool.active {
&.active { background-color: var(--color-bg-accent);
background-color: var(--color-bg-accent); color: var(--color-text-white);
color: var(--color-text-white); border-color: var(--color-button);
border-color: var(--color-button);
}
} }
/* === TOOLTIP === */ /* === TOOLTIP === */
@ -182,6 +156,16 @@ body {
transition: opacity 0.2s; transition: opacity 0.2s;
} }
.component-icon:hover .tooltip {
visibility: visible;
opacity: 1;
z-index: 1000;
}
.component-icon.dragging .tooltip {
display: none;
}
/* === CANVAS === */ /* === CANVAS === */
#canvas-wrapper { #canvas-wrapper {
flex: 1; flex: 1;
@ -213,11 +197,11 @@ body {
.dropped { .dropped {
cursor: move; cursor: move;
}
&.selected rect { .dropped.selected rect {
stroke: #00bcd4; stroke: #00bcd4;
stroke-width: 2; stroke-width: 2;
}
} }
/* === TOOLBAR === */ /* === TOOLBAR === */
@ -233,28 +217,28 @@ body {
border-radius: var(--radius-small); border-radius: var(--radius-small);
padding: 6px; padding: 6px;
box-shadow: 0 0 8px rgba(0, 0, 0, 0.4); box-shadow: 0 0 8px rgba(0, 0, 0, 0.4);
}
.toolbar-btn { .toolbar-btn {
background: none; background: none;
border: 1px solid var(--color-border); border: 1px solid var(--color-border);
color: var(--color-text-primary); color: var(--color-text-primary);
padding: 6px 10px; padding: 6px 10px;
border-radius: var(--radius-small); border-radius: var(--radius-small);
font-size: 14px; font-size: 14px;
cursor: pointer; cursor: pointer;
font-family: var(--font-family-mono); font-family: var(--font-family-mono);
}
&:hover {
background-color: var(--color-bg-hover); .toolbar-btn:hover {
border-color: var(--color-border-accent); background-color: var(--color-bg-hover);
} border-color: var(--color-border-accent);
}
&.active {
background-color: var(--color-bg-accent); .toolbar-btn.active {
color: var(--color-text-white); background-color: var(--color-bg-accent);
border-color: var(--color-button); color: var(--color-text-white);
} border-color: var(--color-button);
}
} }
/* === PANELS === */ /* === PANELS === */
@ -272,27 +256,10 @@ body {
z-index: 10; z-index: 10;
border: 1px solid var(--color-text-dark); border: 1px solid var(--color-text-dark);
box-shadow: 0 0 8px rgba(0, 0, 0, 0.3); box-shadow: 0 0 8px rgba(0, 0, 0, 0.3);
.panel-title {
font-weight: bold;
color: var(--color-text-white);
font-size: 15px;
margin-bottom: 0.5rem;
}
.panel-metric {
margin-bottom: 0.4rem;
.label {
display: inline-block;
width: 140px;
color: var(--color-text-muted);
}
}
} }
#node-props-panel { #node-props-panel {
position: fixed; position: absolute;
width: 220px; width: 220px;
background-color: var(--color-bg-sidebar); background-color: var(--color-bg-sidebar);
border: 1px solid var(--color-border); border: 1px solid var(--color-border);
@@ -302,58 +269,58 @@ body {
   box-shadow: 0 0 10px rgba(0, 0, 0, 0.6);
   display: none;
   z-index: 10;
-  opacity: 0;
-  transform: scale(0.95);
-  transition: opacity 0.2s ease-out, transform 0.2s ease-out;
-
-  &.visible {
-    opacity: 1;
-    transform: scale(1);
-  }
-
-  h3 {
-    margin-top: 0;
-    font-size: 15px;
-    color: var(--color-text-primary);
-  }
-
-  .form-group {
-    margin-bottom: 10px;
-  }
-
-  label {
-    display: block;
-    font-weight: bold;
-    margin-bottom: 4px;
-  }
-
-  select {
-    width: 100%;
-    padding: 4px;
-    font-size: 14px;
-  }
-
-  button:disabled {
-    background-color: var(--color-button-disabled);
-    cursor: not-allowed;
-  }
-}
-
-.prop-group {
-  display: none;
-  margin-bottom: 12px;
-
-  label,
-  input {
-    display: block;
-    width: 100%;
-    margin-top: 6px;
-    font-size: 13px;
-  }
-}
-
-#score-panel {
-  margin-top: 16px;
-}
+}
+
+#node-props-panel h3 {
+  margin-top: 0;
+  font-size: 15px;
+  color: var(--color-text-primary);
+}
+
+#node-props-panel .form-group {
+  margin-bottom: 10px;
+}
+
+#node-props-panel label {
+  display: block;
+  font-weight: bold;
+  margin-bottom: 4px;
+}
+
+#node-props-panel select {
+  width: 100%;
+  padding: 4px;
+  font-size: 14px;
+}
+
+.prop-group {
+  display: none;
+  margin-bottom: 12px;
+}
+
+.prop-group label,
+.prop-group input {
+  display: block;
+  width: 100%;
+  margin-top: 6px;
+  font-size: 13px;
+}
+
+.panel-title {
+  font-weight: bold;
+  color: var(--color-text-white);
+  font-size: 15px;
+  margin-bottom: 0.5rem;
+}
+
+.panel-metric {
+  margin-bottom: 0.4rem;
+}
+
+.panel-metric .label {
+  display: inline-block;
+  width: 140px;
+  color: var(--color-text-muted);
+}
 
 /* === INPUTS & BUTTONS === */
@@ -368,8 +335,7 @@ input[type="number"] {
 }
 
 #node-props-save,
-#run-button,
-.continue-button {
+#run-button {
   margin-top: 8px;
   padding: 10px;
   background-color: var(--color-button);
@@ -378,21 +344,12 @@ input[type="number"] {
   border-radius: var(--radius-small);
   cursor: pointer;
   font-size: 14px;
-
-  &:disabled {
-    background-color: var(--color-button-disabled);
-    cursor: not-allowed;
-  }
-}
-
-.continue-section {
-  display: flex;
-  flex-direction: row;
-  gap: 30px;
-}
-
-.continue-button {
-  width: 30%;
-}
+}
+
+#run-button:disabled,
+#node-props-panel button:disabled {
+  background-color: var(--color-button-disabled);
+  cursor: not-allowed;
+}
 
 #github-login-btn {
@@ -410,15 +367,15 @@ input[type="number"] {
   border: 1px solid #2ea043;
   transition: background-color 0.2s ease;
   float: right;
+}
 
-  &:hover {
-    background-color: #ccc;
-  }
+#github-login-btn:hover {
+  background-color: #ccc;
+}
 
-  img {
-    width: 18px;
-    height: 18px;
-  }
-}
+#github-login-btn img {
+  width: 18px;
+  height: 18px;
+}
 
 /* === TABS === */
@@ -427,75 +384,47 @@ input[type="number"] {
   flex-direction: column;
   height: 100%;
   overflow: hidden;
-
-  .tab-labels {
-    display: flex;
-    cursor: pointer;
-
-    label {
-      padding: 10px 20px;
-      background: var(--color-bg-body);
-      margin-right: 4px;
-      margin-bottom: 20px;
-      border-radius: var(--radius-small);
-    }
-  }
-
-  .tab-content {
-    border-top: 1px solid var(--color-border-panel);
-    padding: 20px 0 0;
-    display: none;
-    height: 100%;
-  }
-}
-
-input[name="tab"] {
-  display: none;
-}
-
-#tab1:checked~.tabs {
-  .tab-labels label[for="tab1"] {
-    background: var(--color-bg-tab-active);
-    font-weight: bold;
-    color: var(--color-text-accent);
-  }
-
-  #content1 {
-    display: flex;
-    flex-direction: column;
-    height: 100%;
-    overflow: hidden;
-  }
-}
-
-#tab2:checked~.tabs {
-  .tab-labels label[for="tab2"] {
-    background: var(--color-bg-tab-active);
-    font-weight: bold;
-    color: var(--color-text-accent);
-  }
-
-  #content2 {
-    display: flex;
-    flex-direction: column;
-    height: 100%;
-    overflow: hidden;
-  }
-}
-
-#tab3:checked~.tabs {
-  .tab-labels label[for="tab3"] {
-    background: var(--color-bg-tab-active);
-    font-weight: bold;
-    color: var(--color-text-accent);
-  }
-
-  #content3 {
-    display: flex;
-    flex-direction: column;
-    height: 100%;
-    overflow: hidden;
-  }
-}
+}
+
+.tab-labels {
+  display: flex;
+  cursor: pointer;
+}
+
+.tab-labels label {
+  padding: 10px 20px;
+  background: var(--color-bg-body);
+  margin-right: 4px;
+  margin-bottom: 20px;
+  border-radius: var(--radius-small);
+}
+
+.tab-content {
+  border-top: 1px solid var(--color-border-panel);
+  padding: 20px 0 0;
+  display: none;
+  height: 100%;
+}
+
+input[name="tab"] {
+  display: none;
+}
+
+#tab1:checked ~ .tabs .tab-labels label[for="tab1"],
+#tab2:checked ~ .tabs .tab-labels label[for="tab2"],
+#tab3:checked ~ .tabs .tab-labels label[for="tab3"] {
+  background: var(--color-bg-tab-active);
+  font-weight: bold;
+  color: var(--color-text-accent);
+}
+
+#tab1:checked ~ .tabs #content1,
+#tab2:checked ~ .tabs #content2,
+#tab3:checked ~ .tabs #content3 {
+  display: flex;
+  flex-direction: column;
+  height: 100%;
+  overflow: hidden;
+}
 
 /* === CHALLENGE PANEL === */
@@ -506,57 +435,54 @@ input[name="tab"] {
   border: 2px solid var(--color-border-panel);
   border-radius: var(--radius-large);
   padding: 0 12px;
-
-  .challenge-list {
-    list-style: none;
-    margin: 0;
-    padding: 0;
-  }
-
-  .challenge-item {
-    padding: 10px;
-    margin: 5px 0;
-    background: #21262d;
-    border-radius: 6px;
-    cursor: pointer;
-    transition: all 0.2s ease;
-    border-left: 3px solid transparent;
-    list-style: none;
-
-    &:hover {
-      background: #30363d;
-    }
-
-    &.active {
-      background: #1a3d2a;
-      border-left-color: #00ff88;
-    }
-  }
-
-  .challenge-name a:link,
-  .challenge-name a:visited {
-    font-weight: 500;
-    margin-bottom: 5px;
-    color: #fff;
-    text-decoration: none;
-  }
-
-  .challenge-difficulty {
-    font-size: 0.8rem;
-    color: #0b949e;
-
-    &.easy {
-      color: #3fb950;
-    }
-
-    &.medium {
-      color: #d29922;
-    }
-
-    &.hard {
-      color: #f85149;
-    }
-  }
-}
+}
+
+.challenge-list {
+  list-style: none;
+  margin: 0;
+  padding: 0;
+}
+
+.challenge-item {
+  padding: 10px;
+  margin: 5px 0;
+  background: #21262d;
+  border-radius: 6px;
+  cursor: pointer;
+  transition: all 0.2s ease;
+  border-left: 3px solid transparent;
+  list-style: none;
+}
+
+.challenge-item:hover {
+  background: #30363d;
+}
+
+.challenge-item.active {
+  background: #1a3d2a;
+  border-left-color: #00ff88;
+}
+
+.challenge-name {
+  font-weight: 500;
+  margin-bottom: 5px;
+}
+
+.challenge-difficulty {
+  font-size: 0.8rem;
+  color: #0b949e;
+}
+
+.challenge-difficulty.easy {
+  color: #3fb950;
+}
+
+.challenge-difficulty.medium {
+  color: #d29922;
+}
+
+.challenge-difficulty.hard {
+  color: #f85149;
+}
 
 /* === REQUIREMENTS === */
@@ -566,26 +492,26 @@ input[name="tab"] {
   border-radius: 8px;
   padding: 20px;
   margin-bottom: 20px;
+}
 
-  .requirements-list {
+.requirements-list {
   margin: 0;
   padding: 0;
   list-style: none;
 }
 
-  .requirement-item {
+.requirement-item {
   position: relative;
   padding: 8px 0 8px 25px;
   margin: 0;
   border-bottom: 1px solid #30363d;
+}
 
-    &:before {
+.requirement-item:before {
   content: "✓";
   color: #00ff88;
   position: absolute;
   left: 0;
 }
-  }
-}
 
 /* === MODAL === */
@@ -600,313 +526,42 @@ input[name="tab"] {
   border: 1px solid #444;
   z-index: 999;
   color: #ccc;
-
-  .modal-content label {
-    display: block;
-    margin: 10px 0;
-  }
-
-  .modal-actions {
-    margin-top: 10px;
-    text-align: right;
-  }
-
-  input,
-  select {
-    width: 100%;
-    padding: 6px;
-    margin-top: 4px;
-    background: #222;
-    border: 1px solid #444;
-    color: #fff;
-    border-radius: 4px;
-  }
-}
-
-/* === MISC === */
-.userbox {
-  display: flex;
-  align-items: center;
-  gap: 12px;
-
-  .avatar {
-    width: 24px;
-    height: 24px;
-    border-radius: 12px;
-  }
-}
-
-/* === CHATBOT STYLES ===*/
-#start-chat {
-  display: none !important; /* Hide chat button temporarily */
-  position: fixed;
-  bottom: 25px;
-  right: 25px;
-  padding: 0.75rem;
-  background: #22c55e;
-  border: none;
-  border-radius: 12px;
-  color: white;
-  cursor: pointer;
-  transition: all 0.3s ease;
-  align-items: center;
-  justify-content: center;
-  min-width: 48px;
-  box-shadow: 0 4px 12px rgba(34, 197, 94, 0.3);
-  animation: breathe 1s ease-in-out infinite;
-  transform-origin: center bottom;
-}
-
-.chat {
-  display: none;
-
-  section {
-    --_scrollbar_width: 8px;
-    display: flex;
-    flex-direction: column;
-    gap: 1rem;
-    padding: 1rem;
-    overflow-y: auto;
-    scrollbar-width: var(--_scrollbar_width);
-    scrollbar-color: rgb(33, 38, 45) transparent;
-    height: 52vh;
-    width: 400px;
-
-    &::-webkit-scrollbar {
-      width: var(--_scrollbar_width);
-    }
-
-    &::-webkit-scrollbar-track {
-      background: transparent;
-    }
-
-    &::-webkit-scrollbar-thumb {
-      background: rgba(34, 197, 94, 0.3);
-      border-radius: 2px;
-    }
-
-    p.other,
-    p.me {
-      --_border_width: 2px;
-      --_border_radius: 8px;
-      max-width: 85%;
-      padding: 0.75rem 1rem;
-      background: rgba(14, 19, 13, 0.4);
-      border-radius: var(--_border_radius);
-      font-size: 0.9rem;
-      line-height: 1.4;
-      backdrop-filter: blur(10px);
-      border: 1px solid rgba(34, 197, 94, 0.1);
-      animation: messageSlide 0.4s ease-out;
-    }
-
-    .me {
-      margin-left: auto;
-      border-right: var(--_border_width) solid #22c55e;
-      border-radius: var(--_border_radius) 4px 4px var(--_border_radius);
-      color: #e5f5e5;
-    }
-
-    .other {
-      border-left: var(--_border_width) solid #38bdf8;
-      border-radius: 4px var(--_border_radius) var(--_border_radius) 4px;
-      color: #e0f2fe;
-    }
-  }
-
-  footer {
-    display: flex;
-    align-items: center;
-    gap: 0.75rem;
-    padding: 0.5rem 1rem;
-    background: #21262d;
-    border-top: 1px solid rgba(34, 197, 94, 0.1);
-    border-radius: 0 0 0.5rem 0.5rem;
-
-    textarea {
-      --_padding: 0.75rem;
-      --_lines: 3;
-      border: 0;
-      flex: 1;
-      resize: none;
-      padding: var(--_padding);
-      height: calc((var(--_lines) * 1lh) + (var(--_padding) * 2));
-      background: #161b22;
-      border-radius: 12px;
-      color: #fff;
-      font-size: 0.9rem;
-      outline: none;
-      transition: all 0.3s ease;
-      backdrop-filter: blur(10px);
-
-      &::placeholder {
-        color: rgba(255, 255, 255, 0.5);
-      }
-
-      &:focus {
-        box-shadow: 0 0 0 2px rgba(34, 197, 94, 0.1);
-        background: rgba(14, 19, 13, 0.9);
-      }
-    }
-
-    button {
-      padding: 0.75rem;
-      background: transparent;
-      border: none;
-      border-radius: 12px;
-      color: #22c55e;
-      cursor: pointer;
-      transition: all 0.3s ease;
-      display: flex;
-      align-items: center;
-      justify-content: center;
-      min-width: 48px;
-
-      &:hover {
-        color: #fff;
-        transform: translateY(-2px);
-      }
-
-      &:active {
-        transform: translateY(0);
-      }
-
-      svg {
-        --_size: 1.25rem;
-        width: var(--_size);
-        height: var(--_size);
-      }
-    }
-  }
-}
-
-#chat-header {
-  padding: 16px 8px;
-  box-sizing: border-box;
-  background: #21262d;
-
-  .chat-title {
-    font-family: Arial, Helvetica, sans-serif;
-    font-size: 18px;
-    margin: 0;
-    padding: 0;
-  }
-
-  .powered-by {
-    margin: 0;
-    font-size: 12px;
-  }
-}
-
-.chat-checkbox {
-  display: none;
-
-  &:checked+.chat {
-    display: grid;
-    position: fixed;
-    bottom: 100px;
-    right: 0;
-    grid-template-rows: 1fr auto;
-    max-width: 400px;
-    height: 60vh;
-    max-height: 70vh;
-    background: #161b22;
-    border-radius: 0.5rem;
-    box-shadow: 0 20px 40px rgba(0, 0, 0, 0.5), 0 0 0 1px rgba(34, 197, 94, 0.1);
-    backdrop-filter: blur(20px);
-  }
-}
-
-.loading-indicator {
-  max-width: 85%;
-  padding: 0.75rem 1rem;
-  background: rgba(56, 189, 248, 0.05);
-  border: 1px solid rgba(56, 189, 248, 0.3);
-  border-left: 4px solid #38bdf8;
-  border-radius: 4px 16px 16px 4px;
-  display: flex;
-  align-items: center;
-  gap: 0.5rem;
-  animation: messageSlide 0.4s ease-out;
-
-  span {
-    color: #38bdf8;
-    font-size: 0.8rem;
-    opacity: 0.8;
-  }
-}
-
-.loading-dots {
-  display: flex;
-  gap: 0.2rem;
-
-  .loading-dot {
-    width: 4px;
-    height: 4px;
-    background: #38bdf8;
-    border-radius: 50%;
-    animation: loadingDot 1.4s infinite;
-
-    &:nth-child(2) {
-      animation-delay: 0.2s;
-    }
-
-    &:nth-child(3) {
-      animation-delay: 0.4s;
-    }
-  }
-}
-
-/* === KEYFRAMES === */
-@keyframes messageSlide {
-  from {
-    opacity: 0;
-    transform: translateY(15px);
-  }
-
-  to {
-    opacity: 1;
-    transform: translateY(0);
-  }
-}
-
-@keyframes loadingDot {
-
-  0%,
-  60%,
-  100% {
-    transform: scale(1);
-    opacity: 0.4;
-  }
-
-  30% {
-    transform: scale(1.3);
-    opacity: 1;
-  }
-}
-
-@keyframes pulse {
-
-  0%,
-  100% {
-    opacity: 1;
-    transform: scale(1);
-  }
-
-  50% {
-    opacity: 0.6;
-    transform: scale(1.1);
-  }
-}
-
-@keyframes breathe {
-
-  0%,
-  100% {
-    transform: scale(1) translateY(0);
-  }
-
-  50% {
-    transform: scale(1.02) translateY(-1px);
-  }
-}
+}
+
+.modal-content label {
+  display: block;
+  margin: 10px 0;
+}
+
+.modal-actions {
+  margin-top: 10px;
+  text-align: right;
+}
+
+.modal input,
+.modal select {
+  width: 100%;
+  padding: 6px;
+  margin-top: 4px;
+  background: #222;
+  border: 1px solid #444;
+  color: #fff;
+  border-radius: 4px;
+}
+
+/* === MISC === */
+#score-panel {
+  margin-top: 16px;
+}
+
+.userbox {
+  display: flex;
+  align-items: center;
+  gap: 12px;
+}
+
+.avatar {
+  width: 24px;
+  height: 24px;
+  border-radius: 12px;
+}

42
static/success.html

@@ -77,29 +77,11 @@ body {
   justify-content: space-between;
 }
 
-.header-logo-container {
-  display: flex;
-  align-items: center;
-  gap: 12px;
-}
-
 .header-text {
   font-size: 24px;
   margin: 0;
 }
 
-.beta-pill {
-  background: linear-gradient(45deg, #ff6b35, #f7931e);
-  color: white;
-  padding: 4px 8px;
-  border-radius: 12px;
-  font-size: 0.7rem;
-  font-weight: 700;
-  text-transform: uppercase;
-  letter-spacing: 0.5px;
-  box-shadow: 0 2px 4px rgba(0, 0, 0, 0.3);
-}
-
 #main-content {
   display: flex;
   flex-direction: row;
@@ -597,10 +579,7 @@ input[name="tab"] {
 <body>
   <div id="page-container">
     <div id="sd-header">
-      <div class="header-logo-container">
-        <h1 class="header-text">System Design Game</h1>
-        <div class="beta-pill">BETA</div>
-      </div>
+      <h1 class="header-text">System Design Game</h1>
       <div class="userbox">
         <div class="status-indicator" style="color: var(--color-text-accent); font-weight: bold;">
           ✅ SYSTEM SUCCESS
@@ -612,25 +591,25 @@ input[name="tab"] {
       <div style="max-width: 600px; width: 100%; background: var(--color-bg-component); border: 1px solid var(--color-border); border-radius: var(--radius-large); padding: 32px;">
         <h2 style="color: var(--color-text-accent); font-size: 32px; text-align: center; margin-top: 0;">🏆 Mission Accomplished</h2>
         <p style="text-align: center; font-size: 16px; color: var(--color-text-muted); margin-bottom: 32px;">
-          {{.LevelName}} completed successfully! Your architecture scaled and met all performance targets. Well done!
+          Your architecture scaled successfully and met all performance targets. Well done!
         </p>
         <div class="failure-metrics" style="display: grid; grid-template-columns: 1fr 1fr; gap: 16px; margin-bottom: 24px;">
           <div class="metric-item" style="background: var(--color-bg-dark); border: 1px solid var(--color-border); padding: 16px; border-radius: var(--radius-medium); text-align: center;">
             <div class="metric-label" style="color: var(--color-text-muted); margin-bottom: 6px;">Target RPS</div>
-            <div class="metric-value" style="font-size: 20px; font-weight: bold;">{{.TargetRPS | printf "%d"}}</div>
+            <div class="metric-value" style="font-size: 20px; font-weight: bold;">10,000</div>
           </div>
           <div class="metric-item" style="background: var(--color-bg-dark); border: 1px solid var(--color-border); padding: 16px; border-radius: var(--radius-medium); text-align: center;">
             <div class="metric-label" style="color: var(--color-text-muted); margin-bottom: 6px;">Achieved RPS</div>
-            <div class="metric-value" style="font-size: 20px; font-weight: bold; color: var(--color-text-accent);">{{.AchievedRPS | printf "%d"}}</div>
+            <div class="metric-value" style="font-size: 20px; font-weight: bold; color: var(--color-text-accent);">10,417</div>
           </div>
           <div class="metric-item" style="background: var(--color-bg-dark); border: 1px solid var(--color-border); padding: 16px; border-radius: var(--radius-medium); text-align: center;">
             <div class="metric-label" style="color: var(--color-text-muted); margin-bottom: 6px;">Max Latency</div>
-            <div class="metric-value" style="font-size: 20px; font-weight: bold;">{{.TargetLatency}}ms</div>
+            <div class="metric-value" style="font-size: 20px; font-weight: bold;">200ms</div>
           </div>
           <div class="metric-item" style="background: var(--color-bg-dark); border: 1px solid var(--color-border); padding: 16px; border-radius: var(--radius-medium); text-align: center;">
             <div class="metric-label" style="color: var(--color-text-muted); margin-bottom: 6px;">Actual Latency</div>
-            <div class="metric-value" style="font-size: 20px; font-weight: bold; color: var(--color-text-accent);">{{.ActualLatency | printf "%.0f"}}ms</div>
+            <div class="metric-value" style="font-size: 20px; font-weight: bold; color: var(--color-text-accent);">87ms</div>
           </div>
         </div>
@@ -639,9 +618,6 @@ input[name="tab"] {
         <div>[INFO] Load balancer handled traffic with 0% errors</div>
         <div>[INFO] Cache hit ratio: 97%</div>
         <div>[SUCCESS] SLA met - all objectives achieved</div>
-        {{range .Feedback}}
-        <div>[SUCCESS] {{.}}</div>
-        {{end}}
       </div>
 
       <div class="action-buttons" style="display: flex; justify-content: center; gap: 16px; margin-top: 32px;">
@@ -660,10 +636,8 @@ input[name="tab"] {
       const btn = document.getElementById('retry-button');
       btn.textContent = '⏳ Reloading...';
       btn.disabled = true;
-      const levelId = '{{.LevelID}}';
-      const retryUrl = levelId ? `/play/${levelId}?retry=true` : '/game?retry=true';
 
       setTimeout(() => {
-        window.location.href = retryUrl;
+        window.location.href = '/game?retry=true';
       }, 1500);
     }
@@ -674,6 +648,8 @@ input[name="tab"] {
         window.location.href = '/game-modes';
       }
     });
+
+    console.log("✅ SYSTEM SUCCESS SCREEN DISPLAYED");
   </script>
 </div>
 </body>
