Compare commits
1 commit: main...dependabot

| Author | SHA1 | Date |
|---|---|---|
| | 838909c40c | 6 months ago |

67 changed files with 1447 additions and 8844 deletions
@@ -1,194 +0,0 @@
package simulation

import (
	"fmt"
	"hash/fnv"
	"time"
)

type CacheLogic struct{}

// hash function to simulate URL patterns
func hash(s string) uint32 {
	h := fnv.New32a()
	h.Write([]byte(s))
	return h.Sum32()
}

type CacheEntry struct {
	Data        string
	Timestamp   int
	AccessTime  int
	AccessCount int
	InsertOrder int
}

func (c CacheLogic) Tick(props map[string]any, queue []*Request, tick int) ([]*Request, bool) {
	// Extract cache properties
	cacheTTL := int(AsFloat64(props["cacheTTL"]))
	if cacheTTL == 0 {
		cacheTTL = 300000 // default 5 minutes in ms
	}

	maxEntries := int(AsFloat64(props["maxEntries"]))
	if maxEntries == 0 {
		maxEntries = 1000 // default max entries
	}

	evictionPolicy := AsString(props["evictionPolicy"])
	if evictionPolicy == "" {
		evictionPolicy = "LRU" // default eviction policy
	}

	// Initialize cache data structures in props
	cacheData, ok := props["_cacheData"].(map[string]*CacheEntry)
	if !ok {
		cacheData = make(map[string]*CacheEntry)
		props["_cacheData"] = cacheData
	}

	insertCounter, ok := props["_insertCounter"].(int)
	if !ok {
		insertCounter = 0
	}

	// Current timestamp for this tick
	currentTime := tick * 100 // assuming 100ms per tick

	// Clean up expired entries first
	c.cleanExpiredEntries(cacheData, currentTime, cacheTTL)

	output := []*Request{}

	for _, req := range queue {
		// For URL shortener simulation, use hash of request ID to simulate repeated URL access.
		// This creates realistic cache patterns where some URLs are accessed multiple times.
		hashValue := hash(req.ID) % 100 // Create 100 possible "URLs"
		cacheKey := fmt.Sprintf("url-%d-%s", hashValue, req.Type)

		// Check for cache hit
		entry, hit := cacheData[cacheKey]
		if hit && !c.isExpired(entry, currentTime, cacheTTL) {
			// Cache hit - served from the cache component with minimal latency
			reqCopy := *req
			reqCopy.LatencyMS += 1 // 1ms for in-memory access
			reqCopy.Path = append(reqCopy.Path, "cache-hit")

			// Update access tracking for eviction policies
			entry.AccessTime = currentTime
			entry.AccessCount++

			output = append(output, &reqCopy)
		} else {
			// Cache miss - forward request downstream to the database
			reqCopy := *req
			reqCopy.Path = append(reqCopy.Path, "cache-miss")

			// For simulation purposes, we'll cache the "response" immediately.
			// In a real system, this would happen when the response comes back.
			insertCounter++
			newEntry := &CacheEntry{
				Data:        "cached-data", // In a real implementation, this would be the response data
				Timestamp:   currentTime,
				AccessTime:  currentTime,
				AccessCount: 1,
				InsertOrder: insertCounter,
			}

			// First check if we need to evict before adding
			if len(cacheData) >= maxEntries {
				c.evictEntry(cacheData, evictionPolicy)
			}

			// Now add the new entry
			cacheData[cacheKey] = newEntry

			output = append(output, &reqCopy)
		}
	}

	// Update insert counter in props
	props["_insertCounter"] = insertCounter

	return output, true
}

func (c CacheLogic) cleanExpiredEntries(cacheData map[string]*CacheEntry, currentTime, ttl int) {
	for key, entry := range cacheData {
		if c.isExpired(entry, currentTime, ttl) {
			delete(cacheData, key)
		}
	}
}

func (c CacheLogic) isExpired(entry *CacheEntry, currentTime, ttl int) bool {
	return (currentTime - entry.Timestamp) > ttl
}

func (c CacheLogic) evictEntry(cacheData map[string]*CacheEntry, policy string) {
	if len(cacheData) == 0 {
		return
	}

	var keyToEvict string

	switch policy {
	case "LRU":
		// Evict least recently used
		oldestTime := int(^uint(0) >> 1) // Max int
		for key, entry := range cacheData {
			if entry.AccessTime < oldestTime {
				oldestTime = entry.AccessTime
				keyToEvict = key
			}
		}

	case "LFU":
		// Evict least frequently used
		minCount := int(^uint(0) >> 1) // Max int
		for key, entry := range cacheData {
			if entry.AccessCount < minCount {
				minCount = entry.AccessCount
				keyToEvict = key
			}
		}

	case "FIFO":
		// Evict first in (oldest insert order)
		minOrder := int(^uint(0) >> 1) // Max int
		for key, entry := range cacheData {
			if entry.InsertOrder < minOrder {
				minOrder = entry.InsertOrder
				keyToEvict = key
			}
		}

	case "random":
		// Evict random entry
		keys := make([]string, 0, len(cacheData))
		for key := range cacheData {
			keys = append(keys, key)
		}
		if len(keys) > 0 {
			// Use timestamp as pseudo-random seed
			seed := time.Now().UnixNano()
			keyToEvict = keys[seed%int64(len(keys))]
		}

	default:
		// Default to LRU
		oldestTime := int(^uint(0) >> 1)
		for key, entry := range cacheData {
			if entry.AccessTime < oldestTime {
				oldestTime = entry.AccessTime
				keyToEvict = key
			}
		}
	}

	if keyToEvict != "" {
		delete(cacheData, keyToEvict)
	}
}

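For orientation, a minimal driver sketch (not part of the diff above; it assumes the package's Request and CacheLogic types shown in these hunks) replays the same request ID across ticks so the second call is served from the cache:

// Sketch only: drives CacheLogic across two ticks with the same request ID.
func sketchCacheTicks() {
	cache := CacheLogic{}
	props := map[string]any{"cacheTTL": 10000, "maxEntries": 100, "evictionPolicy": "LRU"}

	miss, _ := cache.Tick(props, []*Request{{ID: "req1", Type: "GET"}}, 1) // forwarded; "cache-miss" appended to Path
	hit, _ := cache.Tick(props, []*Request{{ID: "req1", Type: "GET"}}, 2)  // +1ms latency; "cache-hit" appended to Path
	_, _ = miss, hit
}
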
@@ -1,319 +0,0 @@
package simulation

import (
	"testing"
)

func TestCacheLogic_CacheHitMiss(t *testing.T) {
	cache := CacheLogic{}

	props := map[string]any{
		"cacheTTL":       10000, // 10 seconds
		"maxEntries":     100,
		"evictionPolicy": "LRU",
	}

	// First request should be a miss
	req1 := []*Request{{ID: "req1", Type: "GET", LatencyMS: 0, Path: []string{"start"}}}
	output1, alive := cache.Tick(props, req1, 1)

	if !alive {
		t.Errorf("Cache should be alive")
	}

	if len(output1) != 1 {
		t.Errorf("Expected 1 output request, got %d", len(output1))
	}

	// Should be cache miss
	if output1[0].LatencyMS != 0 { // No latency added for miss
		t.Errorf("Expected 0ms latency for cache miss, got %dms", output1[0].LatencyMS)
	}

	// Check path contains cache-miss
	found := false
	for _, pathItem := range output1[0].Path {
		if pathItem == "cache-miss" {
			found = true
			break
		}
	}
	if !found {
		t.Errorf("Expected cache-miss in path, got %v", output1[0].Path)
	}

	// Second identical request should be a hit
	req2 := []*Request{{ID: "req1", Type: "GET", LatencyMS: 0, Path: []string{"start"}}}
	output2, _ := cache.Tick(props, req2, 2)

	if len(output2) != 1 {
		t.Errorf("Expected 1 output request, got %d", len(output2))
	}

	// Should be cache hit with 1ms latency
	if output2[0].LatencyMS != 1 {
		t.Errorf("Expected 1ms latency for cache hit, got %dms", output2[0].LatencyMS)
	}

	// Check path contains cache-hit
	found = false
	for _, pathItem := range output2[0].Path {
		if pathItem == "cache-hit" {
			found = true
			break
		}
	}
	if !found {
		t.Errorf("Expected cache-hit in path, got %v", output2[0].Path)
	}
}

func TestCacheLogic_TTLExpiration(t *testing.T) {
	cache := CacheLogic{}

	props := map[string]any{
		"cacheTTL":       1000, // 1 second
		"maxEntries":     100,
		"evictionPolicy": "LRU",
	}

	// First request - cache miss
	req1 := []*Request{{ID: "req1", Type: "GET", LatencyMS: 0}}
	cache.Tick(props, req1, 1)

	// Second request within TTL - cache hit
	req2 := []*Request{{ID: "req1", Type: "GET", LatencyMS: 0}}
	output2, _ := cache.Tick(props, req2, 5) // 5 * 100ms = 500ms later

	if output2[0].LatencyMS != 1 {
		t.Errorf("Expected cache hit (1ms), got %dms", output2[0].LatencyMS)
	}

	// Third request after TTL expiration - cache miss
	req3 := []*Request{{ID: "req1", Type: "GET", LatencyMS: 0}}
	output3, _ := cache.Tick(props, req3, 15) // 15 * 100ms = 1500ms later (expired)

	if output3[0].LatencyMS != 0 {
		t.Errorf("Expected cache miss (0ms) after TTL expiration, got %dms", output3[0].LatencyMS)
	}
}

func TestCacheLogic_MaxEntriesEviction(t *testing.T) {
	cache := CacheLogic{}

	props := map[string]any{
		"cacheTTL":       10000,
		"maxEntries":     2, // Small cache size
		"evictionPolicy": "LRU",
	}

	// Add first entry
	req1 := []*Request{{ID: "req1", Type: "GET", LatencyMS: 0}}
	cache.Tick(props, req1, 1)

	// Add second entry
	req2 := []*Request{{ID: "req2", Type: "GET", LatencyMS: 0}}
	cache.Tick(props, req2, 2)

	// Verify both are cached
	req1Check := []*Request{{ID: "req1", Type: "GET", LatencyMS: 0}}
	output1Check, _ := cache.Tick(props, req1Check, 3)
	if output1Check[0].LatencyMS != 1 {
		t.Errorf("Expected cache hit for req1, got %dms latency", output1Check[0].LatencyMS)
	}

	req2Check := []*Request{{ID: "req2", Type: "GET", LatencyMS: 0}}
	output2Check, _ := cache.Tick(props, req2Check, 4)
	if output2Check[0].LatencyMS != 1 {
		t.Errorf("Expected cache hit for req2, got %dms latency", output2Check[0].LatencyMS)
	}

	// Add third entry (should evict LRU entry)
	req3 := []*Request{{ID: "req3", Type: "GET", LatencyMS: 0}}
	cache.Tick(props, req3, 5)

	// req1 was accessed at tick 3, req2 at tick 4, so req1 should be evicted
	req1CheckAgain := []*Request{{ID: "req1", Type: "GET", LatencyMS: 0}}
	output1, _ := cache.Tick(props, req1CheckAgain, 6)
	if output1[0].LatencyMS != 0 {
		t.Errorf("Expected cache miss for LRU evicted entry, got %dms latency", output1[0].LatencyMS)
	}

	// After adding req1 back, the cache should be at capacity with different items.
	// We don't test further to avoid complex cascading eviction scenarios.
}

func TestCacheLogic_LRUEviction(t *testing.T) {
	cache := CacheLogic{}

	props := map[string]any{
		"cacheTTL":       10000,
		"maxEntries":     2,
		"evictionPolicy": "LRU",
	}

	// Add two entries
	req1 := []*Request{{ID: "req1", Type: "GET", LatencyMS: 0}}
	cache.Tick(props, req1, 1)

	req2 := []*Request{{ID: "req2", Type: "GET", LatencyMS: 0}}
	cache.Tick(props, req2, 2)

	// Access first entry (make it recently used)
	req1Access := []*Request{{ID: "req1", Type: "GET", LatencyMS: 0}}
	cache.Tick(props, req1Access, 3)

	// Add third entry (should evict req2, since req1 was more recently accessed)
	req3 := []*Request{{ID: "req3", Type: "GET", LatencyMS: 0}}
	cache.Tick(props, req3, 4)

	// Verify that req2 was evicted (should be cache miss)
	req2Check := []*Request{{ID: "req2", Type: "GET", LatencyMS: 0}}
	output2, _ := cache.Tick(props, req2Check, 5)

	if output2[0].LatencyMS != 0 {
		t.Errorf("Expected cache miss for LRU evicted entry, got %dms latency", output2[0].LatencyMS)
	}

	// After adding req2 back, the cache should contain {req2, req1} or {req2, req3}
	// depending on LRU logic. We don't test further to avoid cascading evictions.
}

func TestCacheLogic_FIFOEviction(t *testing.T) {
	cache := CacheLogic{}

	props := map[string]any{
		"cacheTTL":       10000,
		"maxEntries":     2,
		"evictionPolicy": "FIFO",
	}

	// Add two entries
	req1 := []*Request{{ID: "req1", Type: "GET", LatencyMS: 0}}
	cache.Tick(props, req1, 1)

	req2 := []*Request{{ID: "req2", Type: "GET", LatencyMS: 0}}
	cache.Tick(props, req2, 2)

	// Access first entry multiple times (shouldn't matter for FIFO)
	req1Access := []*Request{{ID: "req1", Type: "GET", LatencyMS: 0}}
	cache.Tick(props, req1Access, 3)
	cache.Tick(props, req1Access, 4)

	// Add third entry (should evict req1, the first inserted)
	req3 := []*Request{{ID: "req3", Type: "GET", LatencyMS: 0}}
	cache.Tick(props, req3, 5)

	// Check that req1 was evicted (first in, first out)
	req1Check := []*Request{{ID: "req1", Type: "GET", LatencyMS: 0}}
	output1, _ := cache.Tick(props, req1Check, 6)

	if output1[0].LatencyMS != 0 {
		t.Errorf("Expected cache miss for FIFO evicted entry, got %dms latency", output1[0].LatencyMS)
	}

	// After adding req1 back, the cache should contain {req2, req1} or {req3, req1}
	// depending on FIFO logic. We don't test further to avoid cascading evictions.
}

func TestCacheLogic_DefaultValues(t *testing.T) {
	cache := CacheLogic{}

	// Empty props should use defaults
	props := map[string]any{}

	req := []*Request{{ID: "req1", Type: "GET", LatencyMS: 0}}
	output, _ := cache.Tick(props, req, 1)

	if len(output) != 1 {
		t.Errorf("Expected 1 output request")
	}

	// Should be cache miss with 0ms latency
	if output[0].LatencyMS != 0 {
		t.Errorf("Expected 0ms latency for cache miss with defaults, got %dms", output[0].LatencyMS)
	}

	// Second request should be cache hit
	req2 := []*Request{{ID: "req1", Type: "GET", LatencyMS: 0}}
	output2, _ := cache.Tick(props, req2, 2)

	if output2[0].LatencyMS != 1 {
		t.Errorf("Expected 1ms latency for cache hit, got %dms", output2[0].LatencyMS)
	}
}

func TestCacheLogic_SimpleEviction(t *testing.T) {
	cache := CacheLogic{}

	props := map[string]any{
		"cacheTTL":       10000,
		"maxEntries":     1, // Only 1 entry allowed
		"evictionPolicy": "LRU",
	}

	// Add first entry
	req1 := []*Request{{ID: "req1", Type: "GET", LatencyMS: 0}}
	output1, _ := cache.Tick(props, req1, 1)
	if output1[0].LatencyMS != 0 {
		t.Errorf("First request should be cache miss, got %dms", output1[0].LatencyMS)
	}

	// Check it's cached
	req1Again := []*Request{{ID: "req1", Type: "GET", LatencyMS: 0}}
	output1Again, _ := cache.Tick(props, req1Again, 2)
	if output1Again[0].LatencyMS != 1 {
		t.Errorf("Second request should be cache hit, got %dms", output1Again[0].LatencyMS)
	}

	// Add second entry (should evict first)
	req2 := []*Request{{ID: "req2", Type: "GET", LatencyMS: 0}}
	output2, _ := cache.Tick(props, req2, 3)
	if output2[0].LatencyMS != 0 {
		t.Errorf("New request should be cache miss, got %dms", output2[0].LatencyMS)
	}

	// Check that first entry is now evicted
	req1Final := []*Request{{ID: "req1", Type: "GET", LatencyMS: 0}}
	output1Final, _ := cache.Tick(props, req1Final, 4)
	if output1Final[0].LatencyMS != 0 {
		t.Errorf("Evicted entry should be cache miss, got %dms", output1Final[0].LatencyMS)
	}

	// Check that second entry is now also evicted (since req1 was re-added in step 4)
	req2Again := []*Request{{ID: "req2", Type: "GET", LatencyMS: 0}}
	output2Again, _ := cache.Tick(props, req2Again, 5)
	if output2Again[0].LatencyMS != 0 {
		t.Errorf("Re-evicted entry should be cache miss, got %dms", output2Again[0].LatencyMS)
	}
}

func TestCacheLogic_DifferentRequestTypes(t *testing.T) {
	cache := CacheLogic{}

	props := map[string]any{
		"cacheTTL":       10000,
		"maxEntries":     100,
		"evictionPolicy": "LRU",
	}

	// Same ID but different type should be different cache entries
	req1 := []*Request{{ID: "req1", Type: "GET", LatencyMS: 0}}
	cache.Tick(props, req1, 1)

	req2 := []*Request{{ID: "req1", Type: "POST", LatencyMS: 0}}
	output2, _ := cache.Tick(props, req2, 2)

	// Should be cache miss since different type
	if output2[0].LatencyMS != 0 {
		t.Errorf("Expected cache miss for different request type, got %dms latency", output2[0].LatencyMS)
	}

	// Original GET should still be cached
	req1Again := []*Request{{ID: "req1", Type: "GET", LatencyMS: 0}}
	output1, _ := cache.Tick(props, req1Again, 3)

	if output1[0].LatencyMS != 1 {
		t.Errorf("Expected cache hit for original request type, got %dms latency", output1[0].LatencyMS)
	}
}

@@ -1,61 +0,0 @@
package simulation

type DatabaseLogic struct{}

func (d DatabaseLogic) Tick(props map[string]any, queue []*Request, tick int) ([]*Request, bool) {
	// Extract database properties
	replication := int(AsFloat64(props["replication"]))
	if replication == 0 {
		replication = 1 // default
	}

	// Database capacity (could be based on instance size or explicit RPS)
	maxRPS := int(AsFloat64(props["maxRPS"]))
	if maxRPS == 0 {
		maxRPS = 1000 // default capacity
	}

	// Base latency for database operations
	baseLatencyMs := int(AsFloat64(props["baseLatencyMs"]))
	if baseLatencyMs == 0 {
		baseLatencyMs = 10 // default 10ms for local DB operations
	}

	// Process requests up to capacity
	toProcess := queue
	if len(queue) > maxRPS {
		toProcess = queue[:maxRPS]
		// TODO: Could add queue overflow logic here
	}

	output := []*Request{}

	for _, req := range toProcess {
		// Add database latency to the request
		reqCopy := *req

		// Simulate different operation types and their latencies
		operationLatency := baseLatencyMs

		// Simple heuristic: reads are faster than writes
		if req.Type == "GET" || req.Type == "READ" {
			operationLatency = baseLatencyMs
		} else if req.Type == "POST" || req.Type == "WRITE" {
			operationLatency = baseLatencyMs * 2 // writes take longer
		}

		// Replication factor affects write latency
		if req.Type == "POST" || req.Type == "WRITE" {
			operationLatency += (replication - 1) * 5 // 5ms per replica
		}

		reqCopy.LatencyMS += operationLatency
		reqCopy.Path = append(reqCopy.Path, "database-processed")

		output = append(output, &reqCopy)
	}

	// Database health (could simulate failures, connection issues, etc.)
	// For now, assume always healthy
	return output, true
}

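As a quick check of the latency model above (a sketch, not part of the diff; it reuses the Request and DatabaseLogic types from these hunks): with baseLatencyMs = 10 and replication = 3, a read costs 10ms while a write costs 10*2 + (3-1)*5 = 30ms.

// Sketch only: shows the read/write latency split produced by DatabaseLogic.Tick.
func sketchDatabaseLatency() {
	db := DatabaseLogic{}
	props := map[string]any{"replication": 3, "baseLatencyMs": 10}

	reads, _ := db.Tick(props, []*Request{{ID: "r1", Type: "GET"}}, 1)   // 10ms read latency
	writes, _ := db.Tick(props, []*Request{{ID: "w1", Type: "POST"}}, 1) // 10*2 + (3-1)*5 = 30ms write latency
	_, _ = reads, writes
}
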
@@ -1,139 +0,0 @@
package simulation

import (
	"testing"
)

func TestDatabaseLogic_BasicProcessing(t *testing.T) {
	db := DatabaseLogic{}

	props := map[string]any{
		"replication":   2,
		"maxRPS":        100,
		"baseLatencyMs": 15,
	}

	// Create test requests
	reqs := []*Request{
		{ID: "req1", Type: "GET", LatencyMS: 0, Path: []string{"start"}},
		{ID: "req2", Type: "POST", LatencyMS: 0, Path: []string{"start"}},
	}

	output, alive := db.Tick(props, reqs, 1)

	if !alive {
		t.Errorf("Database should be alive")
	}

	if len(output) != 2 {
		t.Errorf("Expected 2 output requests, got %d", len(output))
	}

	// Check read latency (base latency)
	readReq := output[0]
	if readReq.LatencyMS != 15 {
		t.Errorf("Expected read latency 15ms, got %dms", readReq.LatencyMS)
	}

	// Check write latency (base * 2 + replication penalty)
	writeReq := output[1]
	expectedWriteLatency := 15*2 + (2-1)*5 // 30 + 5 = 35ms
	if writeReq.LatencyMS != expectedWriteLatency {
		t.Errorf("Expected write latency %dms, got %dms", expectedWriteLatency, writeReq.LatencyMS)
	}
}

func TestDatabaseLogic_CapacityLimit(t *testing.T) {
	db := DatabaseLogic{}

	props := map[string]any{
		"maxRPS":        2,
		"baseLatencyMs": 10,
	}

	// Create more requests than capacity
	reqs := []*Request{
		{ID: "req1", Type: "GET"},
		{ID: "req2", Type: "GET"},
		{ID: "req3", Type: "GET"}, // This should be dropped
	}

	output, _ := db.Tick(props, reqs, 1)

	if len(output) != 2 {
		t.Errorf("Expected capacity limit of 2, but processed %d requests", len(output))
	}
}

func TestDatabaseLogic_DefaultValues(t *testing.T) {
	db := DatabaseLogic{}

	// Empty props should use defaults
	props := map[string]any{}

	reqs := []*Request{
		{ID: "req1", Type: "GET", LatencyMS: 0},
	}

	output, _ := db.Tick(props, reqs, 1)

	if len(output) != 1 {
		t.Errorf("Expected 1 output request")
	}

	// Should use default 10ms base latency
	if output[0].LatencyMS != 10 {
		t.Errorf("Expected default latency 10ms, got %dms", output[0].LatencyMS)
	}
}

func TestDatabaseLogic_ReplicationEffect(t *testing.T) {
	db := DatabaseLogic{}

	// Test with high replication
	props := map[string]any{
		"replication":   5,
		"baseLatencyMs": 10,
	}

	reqs := []*Request{
		{ID: "req1", Type: "POST", LatencyMS: 0},
	}

	output, _ := db.Tick(props, reqs, 1)

	if len(output) != 1 {
		t.Errorf("Expected 1 output request")
	}

	// Write latency: base*2 + (replication-1)*5 = 10*2 + (5-1)*5 = 20 + 20 = 40ms
	expectedLatency := 10*2 + (5-1)*5
	if output[0].LatencyMS != expectedLatency {
		t.Errorf("Expected latency %dms with replication=5, got %dms", expectedLatency, output[0].LatencyMS)
	}
}

func TestDatabaseLogic_ReadVsWrite(t *testing.T) {
	db := DatabaseLogic{}

	props := map[string]any{
		"replication":   1,
		"baseLatencyMs": 20,
	}

	readReq := []*Request{{ID: "read", Type: "GET", LatencyMS: 0}}
	writeReq := []*Request{{ID: "write", Type: "POST", LatencyMS: 0}}

	readOutput, _ := db.Tick(props, readReq, 1)
	writeOutput, _ := db.Tick(props, writeReq, 1)

	// Read should be base latency
	if readOutput[0].LatencyMS != 20 {
		t.Errorf("Expected read latency 20ms, got %dms", readOutput[0].LatencyMS)
	}

	// Write should be double base latency (no replication penalty with replication=1)
	if writeOutput[0].LatencyMS != 40 {
		t.Errorf("Expected write latency 40ms, got %dms", writeOutput[0].LatencyMS)
	}
}

@@ -1,203 +0,0 @@
package simulation

type DataPipelineLogic struct{}

type DataBatch struct {
	ID           string
	RecordCount  int
	Timestamp    int
	ProcessingMS int
}

type PipelineState struct {
	ProcessingQueue  []DataBatch
	CompletedBatches int
	TotalRecords     int
	BacklogSize      int
}

func (d DataPipelineLogic) Tick(props map[string]any, queue []*Request, tick int) ([]*Request, bool) {
	// Extract data pipeline properties
	batchSize := int(AsFloat64(props["batchSize"]))
	if batchSize == 0 {
		batchSize = 500 // default batch size
	}

	transformation := AsString(props["transformation"])
	if transformation == "" {
		transformation = "map" // default transformation
	}

	// Get pipeline state from props (persistent state)
	state, ok := props["_pipelineState"].(PipelineState)
	if !ok {
		state = PipelineState{
			ProcessingQueue:  []DataBatch{},
			CompletedBatches: 0,
			TotalRecords:     0,
			BacklogSize:      0,
		}
	}

	currentTime := tick * 100 // Convert tick to milliseconds

	// Convert incoming requests to data batches
	if len(queue) > 0 {
		// Group requests into batches
		batches := d.createBatches(queue, batchSize, currentTime, transformation)

		// Add batches to processing queue
		state.ProcessingQueue = append(state.ProcessingQueue, batches...)
		state.BacklogSize += len(queue)
	}

	// Process batches that are ready (completed their processing time)
	output := []*Request{}
	remainingBatches := []DataBatch{}

	for _, batch := range state.ProcessingQueue {
		if currentTime >= batch.Timestamp+batch.ProcessingMS {
			// Batch is complete - create output requests
			for i := 0; i < batch.RecordCount; i++ {
				processedReq := &Request{
					ID:        batch.ID + "-record-" + string(rune('0'+i)),
					Timestamp: batch.Timestamp,
					LatencyMS: batch.ProcessingMS,
					Origin:    "data-pipeline",
					Type:      "PROCESSED",
					Path:      []string{"pipeline-" + transformation},
				}
				output = append(output, processedReq)
			}

			state.CompletedBatches++
			state.TotalRecords += batch.RecordCount
		} else {
			// Batch still processing
			remainingBatches = append(remainingBatches, batch)
		}
	}

	state.ProcessingQueue = remainingBatches
	state.BacklogSize = len(remainingBatches) * batchSize

	// Update persistent state
	props["_pipelineState"] = state

	// Health check: pipeline is healthy if backlog is not too large
	maxBacklogSize := batchSize * 20 // Allow up to 20 batches in backlog
	healthy := state.BacklogSize < maxBacklogSize

	return output, healthy
}

// createBatches groups requests into batches and calculates processing time
func (d DataPipelineLogic) createBatches(requests []*Request, batchSize int, timestamp int, transformation string) []DataBatch {
	batches := []DataBatch{}

	for i := 0; i < len(requests); i += batchSize {
		end := i + batchSize
		if end > len(requests) {
			end = len(requests)
		}

		recordCount := end - i
		processingTime := d.calculateProcessingTime(recordCount, transformation)

		batch := DataBatch{
			ID:           "batch-" + string(rune('A'+len(batches))),
			RecordCount:  recordCount,
			Timestamp:    timestamp,
			ProcessingMS: processingTime,
		}

		batches = append(batches, batch)
	}

	return batches
}

// calculateProcessingTime determines how long a batch takes to process based on transformation type
func (d DataPipelineLogic) calculateProcessingTime(recordCount int, transformation string) int {
	// Base processing time per record
	baseTimePerRecord := d.getTransformationComplexity(transformation)

	// Total time scales with record count but with some economies of scale
	totalTime := float64(recordCount) * baseTimePerRecord

	// Add batch overhead (setup, teardown, I/O)
	batchOverhead := d.getBatchOverhead(transformation)
	totalTime += batchOverhead

	// Apply economies of scale for larger batches (slightly more efficient)
	if recordCount > 100 {
		scaleFactor := 0.9 // 10% efficiency gain for large batches
		totalTime *= scaleFactor
	}

	return int(totalTime)
}

// getTransformationComplexity returns base processing time per record in milliseconds
func (d DataPipelineLogic) getTransformationComplexity(transformation string) float64 {
	switch transformation {
	case "map":
		return 1.0 // Simple field mapping
	case "filter":
		return 0.5 // Just evaluate conditions
	case "sort":
		return 3.0 // Sorting requires more compute
	case "aggregate":
		return 2.0 // Grouping and calculating aggregates
	case "join":
		return 5.0 // Most expensive - joining with other datasets
	case "deduplicate":
		return 2.5 // Hash-based deduplication
	case "validate":
		return 1.5 // Data validation and cleaning
	case "enrich":
		return 4.0 // Enriching with external data
	case "compress":
		return 1.2 // Compression processing
	case "encrypt":
		return 2.0 // Encryption overhead
	default:
		return 1.0 // Default to simple transformation
	}
}

// getBatchOverhead returns fixed overhead time per batch in milliseconds
func (d DataPipelineLogic) getBatchOverhead(transformation string) float64 {
	switch transformation {
	case "map", "filter", "validate":
		return 50.0 // Low overhead for simple operations
	case "sort", "aggregate", "deduplicate":
		return 200.0 // Medium overhead for complex operations
	case "join", "enrich":
		return 500.0 // High overhead for operations requiring external data
	case "compress", "encrypt":
		return 100.0 // Medium overhead for I/O operations
	default:
		return 100.0 // Default overhead
	}
}

// Helper function to get pipeline statistics
func (d DataPipelineLogic) GetPipelineStats(props map[string]any) map[string]interface{} {
	state, ok := props["_pipelineState"].(PipelineState)
	if !ok {
		return map[string]interface{}{
			"completedBatches": 0,
			"totalRecords":     0,
			"backlogSize":      0,
			"queuedBatches":    0,
		}
	}

	return map[string]interface{}{
		"completedBatches": state.CompletedBatches,
		"totalRecords":     state.TotalRecords,
		"backlogSize":      state.BacklogSize,
		"queuedBatches":    len(state.ProcessingQueue),
	}
}

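Working through calculateProcessingTime for a 50-record "map" batch: 50 records x 1.0ms per record plus 50ms batch overhead gives 100ms, with no economy-of-scale discount since the batch is not larger than 100 records; at 100ms per tick, a batch queued at tick 1 therefore drains on tick 2. A minimal sketch (not part of the diff; it assumes the types above):

// Sketch only: one batch queued on tick 1 is emitted as PROCESSED records on tick 2.
func sketchPipelineTiming() {
	p := DataPipelineLogic{}
	props := map[string]any{"batchSize": 100.0, "transformation": "map"}

	reqs := make([]*Request, 50)
	for i := range reqs {
		reqs[i] = &Request{ID: "rec", Type: "DATA"}
	}

	queued, _ := p.Tick(props, reqs, 1)          // one batch queued; ProcessingMS = 50*1.0 + 50 = 100
	drained, _ := p.Tick(props, []*Request{}, 2) // tick 2 is 200ms >= 100+100, so 50 PROCESSED records are emitted
	_, _ = queued, drained
}
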
@@ -1,396 +0,0 @@
package simulation

import (
	"testing"
)

func TestDataPipelineLogic_BasicProcessing(t *testing.T) {
	logic := DataPipelineLogic{}

	props := map[string]any{
		"batchSize":      100.0,
		"transformation": "map",
	}

	// Create 50 requests (less than batch size)
	requests := make([]*Request, 50)
	for i := range requests {
		requests[i] = &Request{ID: string(rune('1' + i)), Type: "DATA", LatencyMS: 0}
	}

	// First tick - should create batch and start processing
	output, healthy := logic.Tick(props, requests, 1)

	if !healthy {
		t.Error("Expected data pipeline to be healthy")
	}

	// Should not have output yet (batch is still processing)
	if len(output) != 0 {
		t.Errorf("Expected no output during processing, got %d", len(output))
	}

	// Check that batch was created
	state, ok := props["_pipelineState"].(PipelineState)
	if !ok {
		t.Error("Expected pipeline state to be created")
	}

	if len(state.ProcessingQueue) != 1 {
		t.Errorf("Expected 1 batch in processing queue, got %d", len(state.ProcessingQueue))
	}

	if state.ProcessingQueue[0].RecordCount != 50 {
		t.Errorf("Expected batch with 50 records, got %d", state.ProcessingQueue[0].RecordCount)
	}
}

func TestDataPipelineLogic_BatchCompletion(t *testing.T) {
	logic := DataPipelineLogic{}

	props := map[string]any{
		"batchSize":      10.0,
		"transformation": "filter", // Fast transformation
	}

	// Create 5 requests
	requests := make([]*Request, 5)
	for i := range requests {
		requests[i] = &Request{ID: string(rune('1' + i)), Type: "DATA", LatencyMS: 0}
	}

	// First tick - start processing
	logic.Tick(props, requests, 1)

	// Wait enough ticks for processing to complete.
	// Filter transformation should complete quickly.
	var output []*Request
	var healthy bool

	for tick := 2; tick <= 10; tick++ {
		output, healthy = logic.Tick(props, []*Request{}, tick)
		if len(output) > 0 {
			break
		}
	}

	if !healthy {
		t.Error("Expected data pipeline to be healthy")
	}

	// Should have output matching input count
	if len(output) != 5 {
		t.Errorf("Expected 5 output records, got %d", len(output))
	}

	// Check output structure
	for _, req := range output {
		if req.Type != "PROCESSED" {
			t.Errorf("Expected PROCESSED type, got %s", req.Type)
		}
		if req.Origin != "data-pipeline" {
			t.Errorf("Expected data-pipeline origin, got %s", req.Origin)
		}
		if len(req.Path) == 0 || req.Path[0] != "pipeline-filter" {
			t.Error("Expected path to indicate filter transformation")
		}
	}
}

func TestDataPipelineLogic_MultipleBatches(t *testing.T) {
	logic := DataPipelineLogic{}

	props := map[string]any{
		"batchSize":      10.0,
		"transformation": "map",
	}

	// Create 25 requests (should create 3 batches: 10, 10, 5)
	requests := make([]*Request, 25)
	for i := range requests {
		requests[i] = &Request{ID: string(rune('1' + i)), Type: "DATA", LatencyMS: 0}
	}

	// First tick - create batches
	output, healthy := logic.Tick(props, requests, 1)

	if !healthy {
		t.Error("Expected data pipeline to be healthy")
	}

	if len(output) != 0 {
		t.Error("Expected no immediate output")
	}

	// Check that 3 batches were created
	state, ok := props["_pipelineState"].(PipelineState)
	if !ok {
		t.Error("Expected pipeline state to be created")
	}

	if len(state.ProcessingQueue) != 3 {
		t.Errorf("Expected 3 batches in processing queue, got %d", len(state.ProcessingQueue))
	}

	// Verify batch sizes
	expectedSizes := []int{10, 10, 5}
	for i, batch := range state.ProcessingQueue {
		if batch.RecordCount != expectedSizes[i] {
			t.Errorf("Expected batch %d to have %d records, got %d",
				i, expectedSizes[i], batch.RecordCount)
		}
	}
}

func TestDataPipelineLogic_TransformationComplexity(t *testing.T) {
	logic := DataPipelineLogic{}

	transformations := []string{"filter", "map", "sort", "aggregate", "join"}

	for _, transformation := range transformations {
		t.Run(transformation, func(t *testing.T) {
			complexity := logic.getTransformationComplexity(transformation)

			// Verify relative complexity ordering
			switch transformation {
			case "filter":
				if complexity >= logic.getTransformationComplexity("map") {
					t.Error("Filter should be simpler than map")
				}
			case "join":
				if complexity <= logic.getTransformationComplexity("aggregate") {
					t.Error("Join should be more complex than aggregate")
				}
			case "sort":
				if complexity <= logic.getTransformationComplexity("map") {
					t.Error("Sort should be more complex than map")
				}
			}

			if complexity <= 0 {
				t.Errorf("Expected positive complexity for %s", transformation)
			}
		})
	}
}

func TestDataPipelineLogic_BatchOverhead(t *testing.T) {
	logic := DataPipelineLogic{}

	// Test different overhead levels
	testCases := []struct {
		transformation string
		expectedRange  [2]float64 // [min, max]
	}{
		{"map", [2]float64{0, 100}},    // Low overhead
		{"join", [2]float64{300, 600}}, // High overhead
		{"sort", [2]float64{150, 300}}, // Medium overhead
	}

	for _, tc := range testCases {
		overhead := logic.getBatchOverhead(tc.transformation)

		if overhead < tc.expectedRange[0] || overhead > tc.expectedRange[1] {
			t.Errorf("Expected %s overhead between %.0f-%.0f, got %.0f",
				tc.transformation, tc.expectedRange[0], tc.expectedRange[1], overhead)
		}
	}
}

func TestDataPipelineLogic_ProcessingTime(t *testing.T) {
	logic := DataPipelineLogic{}

	// Test that processing time scales with record count
	smallBatch := logic.calculateProcessingTime(10, "map")
	largeBatch := logic.calculateProcessingTime(100, "map")

	if largeBatch <= smallBatch {
		t.Error("Expected larger batch to take more time")
	}

	// Test that complex transformations take longer
	simpleTime := logic.calculateProcessingTime(50, "filter")
	complexTime := logic.calculateProcessingTime(50, "join")

	if complexTime <= simpleTime {
		t.Error("Expected complex transformation to take longer")
	}

	// Test economies of scale (large batches should be more efficient per record)
	smallPerRecord := float64(smallBatch) / 10.0
	largePerRecord := float64(largeBatch) / 100.0

	if largePerRecord >= smallPerRecord {
		t.Error("Expected economies of scale for larger batches")
	}
}

func TestDataPipelineLogic_HealthCheck(t *testing.T) {
	logic := DataPipelineLogic{}

	props := map[string]any{
		"batchSize":      10.0,
		"transformation": "join", // Slow transformation
	}

	// Create a large number of requests to test backlog health
	requests := make([]*Request, 300) // 30 batches (above healthy threshold)
	for i := range requests {
		requests[i] = &Request{ID: string(rune('1' + (i % 26))), Type: "DATA", LatencyMS: 0}
	}

	// First tick - should create many batches
	output, healthy := logic.Tick(props, requests, 1)

	// Should be unhealthy due to large backlog
	if healthy {
		t.Error("Expected data pipeline to be unhealthy with large backlog")
	}

	if len(output) != 0 {
		t.Error("Expected no immediate output with slow transformation")
	}

	// Check backlog size
	state, ok := props["_pipelineState"].(PipelineState)
	if !ok {
		t.Error("Expected pipeline state to be created")
	}

	if state.BacklogSize < 200 {
		t.Errorf("Expected large backlog, got %d", state.BacklogSize)
	}
}

func TestDataPipelineLogic_DefaultValues(t *testing.T) {
	logic := DataPipelineLogic{}

	// Empty props should use defaults
	props := map[string]any{}

	requests := []*Request{{ID: "1", Type: "DATA", LatencyMS: 0}}

	output, healthy := logic.Tick(props, requests, 1)

	if !healthy {
		t.Error("Expected pipeline to be healthy with default values")
	}

	if len(output) != 0 {
		t.Error("Expected no immediate output")
	}

	// Should use default batch size and transformation
	state, ok := props["_pipelineState"].(PipelineState)
	if !ok {
		t.Error("Expected pipeline state to be created with defaults")
	}

	if len(state.ProcessingQueue) != 1 {
		t.Error("Expected one batch with default settings")
	}
}

func TestDataPipelineLogic_PipelineStats(t *testing.T) {
	logic := DataPipelineLogic{}

	props := map[string]any{
		"batchSize":      5.0,
		"transformation": "filter",
	}

	// Initial stats should be empty
	stats := logic.GetPipelineStats(props)
	if stats["completedBatches"] != 0 {
		t.Error("Expected initial completed batches to be 0")
	}

	// Process some data
	requests := make([]*Request, 10)
	for i := range requests {
		requests[i] = &Request{ID: string(rune('1' + i)), Type: "DATA", LatencyMS: 0}
	}

	logic.Tick(props, requests, 1)

	// Check stats after processing
	stats = logic.GetPipelineStats(props)
	if stats["queuedBatches"] != 2 {
		t.Errorf("Expected 2 queued batches, got %v", stats["queuedBatches"])
	}

	if stats["backlogSize"] != 10 {
		t.Errorf("Expected backlog size of 10, got %v", stats["backlogSize"])
	}
}

func TestDataPipelineLogic_ContinuousProcessing(t *testing.T) {
	logic := DataPipelineLogic{}

	props := map[string]any{
		"batchSize":      5.0,
		"transformation": "map",
	}

	// Process multiple waves of data
	totalOutput := 0

	for wave := 0; wave < 3; wave++ {
		requests := make([]*Request, 5)
		for i := range requests {
			requests[i] = &Request{ID: string(rune('A' + wave*5 + i)), Type: "DATA", LatencyMS: 0}
		}

		// Process each wave
		for tick := wave*10 + 1; tick <= wave*10+5; tick++ {
			var output []*Request
			if tick == wave*10+1 {
				output, _ = logic.Tick(props, requests, tick)
			} else {
				output, _ = logic.Tick(props, []*Request{}, tick)
			}
			totalOutput += len(output)
		}
	}

	// Should have processed all data eventually
	if totalOutput != 15 {
		t.Errorf("Expected 15 total output records, got %d", totalOutput)
	}

	// Check final stats
	stats := logic.GetPipelineStats(props)
	if stats["totalRecords"] != 15 {
		t.Errorf("Expected 15 total records processed, got %v", stats["totalRecords"])
	}
}

func TestDataPipelineLogic_EmptyQueue(t *testing.T) {
	logic := DataPipelineLogic{}

	props := map[string]any{
		"batchSize":      10.0,
		"transformation": "map",
	}

	// Process empty queue
	output, healthy := logic.Tick(props, []*Request{}, 1)

	if !healthy {
		t.Error("Expected pipeline to be healthy with empty queue")
	}

	if len(output) != 0 {
		t.Error("Expected no output with empty queue")
	}

	// State should be initialized but empty
	state, ok := props["_pipelineState"].(PipelineState)
	if !ok {
		t.Error("Expected pipeline state to be initialized")
	}

	if len(state.ProcessingQueue) != 0 {
		t.Error("Expected empty processing queue")
	}
}

@@ -1,115 +0,0 @@
package simulation

type MessageQueueLogic struct{}

type QueuedMessage struct {
	RequestID   string
	Timestamp   int
	MessageData string
	RetryCount  int
}

func (mq MessageQueueLogic) Tick(props map[string]any, queue []*Request, tick int) ([]*Request, bool) {
	// Extract message queue properties
	queueCapacity := int(AsFloat64(props["queueCapacity"]))
	if queueCapacity == 0 {
		queueCapacity = 1000 // default capacity
	}

	retentionSeconds := int(AsFloat64(props["retentionSeconds"]))
	if retentionSeconds == 0 {
		retentionSeconds = 86400 // default 24 hours in seconds
	}

	// Processing rate (messages per tick)
	processingRate := int(AsFloat64(props["processingRate"]))
	if processingRate == 0 {
		processingRate = 100 // default 100 messages per tick
	}

	// Current timestamp for this tick
	currentTime := tick * 100 // assuming 100ms per tick

	// Initialize queue storage in props
	messageQueue, ok := props["_messageQueue"].([]QueuedMessage)
	if !ok {
		messageQueue = []QueuedMessage{}
	}

	// Clean up expired messages based on retention policy
	messageQueue = mq.cleanExpiredMessages(messageQueue, currentTime, retentionSeconds*1000)

	// First, process existing messages from the queue (FIFO order)
	output := []*Request{}
	messagesToProcess := len(messageQueue)
	if messagesToProcess > processingRate {
		messagesToProcess = processingRate
	}

	for i := 0; i < messagesToProcess; i++ {
		if len(messageQueue) == 0 {
			break
		}

		// Dequeue message (FIFO - take from front)
		message := messageQueue[0]
		messageQueue = messageQueue[1:]

		// Create request for downstream processing
		processedReq := &Request{
			ID:        message.RequestID,
			Timestamp: message.Timestamp,
			LatencyMS: 2, // Small latency for queue processing
			Origin:    "message-queue",
			Type:      "PROCESS",
			Path:      []string{"queued-message"},
		}

		output = append(output, processedReq)
	}

	// Then, add incoming requests to the queue for next tick
	for _, req := range queue {
		// Check if queue is at capacity
		if len(messageQueue) >= queueCapacity {
			// Queue full - message is dropped (or could implement backpressure).
			// For now, we'll drop the message and add a latency penalty.
			reqCopy := *req
			reqCopy.LatencyMS += 1000 // High latency penalty for dropped messages
			reqCopy.Path = append(reqCopy.Path, "queue-full-dropped")
			// Don't add to output as message was dropped
			continue
		}

		// Add message to queue
		message := QueuedMessage{
			RequestID:   req.ID,
			Timestamp:   currentTime,
			MessageData: "message-payload", // In a real system, this would be the actual message
			RetryCount:  0,
		}
		messageQueue = append(messageQueue, message)
	}

	// Update queue storage in props
	props["_messageQueue"] = messageQueue

	// Queue is healthy if not at capacity or if we can still process messages.
	// Queue becomes unhealthy only when completely full AND we can't process anything.
	healthy := len(messageQueue) < queueCapacity || processingRate > 0

	return output, healthy
}

func (mq MessageQueueLogic) cleanExpiredMessages(messageQueue []QueuedMessage, currentTime, retentionMs int) []QueuedMessage {
	cleaned := []QueuedMessage{}

	for _, message := range messageQueue {
		if (currentTime - message.Timestamp) <= retentionMs {
			cleaned = append(cleaned, message)
		}
		// Expired messages are dropped
	}

	return cleaned
}

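Because incoming messages are enqueued only after the processing pass, every message pays at least one tick of queueing delay before it re-emerges as a PROCESS request. A minimal sketch (not part of the diff; it assumes the types above):

// Sketch only: a message queued on tick 1 is drained on tick 2 with 2ms of latency.
func sketchMessageQueueTicks() {
	mq := MessageQueueLogic{}
	props := map[string]any{"queueCapacity": 10, "processingRate": 5}

	enqueued, _ := mq.Tick(props, []*Request{{ID: "msg1", Type: "SEND"}}, 1) // queued; no output this tick
	drained, _ := mq.Tick(props, []*Request{}, 2)                            // one PROCESS request with 2ms latency
	_, _ = enqueued, drained
}
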
@@ -1,329 +0,0 @@
package simulation

import (
	"testing"
)

func TestMessageQueueLogic_BasicProcessing(t *testing.T) {
	mq := MessageQueueLogic{}

	props := map[string]any{
		"queueCapacity":    10,
		"retentionSeconds": 3600, // 1 hour
		"processingRate":   5,
	}

	// Add some messages to the queue
	reqs := []*Request{
		{ID: "msg1", Type: "SEND", LatencyMS: 0, Timestamp: 100},
		{ID: "msg2", Type: "SEND", LatencyMS: 0, Timestamp: 100},
		{ID: "msg3", Type: "SEND", LatencyMS: 0, Timestamp: 100},
	}

	output, healthy := mq.Tick(props, reqs, 1)

	if !healthy {
		t.Errorf("Message queue should be healthy")
	}

	// No immediate output since messages are queued first
	if len(output) != 0 {
		t.Errorf("Expected 0 immediate output (messages queued), got %d", len(output))
	}

	// Check that messages are in the queue
	messageQueue, ok := props["_messageQueue"].([]QueuedMessage)
	if !ok {
		t.Errorf("Expected message queue to be initialized")
	}

	if len(messageQueue) != 3 {
		t.Errorf("Expected 3 messages in queue, got %d", len(messageQueue))
	}

	// Process the queue (no new incoming messages)
	output2, _ := mq.Tick(props, []*Request{}, 2)

	// Should process up to processingRate (5) messages
	if len(output2) != 3 {
		t.Errorf("Expected 3 processed messages, got %d", len(output2))
	}

	// Queue should now be empty
	messageQueue2, _ := props["_messageQueue"].([]QueuedMessage)
	if len(messageQueue2) != 0 {
		t.Errorf("Expected empty queue after processing, got %d messages", len(messageQueue2))
	}

	// Check output message properties
	for _, msg := range output2 {
		if msg.LatencyMS != 2 {
			t.Errorf("Expected 2ms processing latency, got %dms", msg.LatencyMS)
		}
		if msg.Type != "PROCESS" {
			t.Errorf("Expected PROCESS type, got %s", msg.Type)
		}
	}
}

func TestMessageQueueLogic_CapacityLimit(t *testing.T) {
	mq := MessageQueueLogic{}

	props := map[string]any{
		"queueCapacity":    2, // Small capacity
		"retentionSeconds": 3600,
		"processingRate":   1,
	}

	// Add more messages than capacity
	reqs := []*Request{
		{ID: "msg1", Type: "SEND", LatencyMS: 0},
		{ID: "msg2", Type: "SEND", LatencyMS: 0},
		{ID: "msg3", Type: "SEND", LatencyMS: 0}, // This should be dropped
	}

	output, healthy := mq.Tick(props, reqs, 1)

	// Queue should be healthy (can still process messages)
	if !healthy {
		t.Errorf("Queue should be healthy (can still process)")
	}

	// Should have no immediate output (messages queued)
	if len(output) != 0 {
		t.Errorf("Expected 0 immediate output, got %d", len(output))
	}

	// Check queue size
	messageQueue, _ := props["_messageQueue"].([]QueuedMessage)
	if len(messageQueue) != 2 {
		t.Errorf("Expected 2 messages in queue (capacity limit), got %d", len(messageQueue))
	}

	// Add another message when queue is full
	reqs2 := []*Request{{ID: "msg4", Type: "SEND", LatencyMS: 0}}
	output2, healthy2 := mq.Tick(props, reqs2, 2)

	// Queue should still be healthy (can process messages)
	if !healthy2 {
		t.Errorf("Queue should remain healthy (can still process)")
	}

	// Should have 1 processed message (processingRate = 1)
	if len(output2) != 1 {
		t.Errorf("Expected 1 processed message, got %d", len(output2))
	}

	// Queue should have 2 messages (started with 2, processed 1 leaving 1, added 1 new since space available)
	messageQueue2, _ := props["_messageQueue"].([]QueuedMessage)
	if len(messageQueue2) != 2 {
		t.Errorf("Expected 2 messages in queue (1 remaining + 1 new), got %d", len(messageQueue2))
	}
}

func TestMessageQueueLogic_ProcessingRate(t *testing.T) {
	mq := MessageQueueLogic{}

	props := map[string]any{
		"queueCapacity":    100,
		"retentionSeconds": 3600,
		"processingRate":   3, // Process 3 messages per tick
	}

	// Add 10 messages
	reqs := []*Request{}
	for i := 0; i < 10; i++ {
		reqs = append(reqs, &Request{ID: "msg" + string(rune(i+'0')), Type: "SEND"})
	}

	// First tick: queue all messages
	mq.Tick(props, reqs, 1)

	// Second tick: process at rate limit
	output, _ := mq.Tick(props, []*Request{}, 2)

	if len(output) != 3 {
		t.Errorf("Expected 3 processed messages (rate limit), got %d", len(output))
	}

	// Check remaining queue size
	messageQueue, _ := props["_messageQueue"].([]QueuedMessage)
	if len(messageQueue) != 7 {
		t.Errorf("Expected 7 messages remaining in queue, got %d", len(messageQueue))
	}

	// Third tick: process 3 more
	output2, _ := mq.Tick(props, []*Request{}, 3)

	if len(output2) != 3 {
		t.Errorf("Expected 3 more processed messages, got %d", len(output2))
	}

	// Check remaining queue size
	messageQueue2, _ := props["_messageQueue"].([]QueuedMessage)
	if len(messageQueue2) != 4 {
		t.Errorf("Expected 4 messages remaining in queue, got %d", len(messageQueue2))
	}
}

func TestMessageQueueLogic_MessageRetention(t *testing.T) {
	mq := MessageQueueLogic{}

	props := map[string]any{
		"queueCapacity":    100,
		"retentionSeconds": 1, // 1 second retention
		"processingRate":   0, // Don't process messages, just test retention
	}

	// Add messages at tick 1
	reqs := []*Request{
		{ID: "msg1", Type: "SEND", Timestamp: 100},
		{ID: "msg2", Type: "SEND", Timestamp: 100},
	}

	mq.Tick(props, reqs, 1)

	// Check messages are queued
	messageQueue, _ := props["_messageQueue"].([]QueuedMessage)
	if len(messageQueue) != 2 {
		t.Errorf("Expected 2 messages in queue, got %d", len(messageQueue))
} |
|
||||||
|
|
||||||
// Tick at time that should expire messages (tick 20 = 2000ms, retention = 1000ms)
|
|
||||||
output, _ := mq.Tick(props, []*Request{}, 20) |
|
||||||
|
|
||||||
// Messages should be expired and removed
|
|
||||||
messageQueue2, _ := props["_messageQueue"].([]QueuedMessage) |
|
||||||
if len(messageQueue2) != 0 { |
|
||||||
t.Errorf("Expected messages to be expired and removed, got %d", len(messageQueue2)) |
|
||||||
} |
|
||||||
|
|
||||||
// No output since processingRate = 0
|
|
||||||
if len(output) != 0 { |
|
||||||
t.Errorf("Expected no output with processingRate=0, got %d", len(output)) |
|
||||||
} |
|
||||||
} |
|
||||||
|
|
||||||
func TestMessageQueueLogic_FIFOOrdering(t *testing.T) { |
|
||||||
mq := MessageQueueLogic{} |
|
||||||
|
|
||||||
props := map[string]any{ |
|
||||||
"queueCapacity": 10, |
|
||||||
"retentionSeconds": 3600, |
|
||||||
"processingRate": 2, |
|
||||||
} |
|
||||||
|
|
||||||
// Add messages in order
|
|
||||||
reqs := []*Request{ |
|
||||||
{ID: "first", Type: "SEND"}, |
|
||||||
{ID: "second", Type: "SEND"}, |
|
||||||
{ID: "third", Type: "SEND"}, |
|
||||||
} |
|
||||||
|
|
||||||
mq.Tick(props, reqs, 1) |
|
||||||
|
|
||||||
// Process 2 messages
|
|
||||||
output, _ := mq.Tick(props, []*Request{}, 2) |
|
||||||
|
|
||||||
if len(output) != 2 { |
|
||||||
t.Errorf("Expected 2 processed messages, got %d", len(output)) |
|
||||||
} |
|
||||||
|
|
||||||
// Check FIFO order
|
|
||||||
if output[0].ID != "first" { |
|
||||||
t.Errorf("Expected first message to be 'first', got '%s'", output[0].ID) |
|
||||||
} |
|
||||||
|
|
||||||
if output[1].ID != "second" { |
|
||||||
t.Errorf("Expected second message to be 'second', got '%s'", output[1].ID) |
|
||||||
} |
|
||||||
|
|
||||||
// Process remaining message
|
|
||||||
output2, _ := mq.Tick(props, []*Request{}, 3) |
|
||||||
|
|
||||||
if len(output2) != 1 { |
|
||||||
t.Errorf("Expected 1 remaining message, got %d", len(output2)) |
|
||||||
} |
|
||||||
|
|
||||||
if output2[0].ID != "third" { |
|
||||||
t.Errorf("Expected remaining message to be 'third', got '%s'", output2[0].ID) |
|
||||||
} |
|
||||||
} |
|
||||||
|
|
||||||
func TestMessageQueueLogic_DefaultValues(t *testing.T) { |
|
||||||
mq := MessageQueueLogic{} |
|
||||||
|
|
||||||
// Empty props should use defaults
|
|
||||||
props := map[string]any{} |
|
||||||
|
|
||||||
reqs := []*Request{{ID: "msg1", Type: "SEND"}} |
|
||||||
output, healthy := mq.Tick(props, reqs, 1) |
|
||||||
|
|
||||||
if !healthy { |
|
||||||
t.Errorf("Queue should be healthy with default values") |
|
||||||
} |
|
||||||
|
|
||||||
// Should queue the message (no immediate output)
|
|
||||||
if len(output) != 0 { |
|
||||||
t.Errorf("Expected message to be queued (0 output), got %d", len(output)) |
|
||||||
} |
|
||||||
|
|
||||||
// Check that message was queued with defaults
|
|
||||||
messageQueue, _ := props["_messageQueue"].([]QueuedMessage) |
|
||||||
if len(messageQueue) != 1 { |
|
||||||
t.Errorf("Expected 1 message queued with defaults, got %d", len(messageQueue)) |
|
||||||
} |
|
||||||
|
|
||||||
// Process with defaults (should process up to default rate)
|
|
||||||
output2, _ := mq.Tick(props, []*Request{}, 2) |
|
||||||
|
|
||||||
if len(output2) != 1 { |
|
||||||
t.Errorf("Expected 1 processed message with defaults, got %d", len(output2)) |
|
||||||
} |
|
||||||
} |
|
||||||
|
|
||||||
func TestMessageQueueLogic_ContinuousFlow(t *testing.T) { |
|
||||||
mq := MessageQueueLogic{} |
|
||||||
|
|
||||||
props := map[string]any{ |
|
||||||
"queueCapacity": 5, |
|
||||||
"retentionSeconds": 3600, |
|
||||||
"processingRate": 2, |
|
||||||
} |
|
||||||
|
|
||||||
// Tick 1: Add 3 messages
|
|
||||||
reqs1 := []*Request{ |
|
||||||
{ID: "msg1", Type: "SEND"}, |
|
||||||
{ID: "msg2", Type: "SEND"}, |
|
||||||
{ID: "msg3", Type: "SEND"}, |
|
||||||
} |
|
||||||
output1, _ := mq.Tick(props, reqs1, 1) |
|
||||||
|
|
||||||
// Should queue all 3 messages
|
|
||||||
if len(output1) != 0 { |
|
||||||
t.Errorf("Expected 0 output on first tick, got %d", len(output1)) |
|
||||||
} |
|
||||||
|
|
||||||
// Tick 2: Add 2 more messages, process 2
|
|
||||||
reqs2 := []*Request{ |
|
||||||
{ID: "msg4", Type: "SEND"}, |
|
||||||
{ID: "msg5", Type: "SEND"}, |
|
||||||
} |
|
||||||
output2, _ := mq.Tick(props, reqs2, 2) |
|
||||||
|
|
||||||
// Should process 2 messages
|
|
||||||
if len(output2) != 2 { |
|
||||||
t.Errorf("Expected 2 processed messages, got %d", len(output2)) |
|
||||||
} |
|
||||||
|
|
||||||
// Should have 3 messages in queue (3 remaining + 2 new - 2 processed)
|
|
||||||
messageQueue, _ := props["_messageQueue"].([]QueuedMessage) |
|
||||||
if len(messageQueue) != 3 { |
|
||||||
t.Errorf("Expected 3 messages in queue, got %d", len(messageQueue)) |
|
||||||
} |
|
||||||
|
|
||||||
// Check processing order
|
|
||||||
if output2[0].ID != "msg1" || output2[1].ID != "msg2" { |
|
||||||
t.Errorf("Expected FIFO processing order, got %s, %s", output2[0].ID, output2[1].ID) |
|
||||||
} |
|
||||||
} |
|
||||||
@@ -1,241 +0,0 @@
package simulation

import (
	"fmt"
	"hash/fnv"
	"math"
)

type MicroserviceLogic struct{}

type ServiceInstance struct {
	ID           int
	CurrentLoad  int
	HealthStatus string
}

// MicroserviceCacheEntry represents a cached item in the microservice's cache
type MicroserviceCacheEntry struct {
	Data        string
	Timestamp   int
	AccessTime  int
	AccessCount int
}

// hashKey hashes a string for use as a cache key
func hashKey(s string) uint32 {
	h := fnv.New32a()
	h.Write([]byte(s))
	return h.Sum32()
}

func (m MicroserviceLogic) Tick(props map[string]any, queue []*Request, tick int) ([]*Request, bool) {
	// Extract microservice properties
	instanceCount := int(AsFloat64(props["instanceCount"]))
	if instanceCount == 0 {
		instanceCount = 1 // default to 1 instance
	}

	cpu := int(AsFloat64(props["cpu"]))
	if cpu == 0 {
		cpu = 2 // default 2 CPU cores
	}

	ramGb := int(AsFloat64(props["ramGb"]))
	if ramGb == 0 {
		ramGb = 4 // default 4GB RAM
	}

	rpsCapacity := int(AsFloat64(props["rpsCapacity"]))
	if rpsCapacity == 0 {
		rpsCapacity = 100 // default capacity per instance
	}

	scalingStrategy := AsString(props["scalingStrategy"])
	if scalingStrategy == "" {
		scalingStrategy = "auto"
	}

	// Calculate base latency based on resource specs
	baseLatencyMs := m.calculateBaseLatency(cpu, ramGb)

	// Auto-scaling logic: adjust instance count based on load
	currentLoad := len(queue)
	if scalingStrategy == "auto" {
		instanceCount = m.autoScale(instanceCount, currentLoad, rpsCapacity)
		props["instanceCount"] = float64(instanceCount) // update for next tick
	}

	// Total capacity across all instances
	totalCapacity := instanceCount * rpsCapacity

	// Process requests up to total capacity
	toProcess := queue
	if len(queue) > totalCapacity {
		toProcess = queue[:totalCapacity]
	}

	// Initialize cache in microservice props
	cache, ok := props["_microserviceCache"].(map[string]*MicroserviceCacheEntry)
	if !ok {
		cache = make(map[string]*MicroserviceCacheEntry)
		props["_microserviceCache"] = cache
	}

	cacheTTL := 300000        // 5 minutes default TTL
	currentTime := tick * 100 // assuming 100ms per tick

	output := []*Request{}     // Only cache misses go here (forwarded to database)
	cacheHits := []*Request{}  // Cache hits - completed locally
	dbRequests := []*Request{} // Requests that need to go to database

	// Process each request with cache-aside logic
	for i, req := range toProcess {
		// Generate cache key for this request (simulate URL patterns)
		hashValue := hashKey(req.ID) % 100 // Create 100 possible "URLs"
		cacheKey := fmt.Sprintf("url-%d-%s", hashValue, req.Type)

		// Check cache first (Cache-Aside pattern)
		entry, hit := cache[cacheKey]
		if hit && !m.isCacheExpired(entry, currentTime, cacheTTL) {
			// CACHE HIT - serve from cache (NO DATABASE QUERY)
			reqCopy := *req
			reqCopy.LatencyMS += 1 // 1ms for cache access
			reqCopy.Path = append(reqCopy.Path, "microservice-cache-hit-completed")

			// Update cache access tracking
			entry.AccessTime = currentTime
			entry.AccessCount++

			// Cache hits do NOT go to database - they complete here
			// In a real system, this response would go back to the client
			// Store separately - these do NOT get forwarded to database
			cacheHits = append(cacheHits, &reqCopy)
		} else {
			// CACHE MISS - need to query database
			reqCopy := *req

			// Add microservice processing latency
			processingLatency := baseLatencyMs

			// Simulate CPU-bound vs I/O-bound operations
			if req.Type == "GET" {
				processingLatency = baseLatencyMs // Fast reads
			} else if req.Type == "POST" || req.Type == "PUT" {
				processingLatency = baseLatencyMs + 10 // Writes take longer
			} else if req.Type == "COMPUTE" {
				processingLatency = baseLatencyMs + 50 // CPU-intensive operations
			}

			// Instance load affects latency (queuing delay)
			instanceLoad := m.calculateInstanceLoad(i, len(toProcess), instanceCount)
			if float64(instanceLoad) > float64(rpsCapacity)*0.8 { // Above 80% capacity
				processingLatency += int(float64(processingLatency) * 0.5) // 50% penalty
			}

			reqCopy.LatencyMS += processingLatency
			reqCopy.Path = append(reqCopy.Path, "microservice-cache-miss")

			// Store cache key in request for when database response comes back
			reqCopy.CacheKey = cacheKey

			// Forward to database for actual data
			dbRequests = append(dbRequests, &reqCopy)
		}
	}

	// For cache misses, we would normally wait for database response and then cache it
	// In this simulation, we'll immediately cache the "result" for future requests
	for _, req := range dbRequests {
		// Simulate caching the database response
		cache[req.CacheKey] = &MicroserviceCacheEntry{
			Data:        "cached-response-data",
			Timestamp:   currentTime,
			AccessTime:  currentTime,
			AccessCount: 1,
		}

		// Forward request to database
		output = append(output, req)
	}

	// Health check: service is healthy if not severely overloaded
	healthy := len(queue) <= totalCapacity*2 // Allow some buffering

	return output, healthy
}

// isCacheExpired checks if a cache entry has expired
func (m MicroserviceLogic) isCacheExpired(entry *MicroserviceCacheEntry, currentTime, ttl int) bool {
	return (currentTime - entry.Timestamp) > ttl
}

// calculateBaseLatency determines base processing time based on resources
func (m MicroserviceLogic) calculateBaseLatency(cpu, ramGb int) int {
	// Better CPU and RAM = lower base latency
	// Formula: base latency inversely proportional to resources
	cpuFactor := float64(cpu)
	ramFactor := float64(ramGb) / 4.0 // Normalize to 4GB baseline

	resourceScore := cpuFactor * ramFactor
	if resourceScore < 1 {
		resourceScore = 1
	}

	baseLatency := int(50.0 / resourceScore) // 50ms baseline for 2CPU/4GB
	if baseLatency < 5 {
		baseLatency = 5 // Minimum 5ms processing time
	}

	return baseLatency
}

// autoScale implements simple auto-scaling logic
func (m MicroserviceLogic) autoScale(currentInstances, currentLoad, rpsPerInstance int) int {
	// Calculate desired instances based on current load
	desiredInstances := int(math.Ceil(float64(currentLoad) / float64(rpsPerInstance)))

	// Scale up/down gradually (max 25% change per tick)
	maxChange := int(math.Max(1, float64(currentInstances)*0.25))

	if desiredInstances > currentInstances {
		// Scale up
		newInstances := currentInstances + maxChange
		if newInstances > desiredInstances {
			newInstances = desiredInstances
		}
		// Cap at reasonable maximum
		if newInstances > 20 {
			newInstances = 20
		}
		return newInstances
	} else if desiredInstances < currentInstances {
		// Scale down (more conservative)
		newInstances := currentInstances - int(math.Max(1, float64(maxChange)*0.5))
		if newInstances < desiredInstances {
			newInstances = desiredInstances
		}
		// Always maintain at least 1 instance
		if newInstances < 1 {
			newInstances = 1
		}
		return newInstances
	}

	return currentInstances
}

// calculateInstanceLoad estimates load on a specific instance
func (m MicroserviceLogic) calculateInstanceLoad(instanceID, totalRequests, instanceCount int) int {
	// Simple round-robin distribution
	baseLoad := totalRequests / instanceCount
	remainder := totalRequests % instanceCount

	if instanceID < remainder {
		return baseLoad + 1
	}
	return baseLoad
}
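For reference, a minimal sketch of how the gradual scale-up rule above plays out. This is illustrative only; it assumes the MicroserviceLogic type from this file and would sit in the same package with "fmt" imported.

func ExampleMicroserviceLogic_autoScale() {
	m := MicroserviceLogic{}
	// 4 instances at 100 RPS each with 1000 queued requests would want 10 instances,
	// but growth is capped at 25% per tick (max(1, 4*0.25) = 1), so the result is 5.
	fmt.Println(m.autoScale(4, 1000, 100))
	// Output: 5
}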
@@ -1,286 +0,0 @@
package simulation

import (
	"testing"
)

func TestMicroserviceLogic_BasicProcessing(t *testing.T) {
	logic := MicroserviceLogic{}

	props := map[string]any{
		"instanceCount":   2.0,
		"cpu":             4.0,
		"ramGb":           8.0,
		"rpsCapacity":     100.0,
		"scalingStrategy": "manual",
	}

	requests := []*Request{
		{ID: "1", Type: "GET", LatencyMS: 0, Path: []string{}},
		{ID: "2", Type: "POST", LatencyMS: 0, Path: []string{}},
	}

	output, healthy := logic.Tick(props, requests, 1)

	if !healthy {
		t.Error("Expected microservice to be healthy")
	}

	if len(output) != 2 {
		t.Errorf("Expected 2 processed requests, got %d", len(output))
	}

	// Verify latency was added
	for _, req := range output {
		if req.LatencyMS == 0 {
			t.Error("Expected latency to be added to processed request")
		}
		if len(req.Path) == 0 || req.Path[len(req.Path)-1] != "microservice-processed" {
			t.Error("Expected path to be updated with microservice-processed")
		}
	}
}

func TestMicroserviceLogic_CapacityLimit(t *testing.T) {
	logic := MicroserviceLogic{}

	props := map[string]any{
		"instanceCount":   1.0,
		"rpsCapacity":     2.0,
		"scalingStrategy": "manual",
	}

	// Send 4 requests, capacity is 2 (1 instance * 2 RPS)
	// This should be healthy since 4 <= totalCapacity*2 (4)
	requests := make([]*Request, 4)
	for i := range requests {
		requests[i] = &Request{ID: string(rune('1' + i)), Type: "GET", LatencyMS: 0}
	}

	output, healthy := logic.Tick(props, requests, 1)

	if !healthy {
		t.Error("Expected microservice to be healthy with moderate queuing")
	}

	// Should only process 2 requests (capacity limit)
	if len(output) != 2 {
		t.Errorf("Expected 2 processed requests due to capacity limit, got %d", len(output))
	}
}

func TestMicroserviceLogic_AutoScaling(t *testing.T) {
	logic := MicroserviceLogic{}

	props := map[string]any{
		"instanceCount":   1.0,
		"rpsCapacity":     10.0,
		"scalingStrategy": "auto",
	}

	// Send 25 requests to trigger scaling
	requests := make([]*Request, 25)
	for i := range requests {
		requests[i] = &Request{ID: string(rune('1' + i)), Type: "GET", LatencyMS: 0}
	}

	output, healthy := logic.Tick(props, requests, 1)

	// Check if instances were scaled up
	newInstanceCount := int(props["instanceCount"].(float64))
	if newInstanceCount <= 1 {
		t.Error("Expected auto-scaling to increase instance count")
	}

	// Should process more than 10 requests (original capacity)
	if len(output) <= 10 {
		t.Errorf("Expected auto-scaling to increase processing capacity, got %d", len(output))
	}

	if !healthy {
		t.Error("Expected microservice to be healthy after scaling")
	}
}

func TestMicroserviceLogic_ResourceBasedLatency(t *testing.T) {
	logic := MicroserviceLogic{}

	// High-resource microservice
	highResourceProps := map[string]any{
		"instanceCount":   1.0,
		"cpu":             8.0,
		"ramGb":           16.0,
		"rpsCapacity":     100.0,
		"scalingStrategy": "manual",
	}

	// Low-resource microservice
	lowResourceProps := map[string]any{
		"instanceCount":   1.0,
		"cpu":             1.0,
		"ramGb":           1.0,
		"rpsCapacity":     100.0,
		"scalingStrategy": "manual",
	}

	request := []*Request{{ID: "1", Type: "GET", LatencyMS: 0, Path: []string{}}}

	highOutput, _ := logic.Tick(highResourceProps, request, 1)
	lowOutput, _ := logic.Tick(lowResourceProps, request, 1)

	highLatency := highOutput[0].LatencyMS
	lowLatency := lowOutput[0].LatencyMS

	if lowLatency <= highLatency {
		t.Errorf("Expected low-resource microservice (%dms) to have higher latency than high-resource (%dms)",
			lowLatency, highLatency)
	}
}

func TestMicroserviceLogic_RequestTypeLatency(t *testing.T) {
	logic := MicroserviceLogic{}

	props := map[string]any{
		"instanceCount":   1.0,
		"cpu":             2.0,
		"ramGb":           4.0,
		"rpsCapacity":     100.0,
		"scalingStrategy": "manual",
	}

	getRequest := []*Request{{ID: "1", Type: "GET", LatencyMS: 0, Path: []string{}}}
	postRequest := []*Request{{ID: "2", Type: "POST", LatencyMS: 0, Path: []string{}}}
	computeRequest := []*Request{{ID: "3", Type: "COMPUTE", LatencyMS: 0, Path: []string{}}}

	getOutput, _ := logic.Tick(props, getRequest, 1)
	postOutput, _ := logic.Tick(props, postRequest, 1)
	computeOutput, _ := logic.Tick(props, computeRequest, 1)

	getLatency := getOutput[0].LatencyMS
	postLatency := postOutput[0].LatencyMS
	computeLatency := computeOutput[0].LatencyMS

	if getLatency >= postLatency {
		t.Errorf("Expected GET (%dms) to be faster than POST (%dms)", getLatency, postLatency)
	}

	if postLatency >= computeLatency {
		t.Errorf("Expected POST (%dms) to be faster than COMPUTE (%dms)", postLatency, computeLatency)
	}
}

func TestMicroserviceLogic_HighLoadLatencyPenalty(t *testing.T) {
	logic := MicroserviceLogic{}

	props := map[string]any{
		"instanceCount":   1.0,
		"cpu":             2.0,
		"ramGb":           4.0,
		"rpsCapacity":     10.0,
		"scalingStrategy": "manual",
	}

	// Low load scenario
	lowLoadRequest := []*Request{{ID: "1", Type: "GET", LatencyMS: 0, Path: []string{}}}
	lowOutput, _ := logic.Tick(props, lowLoadRequest, 1)
	lowLatency := lowOutput[0].LatencyMS

	// High load scenario (above 80% capacity threshold)
	highLoadRequests := make([]*Request, 9) // 90% of 10 RPS capacity
	for i := range highLoadRequests {
		highLoadRequests[i] = &Request{ID: string(rune('1' + i)), Type: "GET", LatencyMS: 0, Path: []string{}}
	}
	highOutput, _ := logic.Tick(props, highLoadRequests, 1)

	// Check if first request has higher latency due to load
	highLatency := highOutput[0].LatencyMS

	if highLatency <= lowLatency {
		t.Errorf("Expected high load scenario (%dms) to have higher latency than low load (%dms)",
			highLatency, lowLatency)
	}
}

func TestMicroserviceLogic_DefaultValues(t *testing.T) {
	logic := MicroserviceLogic{}

	// Empty props should use defaults
	props := map[string]any{}

	requests := []*Request{{ID: "1", Type: "GET", LatencyMS: 0, Path: []string{}}}

	output, healthy := logic.Tick(props, requests, 1)

	if !healthy {
		t.Error("Expected microservice to be healthy with default values")
	}

	if len(output) != 1 {
		t.Errorf("Expected 1 processed request with defaults, got %d", len(output))
	}

	// Should have reasonable default latency
	if output[0].LatencyMS <= 0 || output[0].LatencyMS > 100 {
		t.Errorf("Expected reasonable default latency, got %dms", output[0].LatencyMS)
	}
}

func TestMicroserviceLogic_UnhealthyWhenOverloaded(t *testing.T) {
	logic := MicroserviceLogic{}

	props := map[string]any{
		"instanceCount":   1.0,
		"rpsCapacity":     5.0,
		"scalingStrategy": "manual", // No auto-scaling
	}

	// Send way more requests than capacity (5 * 2 = 10 max before unhealthy)
	requests := make([]*Request, 15) // 3x capacity
	for i := range requests {
		requests[i] = &Request{ID: string(rune('1' + i)), Type: "GET", LatencyMS: 0}
	}

	output, healthy := logic.Tick(props, requests, 1)

	if healthy {
		t.Error("Expected microservice to be unhealthy when severely overloaded")
	}

	// Should still process up to capacity
	if len(output) != 5 {
		t.Errorf("Expected 5 processed requests despite being overloaded, got %d", len(output))
	}
}

func TestMicroserviceLogic_RoundRobinDistribution(t *testing.T) {
	logic := MicroserviceLogic{}

	props := map[string]any{
		"instanceCount":   3.0,
		"rpsCapacity":     10.0,
		"scalingStrategy": "manual",
	}

	// Send 6 requests to be distributed across 3 instances
	requests := make([]*Request, 6)
	for i := range requests {
		requests[i] = &Request{ID: string(rune('1' + i)), Type: "GET", LatencyMS: 0, Path: []string{}}
	}

	output, healthy := logic.Tick(props, requests, 1)

	if !healthy {
		t.Error("Expected microservice to be healthy")
	}

	if len(output) != 6 {
		t.Errorf("Expected 6 processed requests, got %d", len(output))
	}

	// All requests should be processed (within total capacity of 30)
	for _, req := range output {
		if req.LatencyMS <= 0 {
			t.Error("Expected all requests to have added latency")
		}
	}
}
@@ -1,221 +0,0 @@
package simulation

type MonitoringLogic struct{}

type MetricData struct {
	Timestamp    int
	LatencySum   int
	RequestCount int
	ErrorCount   int
	QueueSize    int
}

type AlertEvent struct {
	Timestamp  int
	MetricType string
	Value      float64
	Threshold  float64
	Unit       string
	Severity   string
}

func (m MonitoringLogic) Tick(props map[string]any, queue []*Request, tick int) ([]*Request, bool) {
	// Extract monitoring properties
	tool := AsString(props["tool"])
	if tool == "" {
		tool = "Prometheus" // default monitoring tool
	}

	alertMetric := AsString(props["alertMetric"])
	if alertMetric == "" {
		alertMetric = "latency" // default to latency monitoring
	}

	thresholdValue := int(AsFloat64(props["thresholdValue"]))
	if thresholdValue == 0 {
		thresholdValue = 100 // default threshold
	}

	thresholdUnit := AsString(props["thresholdUnit"])
	if thresholdUnit == "" {
		thresholdUnit = "ms" // default unit
	}

	// Get historical metrics from props
	metrics, ok := props["_metrics"].([]MetricData)
	if !ok {
		metrics = []MetricData{}
	}

	// Get alert history
	alerts, ok := props["_alerts"].([]AlertEvent)
	if !ok {
		alerts = []AlertEvent{}
	}

	currentTime := tick * 100 // Convert tick to milliseconds

	// Process all incoming requests (monitoring is pass-through)
	output := []*Request{}
	totalLatency := 0
	errorCount := 0

	for _, req := range queue {
		// Create a copy of the request to forward
		reqCopy := *req

		// Add minimal monitoring overhead (1-2ms for metric collection)
		monitoringOverhead := 1
		if tool == "Datadog" || tool == "New Relic" {
			monitoringOverhead = 2 // More feature-rich tools have slightly higher overhead
		}

		reqCopy.LatencyMS += monitoringOverhead
		reqCopy.Path = append(reqCopy.Path, "monitored")

		// Collect metrics from the request
		totalLatency += req.LatencyMS

		// Simple heuristic: requests with high latency are considered errors
		if req.LatencyMS > 1000 { // 1 second threshold for errors
			errorCount++
		}

		output = append(output, &reqCopy)
	}

	// Calculate current metrics
	avgLatency := 0.0
	if len(queue) > 0 {
		avgLatency = float64(totalLatency) / float64(len(queue))
	}

	// Store current metrics
	currentMetric := MetricData{
		Timestamp:    currentTime,
		LatencySum:   totalLatency,
		RequestCount: len(queue),
		ErrorCount:   errorCount,
		QueueSize:    len(queue),
	}

	// Add to metrics history (keep last 10 data points)
	metrics = append(metrics, currentMetric)
	if len(metrics) > 10 {
		metrics = metrics[1:]
	}

	// Check alert conditions
	shouldAlert := false
	alertValue := 0.0

	switch alertMetric {
	case "latency":
		alertValue = avgLatency
		if avgLatency > float64(thresholdValue) && len(queue) > 0 {
			shouldAlert = true
		}
	case "throughput":
		alertValue = float64(len(queue))
		if len(queue) < thresholdValue { // Low throughput alert
			shouldAlert = true
		}
	case "error_rate":
		errorRate := 0.0
		if len(queue) > 0 {
			errorRate = float64(errorCount) / float64(len(queue)) * 100
		}
		alertValue = errorRate
		if errorRate > float64(thresholdValue) {
			shouldAlert = true
		}
	case "queue_size":
		alertValue = float64(len(queue))
		if len(queue) > thresholdValue {
			shouldAlert = true
		}
	}

	// Generate alert if threshold exceeded
	if shouldAlert {
		severity := "warning"
		if alertValue > float64(thresholdValue)*1.5 { // 150% of threshold
			severity = "critical"
		}

		alert := AlertEvent{
			Timestamp:  currentTime,
			MetricType: alertMetric,
			Value:      alertValue,
			Threshold:  float64(thresholdValue),
			Unit:       thresholdUnit,
			Severity:   severity,
		}

		// Only add alert if it's not a duplicate of the last alert
		if len(alerts) == 0 || !m.isDuplicateAlert(alerts[len(alerts)-1], alert) {
			alerts = append(alerts, alert)
		}

		// Keep only last 20 alerts
		if len(alerts) > 20 {
			alerts = alerts[1:]
		}
	}

	// Update props with collected data
	props["_metrics"] = metrics
	props["_alerts"] = alerts
	props["_currentLatency"] = avgLatency
	props["_alertCount"] = len(alerts)

	// Monitoring system health - it's healthy unless it's completely overloaded
	healthy := len(queue) < 10000 // Can handle very high loads

	// If too many critical alerts recently, mark as unhealthy
	recentCriticalAlerts := 0
	for _, alert := range alerts {
		if currentTime-alert.Timestamp < 10000 && alert.Severity == "critical" { // Last 10 seconds
			recentCriticalAlerts++
		}
	}

	if recentCriticalAlerts > 5 {
		healthy = false
	}

	return output, healthy
}

// isDuplicateAlert checks if an alert is similar to the previous one to avoid spam
func (m MonitoringLogic) isDuplicateAlert(prev, current AlertEvent) bool {
	return prev.MetricType == current.MetricType &&
		prev.Severity == current.Severity &&
		(current.Timestamp-prev.Timestamp) < 5000 // Within 5 seconds
}

// Helper function to calculate moving average
func (m MonitoringLogic) calculateMovingAverage(metrics []MetricData, window int) float64 {
	if len(metrics) == 0 {
		return 0
	}

	start := 0
	if len(metrics) > window {
		start = len(metrics) - window
	}

	sum := 0.0
	count := 0
	for i := start; i < len(metrics); i++ {
		if metrics[i].RequestCount > 0 {
			sum += float64(metrics[i].LatencySum) / float64(metrics[i].RequestCount)
			count++
		}
	}

	if count == 0 {
		return 0
	}
	return sum / float64(count)
}
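As a worked example of the alert thresholding above: with alertMetric "latency" and thresholdValue 100, an average latency of 120 ms raises a "warning", while anything above 150 ms (1.5x the threshold) is escalated to "critical". The sketch below is illustrative only and assumes it lives in the same package; the function name is hypothetical.

func exampleCriticalLatencyAlert() string {
	m := MonitoringLogic{}
	props := map[string]any{"alertMetric": "latency", "thresholdValue": 100.0}
	reqs := []*Request{{ID: "r1", Type: "GET", LatencyMS: 180, Path: []string{}}}
	m.Tick(props, reqs, 1)
	alerts := props["_alerts"].([]AlertEvent)
	return alerts[0].Severity // "critical", because 180 > 1.5 * 100
}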
@@ -1,411 +0,0 @@
package simulation

import (
	"testing"
)

func TestMonitoringLogic_BasicPassthrough(t *testing.T) {
	logic := MonitoringLogic{}

	props := map[string]any{
		"tool":           "Prometheus",
		"alertMetric":    "latency",
		"thresholdValue": 100.0,
		"thresholdUnit":  "ms",
	}

	requests := []*Request{
		{ID: "1", Type: "GET", LatencyMS: 50, Path: []string{}},
		{ID: "2", Type: "POST", LatencyMS: 75, Path: []string{}},
	}

	output, healthy := logic.Tick(props, requests, 1)

	if !healthy {
		t.Error("Expected monitoring to be healthy")
	}

	if len(output) != 2 {
		t.Errorf("Expected 2 requests to pass through monitoring, got %d", len(output))
	}

	// Verify minimal latency overhead was added
	for i, req := range output {
		originalLatency := requests[i].LatencyMS
		if req.LatencyMS <= originalLatency {
			t.Errorf("Expected monitoring overhead to be added to latency")
		}
		if req.LatencyMS > originalLatency+5 {
			t.Errorf("Expected minimal monitoring overhead, got %d ms added", req.LatencyMS-originalLatency)
		}
		if len(req.Path) == 0 || req.Path[len(req.Path)-1] != "monitored" {
			t.Error("Expected path to be updated with 'monitored'")
		}
	}
}

func TestMonitoringLogic_MetricsCollection(t *testing.T) {
	logic := MonitoringLogic{}

	props := map[string]any{
		"tool":           "Datadog",
		"alertMetric":    "latency",
		"thresholdValue": 100.0,
		"thresholdUnit":  "ms",
	}

	requests := []*Request{
		{ID: "1", Type: "GET", LatencyMS: 50},
		{ID: "2", Type: "POST", LatencyMS: 150},
		{ID: "3", Type: "GET", LatencyMS: 75},
	}

	_, healthy := logic.Tick(props, requests, 1)

	if !healthy {
		t.Error("Expected monitoring to be healthy")
	}

	// Check that metrics were collected
	metrics, ok := props["_metrics"].([]MetricData)
	if !ok {
		t.Error("Expected metrics to be collected in props")
	}

	if len(metrics) != 1 {
		t.Errorf("Expected 1 metric data point, got %d", len(metrics))
	}

	metric := metrics[0]
	if metric.RequestCount != 3 {
		t.Errorf("Expected 3 requests counted, got %d", metric.RequestCount)
	}

	if metric.LatencySum != 275 { // 50 + 150 + 75
		t.Errorf("Expected latency sum of 275, got %d", metric.LatencySum)
	}

	// Check current latency calculation
	currentLatency, ok := props["_currentLatency"].(float64)
	if !ok {
		t.Error("Expected current latency to be calculated")
	}

	if currentLatency < 90 || currentLatency > 95 {
		t.Errorf("Expected average latency around 91.67, got %f", currentLatency)
	}
}

func TestMonitoringLogic_LatencyAlert(t *testing.T) {
	logic := MonitoringLogic{}

	props := map[string]any{
		"tool":           "Prometheus",
		"alertMetric":    "latency",
		"thresholdValue": 80.0,
		"thresholdUnit":  "ms",
	}

	// Send requests that exceed latency threshold
	requests := []*Request{
		{ID: "1", Type: "GET", LatencyMS: 100},
		{ID: "2", Type: "POST", LatencyMS: 120},
	}

	_, healthy := logic.Tick(props, requests, 1)

	if !healthy {
		t.Error("Expected monitoring to be healthy despite alerts")
	}

	// Check that alert was generated
	alerts, ok := props["_alerts"].([]AlertEvent)
	if !ok {
		t.Error("Expected alerts to be stored in props")
	}

	if len(alerts) != 1 {
		t.Errorf("Expected 1 alert to be generated, got %d", len(alerts))
	}

	alert := alerts[0]
	if alert.MetricType != "latency" {
		t.Errorf("Expected latency alert, got %s", alert.MetricType)
	}

	if alert.Threshold != 80.0 {
		t.Errorf("Expected threshold of 80, got %f", alert.Threshold)
	}

	if alert.Value < 80.0 {
		t.Errorf("Expected alert value to exceed threshold, got %f", alert.Value)
	}

	if alert.Severity != "warning" {
		t.Errorf("Expected warning severity, got %s", alert.Severity)
	}
}

func TestMonitoringLogic_ErrorRateAlert(t *testing.T) {
	logic := MonitoringLogic{}

	props := map[string]any{
		"tool":           "Prometheus",
		"alertMetric":    "error_rate",
		"thresholdValue": 20.0, // 20% error rate threshold
		"thresholdUnit":  "percent",
	}

	// Send mix of normal and high-latency (error) requests
	requests := []*Request{
		{ID: "1", Type: "GET", LatencyMS: 100},   // normal
		{ID: "2", Type: "POST", LatencyMS: 1200}, // error (>1000ms)
		{ID: "3", Type: "GET", LatencyMS: 200},   // normal
		{ID: "4", Type: "POST", LatencyMS: 1500}, // error
	}

	_, healthy := logic.Tick(props, requests, 1)

	if !healthy {
		t.Error("Expected monitoring to be healthy")
	}

	// Check that error rate alert was generated (50% error rate > 20% threshold)
	alerts, ok := props["_alerts"].([]AlertEvent)
	if !ok {
		t.Error("Expected alerts to be stored in props")
	}

	if len(alerts) != 1 {
		t.Errorf("Expected 1 alert to be generated, got %d", len(alerts))
	}

	alert := alerts[0]
	if alert.MetricType != "error_rate" {
		t.Errorf("Expected error_rate alert, got %s", alert.MetricType)
	}

	if alert.Value != 50.0 { // 2 errors out of 4 requests = 50%
		t.Errorf("Expected 50%% error rate, got %f", alert.Value)
	}
}

func TestMonitoringLogic_QueueSizeAlert(t *testing.T) {
	logic := MonitoringLogic{}

	props := map[string]any{
		"tool":           "Prometheus",
		"alertMetric":    "queue_size",
		"thresholdValue": 5.0,
		"thresholdUnit":  "requests",
	}

	// Send more requests than threshold
	requests := make([]*Request, 8)
	for i := range requests {
		requests[i] = &Request{ID: string(rune('1' + i)), Type: "GET", LatencyMS: 50}
	}

	_, healthy := logic.Tick(props, requests, 1)

	if !healthy {
		t.Error("Expected monitoring to be healthy with queue size alert")
	}

	// Check that queue size alert was generated
	alerts, ok := props["_alerts"].([]AlertEvent)
	if !ok {
		t.Error("Expected alerts to be stored in props")
	}

	if len(alerts) != 1 {
		t.Errorf("Expected 1 alert to be generated, got %d", len(alerts))
	}

	alert := alerts[0]
	if alert.MetricType != "queue_size" {
		t.Errorf("Expected queue_size alert, got %s", alert.MetricType)
	}

	if alert.Value != 8.0 {
		t.Errorf("Expected queue size of 8, got %f", alert.Value)
	}
}

func TestMonitoringLogic_CriticalAlert(t *testing.T) {
	logic := MonitoringLogic{}

	props := map[string]any{
		"tool":           "Prometheus",
		"alertMetric":    "latency",
		"thresholdValue": 100.0,
		"thresholdUnit":  "ms",
	}

	// Send requests with very high latency (over 150% of threshold)
	requests := []*Request{
		{ID: "1", Type: "GET", LatencyMS: 180}, // 180 > 150 (1.5 * 100)
		{ID: "2", Type: "POST", LatencyMS: 200},
	}

	_, healthy := logic.Tick(props, requests, 1)

	if !healthy {
		t.Error("Expected monitoring to be healthy")
	}

	alerts, ok := props["_alerts"].([]AlertEvent)
	if !ok {
		t.Error("Expected alerts to be stored in props")
	}

	if len(alerts) != 1 {
		t.Errorf("Expected 1 alert to be generated, got %d", len(alerts))
	}

	alert := alerts[0]
	if alert.Severity != "critical" {
		t.Errorf("Expected critical severity for high threshold breach, got %s", alert.Severity)
	}
}

func TestMonitoringLogic_DuplicateAlertSuppression(t *testing.T) {
	logic := MonitoringLogic{}

	props := map[string]any{
		"tool":           "Prometheus",
		"alertMetric":    "latency",
		"thresholdValue": 80.0,
		"thresholdUnit":  "ms",
	}

	requests := []*Request{
		{ID: "1", Type: "GET", LatencyMS: 100},
	}

	// First tick - should generate alert
	logic.Tick(props, requests, 1)

	alerts, _ := props["_alerts"].([]AlertEvent)
	if len(alerts) != 1 {
		t.Errorf("Expected 1 alert after first tick, got %d", len(alerts))
	}

	// Second tick immediately after - should suppress duplicate
	logic.Tick(props, requests, 2)

	alerts, _ = props["_alerts"].([]AlertEvent)
	if len(alerts) != 1 {
		t.Errorf("Expected duplicate alert to be suppressed, got %d alerts", len(alerts))
	}
}

func TestMonitoringLogic_DefaultValues(t *testing.T) {
	logic := MonitoringLogic{}

	// Empty props should use defaults
	props := map[string]any{}

	requests := []*Request{{ID: "1", Type: "GET", LatencyMS: 50, Path: []string{}}}

	output, healthy := logic.Tick(props, requests, 1)

	if !healthy {
		t.Error("Expected monitoring to be healthy with default values")
	}

	if len(output) != 1 {
		t.Errorf("Expected 1 request to pass through, got %d", len(output))
	}

	// Should have reasonable default monitoring overhead
	if output[0].LatencyMS <= 50 || output[0].LatencyMS > 55 {
		t.Errorf("Expected default monitoring overhead, got %dms total", output[0].LatencyMS)
	}
}

func TestMonitoringLogic_ToolSpecificOverhead(t *testing.T) {
	logic := MonitoringLogic{}

	// Test Prometheus (lower overhead)
	propsPrometheus := map[string]any{
		"tool": "Prometheus",
	}

	// Test Datadog (higher overhead)
	propsDatadog := map[string]any{
		"tool": "Datadog",
	}

	request := []*Request{{ID: "1", Type: "GET", LatencyMS: 50, Path: []string{}}}

	prometheusOutput, _ := logic.Tick(propsPrometheus, request, 1)
	datadogOutput, _ := logic.Tick(propsDatadog, request, 1)

	prometheusOverhead := prometheusOutput[0].LatencyMS - 50
	datadogOverhead := datadogOutput[0].LatencyMS - 50

	if datadogOverhead <= prometheusOverhead {
		t.Errorf("Expected Datadog (%dms) to have higher overhead than Prometheus (%dms)",
			datadogOverhead, prometheusOverhead)
	}
}

func TestMonitoringLogic_UnhealthyWithManyAlerts(t *testing.T) {
	logic := MonitoringLogic{}

	props := map[string]any{
		"tool":           "Prometheus",
		"alertMetric":    "latency",
		"thresholdValue": 50.0,
		"thresholdUnit":  "ms",
	}

	// Manually create many recent critical alerts to simulate an unhealthy state
	currentTime := 10000 // 10 seconds
	recentAlerts := []AlertEvent{
		{Timestamp: currentTime - 1000, MetricType: "latency", Severity: "critical", Value: 200},
		{Timestamp: currentTime - 2000, MetricType: "latency", Severity: "critical", Value: 180},
		{Timestamp: currentTime - 3000, MetricType: "latency", Severity: "critical", Value: 190},
		{Timestamp: currentTime - 4000, MetricType: "latency", Severity: "critical", Value: 170},
		{Timestamp: currentTime - 5000, MetricType: "latency", Severity: "critical", Value: 160},
		{Timestamp: currentTime - 6000, MetricType: "latency", Severity: "critical", Value: 150},
	}

	// Set up the props with existing critical alerts
	props["_alerts"] = recentAlerts

	// Make a low-latency request so that no additional alert is triggered
	requests := []*Request{{ID: "1", Type: "GET", LatencyMS: 40}}

	// This tick should recognize the existing critical alerts and mark the system as unhealthy
	_, healthy := logic.Tick(props, requests, 100) // tick 100 = 10000ms

	if healthy {
		t.Error("Expected monitoring to be unhealthy due to many recent critical alerts")
	}
}

func TestMonitoringLogic_MetricsHistoryLimit(t *testing.T) {
	logic := MonitoringLogic{}

	props := map[string]any{
		"tool": "Prometheus",
	}

	request := []*Request{{ID: "1", Type: "GET", LatencyMS: 50}}

	// Generate more than 10 metric data points
	for i := 0; i < 15; i++ {
		logic.Tick(props, request, i)
	}

	metrics, ok := props["_metrics"].([]MetricData)
	if !ok {
		t.Error("Expected metrics to be stored")
	}

	if len(metrics) != 10 {
		t.Errorf("Expected metrics history to be limited to 10, got %d", len(metrics))
	}
}
@@ -1,55 +0,0 @@
{
  "nodes": [
    {
      "id": "webserver",
      "type": "webserver",
      "position": { "x": 0, "y": 0 },
      "props": {
        "label": "Web Server",
        "rpsCapacity": 100
      }
    },
    {
      "id": "cache",
      "type": "cache",
      "position": { "x": 100, "y": 0 },
      "props": {
        "label": "Redis Cache",
        "cacheTTL": 300000,
        "maxEntries": 1000,
        "evictionPolicy": "LRU"
      }
    },
    {
      "id": "database",
      "type": "database",
      "position": { "x": 200, "y": 0 },
      "props": {
        "label": "Primary DB",
        "replication": 2,
        "maxRPS": 500,
        "baseLatencyMs": 20
      }
    }
  ],
  "connections": [
    {
      "source": "webserver",
      "target": "cache",
      "label": "Cache Lookup",
      "direction": "forward",
      "protocol": "Redis",
      "tls": false,
      "capacity": 1000
    },
    {
      "source": "cache",
      "target": "database",
      "label": "Cache Miss",
      "direction": "forward",
      "protocol": "TCP",
      "tls": true,
      "capacity": 1000
    }
  ]
}
@@ -1,35 +0,0 @@
{
  "nodes": [
    {
      "id": "webserver",
      "type": "webserver",
      "position": { "x": 0, "y": 0 },
      "props": {
        "label": "Web Server",
        "rpsCapacity": 100
      }
    },
    {
      "id": "database",
      "type": "database",
      "position": { "x": 100, "y": 0 },
      "props": {
        "label": "Primary DB",
        "replication": 2,
        "maxRPS": 500,
        "baseLatencyMs": 15
      }
    }
  ],
  "connections": [
    {
      "source": "webserver",
      "target": "database",
      "label": "DB Queries",
      "direction": "forward",
      "protocol": "TCP",
      "tls": true,
      "capacity": 1000
    }
  ]
}
@ -1,188 +0,0 @@ |
|||||||
{ |
|
||||||
"nodes": [ |
|
||||||
{ |
|
||||||
"id": "data-source", |
|
||||||
"type": "webserver", |
|
||||||
"position": { "x": 100, "y": 200 }, |
|
||||||
"props": { |
|
||||||
"label": "Data Ingestion API", |
|
||||||
"rpsCapacity": 500 |
|
||||||
} |
|
||||||
}, |
|
||||||
{ |
|
||||||
"id": "raw-data-queue", |
|
||||||
"type": "messageQueue", |
|
||||||
"position": { "x": 300, "y": 200 }, |
|
||||||
"props": { |
|
||||||
"label": "Raw Data Queue", |
|
||||||
"queueCapacity": 10000, |
|
||||||
"retentionSeconds": 3600, |
|
||||||
"processingRate": 200 |
|
||||||
} |
|
||||||
}, |
|
||||||
{ |
|
||||||
"id": "etl-pipeline-1", |
|
||||||
"type": "data pipeline", |
|
||||||
"position": { "x": 500, "y": 150 }, |
|
||||||
"props": { |
|
||||||
"label": "Data Cleansing Pipeline", |
|
||||||
"batchSize": 100, |
|
||||||
"transformation": "validate" |
|
||||||
} |
|
||||||
}, |
|
||||||
{ |
|
||||||
"id": "etl-pipeline-2", |
|
||||||
"type": "data pipeline", |
|
||||||
"position": { "x": 500, "y": 250 }, |
|
||||||
"props": { |
|
||||||
"label": "Data Transformation Pipeline", |
|
||||||
"batchSize": 50, |
|
||||||
"transformation": "aggregate" |
|
||||||
} |
|
||||||
}, |
|
||||||
{ |
|
||||||
"id": "ml-pipeline", |
|
||||||
"type": "data pipeline", |
|
||||||
"position": { "x": 700, "y": 150 }, |
|
||||||
"props": { |
|
||||||
"label": "ML Feature Pipeline", |
|
||||||
"batchSize": 200, |
|
||||||
"transformation": "enrich" |
|
||||||
} |
|
||||||
}, |
|
||||||
{ |
|
||||||
"id": "analytics-pipeline", |
|
||||||
"type": "data pipeline", |
|
||||||
"position": { "x": 700, "y": 250 }, |
|
||||||
"props": { |
|
||||||
"label": "Analytics Pipeline", |
|
||||||
"batchSize": 500, |
|
||||||
"transformation": "join" |
|
||||||
} |
|
||||||
}, |
|
||||||
{ |
|
||||||
"id": "cache-1", |
|
||||||
"type": "cache", |
|
||||||
"position": { "x": 900, "y": 150 }, |
|
||||||
"props": { |
|
||||||
"label": "Feature Cache", |
|
||||||
"cacheTTL": 300, |
|
||||||
"maxEntries": 50000, |
|
||||||
"evictionPolicy": "LRU" |
|
||||||
} |
|
||||||
}, |
|
||||||
{ |
|
||||||
"id": "data-warehouse", |
|
||||||
"type": "database", |
|
||||||
"position": { "x": 900, "y": 250 }, |
|
||||||
"props": { |
|
||||||
"label": "Data Warehouse", |
|
||||||
"replication": 3, |
|
||||||
"maxRPS": 1000, |
|
||||||
"baseLatencyMs": 50 |
|
||||||
} |
|
||||||
}, |
|
||||||
{ |
|
||||||
"id": "monitoring-1", |
|
||||||
"type": "monitoring/alerting", |
|
||||||
"position": { "x": 500, "y": 350 }, |
|
||||||
"props": { |
|
||||||
"label": "Pipeline Monitor", |
|
||||||
"tool": "Datadog", |
|
||||||
"alertMetric": "latency", |
|
||||||
"thresholdValue": 1000, |
|
||||||
"thresholdUnit": "ms" |
|
||||||
} |
|
||||||
}, |
|
||||||
{ |
|
||||||
"id": "compression-pipeline", |
|
||||||
"type": "data pipeline", |
|
||||||
"position": { "x": 300, "y": 350 }, |
|
||||||
"props": { |
|
||||||
"label": "Data Compression", |
|
||||||
"batchSize": 1000, |
|
||||||
"transformation": "compress" |
|
||||||
} |
|
||||||
} |
|
||||||
], |
|
||||||
"connections": [ |
|
||||||
{ |
|
||||||
"source": "data-source", |
|
||||||
"target": "raw-data-queue", |
|
||||||
"label": "Raw Data Stream", |
|
||||||
"protocol": "http" |
|
||||||
}, |
|
||||||
{ |
|
||||||
"source": "raw-data-queue", |
|
||||||
"target": "etl-pipeline-1", |
|
||||||
"label": "Data Validation", |
|
||||||
"protocol": "tcp" |
|
||||||
}, |
|
||||||
{ |
|
||||||
"source": "raw-data-queue", |
|
||||||
"target": "etl-pipeline-2", |
|
||||||
"label": "Data Transformation", |
|
||||||
"protocol": "tcp" |
|
||||||
}, |
|
||||||
{ |
|
||||||
"source": "etl-pipeline-1", |
|
||||||
"target": "ml-pipeline", |
|
||||||
"label": "Clean Data", |
|
||||||
"protocol": "tcp" |
|
||||||
}, |
|
||||||
{ |
|
||||||
"source": "etl-pipeline-2", |
|
||||||
"target": "analytics-pipeline", |
|
||||||
"label": "Transformed Data", |
|
||||||
"protocol": "tcp" |
|
||||||
}, |
|
||||||
{ |
|
||||||
"source": "ml-pipeline", |
|
||||||
"target": "cache-1", |
|
||||||
"label": "ML Features", |
|
||||||
"protocol": "tcp" |
|
||||||
}, |
|
||||||
{ |
|
||||||
"source": "analytics-pipeline", |
|
||||||
"target": "data-warehouse", |
|
||||||
"label": "Analytics Data", |
|
||||||
"protocol": "tcp" |
|
||||||
}, |
|
||||||
{ |
|
||||||
"source": "etl-pipeline-1", |
|
||||||
"target": "monitoring-1", |
|
||||||
"label": "Pipeline Metrics", |
|
||||||
"protocol": "http" |
|
||||||
}, |
|
||||||
{ |
|
||||||
"source": "etl-pipeline-2", |
|
||||||
"target": "monitoring-1", |
|
||||||
"label": "Pipeline Metrics", |
|
||||||
"protocol": "http" |
|
||||||
}, |
|
||||||
{ |
|
||||||
"source": "ml-pipeline", |
|
||||||
"target": "monitoring-1", |
|
||||||
"label": "Pipeline Metrics", |
|
||||||
"protocol": "http" |
|
||||||
}, |
|
||||||
{ |
|
||||||
"source": "analytics-pipeline", |
|
||||||
"target": "monitoring-1", |
|
||||||
"label": "Pipeline Metrics", |
|
||||||
"protocol": "http" |
|
||||||
}, |
|
||||||
{ |
|
||||||
"source": "raw-data-queue", |
|
||||||
"target": "compression-pipeline", |
|
||||||
"label": "Archive Stream", |
|
||||||
"protocol": "tcp" |
|
||||||
}, |
|
||||||
{ |
|
||||||
"source": "compression-pipeline", |
|
||||||
"target": "data-warehouse", |
|
||||||
"label": "Compressed Archive", |
|
||||||
"protocol": "tcp" |
|
||||||
} |
|
||||||
] |
|
||||||
} |
|
||||||
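The level files removed in this changeset all share one JSON shape: a list of nodes (id, type, position, props) and a list of connections (source, target, label, protocol, optional tls/capacity/direction). As a reference, here is a minimal sketch of Go types that could decode them; the field names mirror the JSON above, while the struct names and package placement are assumptions for illustration only.

package simulation

// Position, Node, Connection and Design mirror the level JSON shown above.
type Position struct {
	X float64 `json:"x"`
	Y float64 `json:"y"`
}

type Node struct {
	ID       string         `json:"id"`
	Type     string         `json:"type"`
	Position Position       `json:"position"`
	Props    map[string]any `json:"props"` // heterogeneous per component type (rpsCapacity, cacheTTL, batchSize, ...)
}

type Connection struct {
	Source    string  `json:"source"`
	Target    string  `json:"target"`
	Label     string  `json:"label"`
	Protocol  string  `json:"protocol"`
	Direction string  `json:"direction,omitempty"`
	TLS       bool    `json:"tls,omitempty"`
	Capacity  float64 `json:"capacity,omitempty"`
}

type Design struct {
	Nodes       []Node       `json:"nodes"`
	Connections []Connection `json:"connections"`
}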
@ -1,53 +0,0 @@ |
|||||||
{ |
|
||||||
"nodes": [ |
|
||||||
{ |
|
||||||
"id": "producer", |
|
||||||
"type": "webserver", |
|
||||||
"position": { "x": 0, "y": 0 }, |
|
||||||
"props": { |
|
||||||
"label": "Message Producer", |
|
||||||
"rpsCapacity": 50 |
|
||||||
} |
|
||||||
}, |
|
||||||
{ |
|
||||||
"id": "messagequeue", |
|
||||||
"type": "messageQueue", |
|
||||||
"position": { "x": 100, "y": 0 }, |
|
||||||
"props": { |
|
||||||
"label": "Event Queue", |
|
||||||
"queueCapacity": 1000, |
|
||||||
"retentionSeconds": 3600, |
|
||||||
"processingRate": 100 |
|
||||||
} |
|
||||||
}, |
|
||||||
{ |
|
||||||
"id": "consumer", |
|
||||||
"type": "webserver", |
|
||||||
"position": { "x": 200, "y": 0 }, |
|
||||||
"props": { |
|
||||||
"label": "Message Consumer", |
|
||||||
"rpsCapacity": 80 |
|
||||||
} |
|
||||||
} |
|
||||||
], |
|
||||||
"connections": [ |
|
||||||
{ |
|
||||||
"source": "producer", |
|
||||||
"target": "messagequeue", |
|
||||||
"label": "Publish Messages", |
|
||||||
"direction": "forward", |
|
||||||
"protocol": "AMQP", |
|
||||||
"tls": false, |
|
||||||
"capacity": 1000 |
|
||||||
}, |
|
||||||
{ |
|
||||||
"source": "messagequeue", |
|
||||||
"target": "consumer", |
|
||||||
"label": "Consume Messages", |
|
||||||
"direction": "forward", |
|
||||||
"protocol": "AMQP", |
|
||||||
"tls": false, |
|
||||||
"capacity": 1000 |
|
||||||
} |
|
||||||
] |
|
||||||
} |
|
||||||
@ -1,96 +0,0 @@ |
|||||||
{ |
|
||||||
"nodes": [ |
|
||||||
{ |
|
||||||
"id": "webserver-1", |
|
||||||
"type": "webserver", |
|
||||||
"position": { "x": 100, "y": 200 }, |
|
||||||
"props": { |
|
||||||
"label": "API Gateway", |
|
||||||
"rpsCapacity": 200 |
|
||||||
} |
|
||||||
}, |
|
||||||
{ |
|
||||||
"id": "lb-1", |
|
||||||
"type": "loadbalancer", |
|
||||||
"position": { "x": 300, "y": 200 }, |
|
||||||
"props": { |
|
||||||
"label": "API Gateway", |
|
||||||
"algorithm": "round-robin" |
|
||||||
} |
|
||||||
}, |
|
||||||
{ |
|
||||||
"id": "microservice-1", |
|
||||||
"type": "microservice", |
|
||||||
"position": { "x": 500, "y": 150 }, |
|
||||||
"props": { |
|
||||||
"label": "User Service", |
|
||||||
"instanceCount": 3, |
|
||||||
"cpu": 4, |
|
||||||
"ramGb": 8, |
|
||||||
"rpsCapacity": 100, |
|
||||||
"monthlyUsd": 150, |
|
||||||
"scalingStrategy": "auto", |
|
||||||
"apiVersion": "v2" |
|
||||||
} |
|
||||||
}, |
|
||||||
{ |
|
||||||
"id": "microservice-2", |
|
||||||
"type": "microservice", |
|
||||||
"position": { "x": 500, "y": 250 }, |
|
||||||
"props": { |
|
||||||
"label": "Order Service", |
|
||||||
"instanceCount": 2, |
|
||||||
"cpu": 2, |
|
||||||
"ramGb": 4, |
|
||||||
"rpsCapacity": 80, |
|
||||||
"monthlyUsd": 90, |
|
||||||
"scalingStrategy": "manual", |
|
||||||
"apiVersion": "v1" |
|
||||||
} |
|
||||||
}, |
|
||||||
{ |
|
||||||
"id": "db-1", |
|
||||||
"type": "database", |
|
||||||
"position": { "x": 700, "y": 200 }, |
|
||||||
"props": { |
|
||||||
"label": "PostgreSQL", |
|
||||||
"replication": 2, |
|
||||||
"maxRPS": 500, |
|
||||||
"baseLatencyMs": 15 |
|
||||||
} |
|
||||||
} |
|
||||||
], |
|
||||||
"connections": [ |
|
||||||
{ |
|
||||||
"source": "webserver-1", |
|
||||||
"target": "lb-1", |
|
||||||
"label": "HTTPS Requests", |
|
||||||
"protocol": "https", |
|
||||||
"tls": true |
|
||||||
}, |
|
||||||
{ |
|
||||||
"source": "lb-1", |
|
||||||
"target": "microservice-1", |
|
||||||
"label": "User API", |
|
||||||
"protocol": "http" |
|
||||||
}, |
|
||||||
{ |
|
||||||
"source": "lb-1", |
|
||||||
"target": "microservice-2", |
|
||||||
"label": "Order API", |
|
||||||
"protocol": "http" |
|
||||||
}, |
|
||||||
{ |
|
||||||
"source": "microservice-1", |
|
||||||
"target": "db-1", |
|
||||||
"label": "User Queries", |
|
||||||
"protocol": "tcp" |
|
||||||
}, |
|
||||||
{ |
|
||||||
"source": "microservice-2", |
|
||||||
"target": "db-1", |
|
||||||
"label": "Order Queries", |
|
||||||
"protocol": "tcp" |
|
||||||
} |
|
||||||
] |
|
||||||
} |
|
||||||
@ -1,127 +0,0 @@ |
|||||||
{ |
|
||||||
"nodes": [ |
|
||||||
{ |
|
||||||
"id": "webserver-1", |
|
||||||
"type": "webserver", |
|
||||||
"position": { "x": 100, "y": 200 }, |
|
||||||
"props": { |
|
||||||
"label": "Web Server", |
|
||||||
"rpsCapacity": 100 |
|
||||||
} |
|
||||||
}, |
|
||||||
{ |
|
||||||
"id": "monitor-1", |
|
||||||
"type": "monitoring/alerting", |
|
||||||
"position": { "x": 300, "y": 200 }, |
|
||||||
"props": { |
|
||||||
"label": "Prometheus Monitor", |
|
||||||
"tool": "Prometheus", |
|
||||||
"alertMetric": "latency", |
|
||||||
"thresholdValue": 80, |
|
||||||
"thresholdUnit": "ms" |
|
||||||
} |
|
||||||
}, |
|
||||||
{ |
|
||||||
"id": "lb-1", |
|
||||||
"type": "loadbalancer", |
|
||||||
"position": { "x": 500, "y": 200 }, |
|
||||||
"props": { |
|
||||||
"label": "Load Balancer", |
|
||||||
"algorithm": "round-robin" |
|
||||||
} |
|
||||||
}, |
|
||||||
{ |
|
||||||
"id": "microservice-1", |
|
||||||
"type": "microservice", |
|
||||||
"position": { "x": 700, "y": 150 }, |
|
||||||
"props": { |
|
||||||
"label": "User Service", |
|
||||||
"instanceCount": 2, |
|
||||||
"cpu": 2, |
|
||||||
"ramGb": 4, |
|
||||||
"rpsCapacity": 50, |
|
||||||
"scalingStrategy": "auto" |
|
||||||
} |
|
||||||
}, |
|
||||||
{ |
|
||||||
"id": "microservice-2", |
|
||||||
"type": "microservice", |
|
||||||
"position": { "x": 700, "y": 250 }, |
|
||||||
"props": { |
|
||||||
"label": "Order Service", |
|
||||||
"instanceCount": 1, |
|
||||||
"cpu": 1, |
|
||||||
"ramGb": 2, |
|
||||||
"rpsCapacity": 30, |
|
||||||
"scalingStrategy": "manual" |
|
||||||
} |
|
||||||
}, |
|
||||||
{ |
|
||||||
"id": "monitor-2", |
|
||||||
"type": "monitoring/alerting", |
|
||||||
"position": { "x": 900, "y": 200 }, |
|
||||||
"props": { |
|
||||||
"label": "Error Rate Monitor", |
|
||||||
"tool": "Datadog", |
|
||||||
"alertMetric": "error_rate", |
|
||||||
"thresholdValue": 5, |
|
||||||
"thresholdUnit": "percent" |
|
||||||
} |
|
||||||
}, |
|
||||||
{ |
|
||||||
"id": "db-1", |
|
||||||
"type": "database", |
|
||||||
"position": { "x": 1100, "y": 200 }, |
|
||||||
"props": { |
|
||||||
"label": "PostgreSQL", |
|
||||||
"replication": 2, |
|
||||||
"maxRPS": 200, |
|
||||||
"baseLatencyMs": 15 |
|
||||||
} |
|
||||||
} |
|
||||||
], |
|
||||||
"connections": [ |
|
||||||
{ |
|
||||||
"source": "webserver-1", |
|
||||||
"target": "monitor-1", |
|
||||||
"label": "HTTP Requests", |
|
||||||
"protocol": "http" |
|
||||||
}, |
|
||||||
{ |
|
||||||
"source": "monitor-1", |
|
||||||
"target": "lb-1", |
|
||||||
"label": "Monitored Requests", |
|
||||||
"protocol": "http" |
|
||||||
}, |
|
||||||
{ |
|
||||||
"source": "lb-1", |
|
||||||
"target": "microservice-1", |
|
||||||
"label": "User API", |
|
||||||
"protocol": "http" |
|
||||||
}, |
|
||||||
{ |
|
||||||
"source": "lb-1", |
|
||||||
"target": "microservice-2", |
|
||||||
"label": "Order API", |
|
||||||
"protocol": "http" |
|
||||||
}, |
|
||||||
{ |
|
||||||
"source": "microservice-1", |
|
||||||
"target": "monitor-2", |
|
||||||
"label": "Service Metrics", |
|
||||||
"protocol": "http" |
|
||||||
}, |
|
||||||
{ |
|
||||||
"source": "microservice-2", |
|
||||||
"target": "monitor-2", |
|
||||||
"label": "Service Metrics", |
|
||||||
"protocol": "http" |
|
||||||
}, |
|
||||||
{ |
|
||||||
"source": "monitor-2", |
|
||||||
"target": "db-1", |
|
||||||
"label": "Database Queries", |
|
||||||
"protocol": "tcp" |
|
||||||
} |
|
||||||
] |
|
||||||
} |
|
||||||
@ -1,164 +0,0 @@ |
|||||||
{ |
|
||||||
"nodes": [ |
|
||||||
{ |
|
||||||
"id": "webserver-1", |
|
||||||
"type": "webserver", |
|
||||||
"position": { "x": 100, "y": 200 }, |
|
||||||
"props": { |
|
||||||
"label": "E-commerce API", |
|
||||||
"rpsCapacity": 200 |
|
||||||
} |
|
||||||
}, |
|
||||||
{ |
|
||||||
"id": "microservice-1", |
|
||||||
"type": "microservice", |
|
||||||
"position": { "x": 300, "y": 200 }, |
|
||||||
"props": { |
|
||||||
"label": "Payment Service", |
|
||||||
"instanceCount": 2, |
|
||||||
"cpu": 4, |
|
||||||
"ramGb": 8, |
|
||||||
"rpsCapacity": 100, |
|
||||||
"scalingStrategy": "auto" |
|
||||||
} |
|
||||||
}, |
|
||||||
{ |
|
||||||
"id": "stripe-service", |
|
||||||
"type": "third party service", |
|
||||||
"position": { "x": 500, "y": 150 }, |
|
||||||
"props": { |
|
||||||
"label": "Stripe Payments", |
|
||||||
"provider": "Stripe", |
|
||||||
"latency": 180 |
|
||||||
} |
|
||||||
}, |
|
||||||
{ |
|
||||||
"id": "twilio-service", |
|
||||||
"type": "third party service", |
|
||||||
"position": { "x": 500, "y": 250 }, |
|
||||||
"props": { |
|
||||||
"label": "SMS Notifications", |
|
||||||
"provider": "Twilio", |
|
||||||
"latency": 250 |
|
||||||
} |
|
||||||
}, |
|
||||||
{ |
|
||||||
"id": "microservice-2", |
|
||||||
"type": "microservice", |
|
||||||
"position": { "x": 300, "y": 350 }, |
|
||||||
"props": { |
|
||||||
"label": "Notification Service", |
|
||||||
"instanceCount": 1, |
|
||||||
"cpu": 2, |
|
||||||
"ramGb": 4, |
|
||||||
"rpsCapacity": 50, |
|
||||||
"scalingStrategy": "manual" |
|
||||||
} |
|
||||||
}, |
|
||||||
{ |
|
||||||
"id": "sendgrid-service", |
|
||||||
"type": "third party service", |
|
||||||
"position": { "x": 500, "y": 350 }, |
|
||||||
"props": { |
|
||||||
"label": "Email Service", |
|
||||||
"provider": "SendGrid", |
|
||||||
"latency": 200 |
|
||||||
} |
|
||||||
}, |
|
||||||
{ |
|
||||||
"id": "slack-service", |
|
||||||
"type": "third party service", |
|
||||||
"position": { "x": 500, "y": 450 }, |
|
||||||
"props": { |
|
||||||
"label": "Slack Alerts", |
|
||||||
"provider": "Slack", |
|
||||||
"latency": 300 |
|
||||||
} |
|
||||||
}, |
|
||||||
{ |
|
||||||
"id": "monitor-1", |
|
||||||
"type": "monitoring/alerting", |
|
||||||
"position": { "x": 700, "y": 200 }, |
|
||||||
"props": { |
|
||||||
"label": "System Monitor", |
|
||||||
"tool": "Datadog", |
|
||||||
"alertMetric": "latency", |
|
||||||
"thresholdValue": 500, |
|
||||||
"thresholdUnit": "ms" |
|
||||||
} |
|
||||||
}, |
|
||||||
{ |
|
||||||
"id": "db-1", |
|
||||||
"type": "database", |
|
||||||
"position": { "x": 700, "y": 350 }, |
|
||||||
"props": { |
|
||||||
"label": "Transaction DB", |
|
||||||
"replication": 2, |
|
||||||
"maxRPS": 300, |
|
||||||
"baseLatencyMs": 20 |
|
||||||
} |
|
||||||
} |
|
||||||
], |
|
||||||
"connections": [ |
|
||||||
{ |
|
||||||
"source": "webserver-1", |
|
||||||
"target": "microservice-1", |
|
||||||
"label": "Payment Requests", |
|
||||||
"protocol": "https" |
|
||||||
}, |
|
||||||
{ |
|
||||||
"source": "microservice-1", |
|
||||||
"target": "stripe-service", |
|
||||||
"label": "Process Payment", |
|
||||||
"protocol": "https" |
|
||||||
}, |
|
||||||
{ |
|
||||||
"source": "microservice-1", |
|
||||||
"target": "twilio-service", |
|
||||||
"label": "SMS Confirmation", |
|
||||||
"protocol": "https" |
|
||||||
}, |
|
||||||
{ |
|
||||||
"source": "webserver-1", |
|
||||||
"target": "microservice-2", |
|
||||||
"label": "Notification Requests", |
|
||||||
"protocol": "https" |
|
||||||
}, |
|
||||||
{ |
|
||||||
"source": "microservice-2", |
|
||||||
"target": "sendgrid-service", |
|
||||||
"label": "Send Email", |
|
||||||
"protocol": "https" |
|
||||||
}, |
|
||||||
{ |
|
||||||
"source": "microservice-2", |
|
||||||
"target": "slack-service", |
|
||||||
"label": "Admin Alerts", |
|
||||||
"protocol": "https" |
|
||||||
}, |
|
||||||
{ |
|
||||||
"source": "stripe-service", |
|
||||||
"target": "monitor-1", |
|
||||||
"label": "Payment Metrics", |
|
||||||
"protocol": "http" |
|
||||||
}, |
|
||||||
{ |
|
||||||
"source": "twilio-service", |
|
||||||
"target": "monitor-1", |
|
||||||
"label": "SMS Metrics", |
|
||||||
"protocol": "http" |
|
||||||
}, |
|
||||||
{ |
|
||||||
"source": "sendgrid-service", |
|
||||||
"target": "monitor-1", |
|
||||||
"label": "Email Metrics", |
|
||||||
"protocol": "http" |
|
||||||
}, |
|
||||||
{ |
|
||||||
"source": "monitor-1", |
|
||||||
"target": "db-1", |
|
||||||
"label": "Store Metrics", |
|
||||||
"protocol": "tcp" |
|
||||||
} |
|
||||||
] |
|
||||||
} |
|
||||||
@ -1,219 +0,0 @@ |
|||||||
package simulation |
|
||||||
|
|
||||||
import ( |
|
||||||
"math/rand" |
|
||||||
) |
|
||||||
|
|
||||||
type ThirdPartyServiceLogic struct{} |
|
||||||
|
|
||||||
type ServiceStatus struct { |
|
||||||
IsUp bool |
|
||||||
LastCheck int |
|
||||||
FailureCount int |
|
||||||
SuccessCount int |
|
||||||
RateLimitHits int |
|
||||||
} |
|
||||||
|
|
||||||
func (t ThirdPartyServiceLogic) Tick(props map[string]any, queue []*Request, tick int) ([]*Request, bool) { |
|
||||||
// Extract third-party service properties
|
|
||||||
provider := AsString(props["provider"]) |
|
||||||
if provider == "" { |
|
||||||
provider = "Generic" // default provider
|
|
||||||
} |
|
||||||
|
|
||||||
baseLatency := int(AsFloat64(props["latency"])) |
|
||||||
if baseLatency == 0 { |
|
||||||
baseLatency = 200 // default 200ms latency
|
|
||||||
} |
|
||||||
|
|
||||||
// Get service status from props (persistent state)
|
|
||||||
status, ok := props["_serviceStatus"].(ServiceStatus) |
|
||||||
if !ok { |
|
||||||
status = ServiceStatus{ |
|
||||||
IsUp: true, |
|
||||||
LastCheck: tick, |
|
||||||
FailureCount: 0, |
|
||||||
SuccessCount: 0, |
|
||||||
RateLimitHits: 0, |
|
||||||
} |
|
||||||
} |
|
||||||
|
|
||||||
currentTime := tick * 100 // Convert tick to milliseconds
|
|
||||||
|
|
||||||
// Simulate service availability and characteristics based on provider
|
|
||||||
reliability := t.getProviderReliability(provider) |
|
||||||
rateLimitRPS := t.getProviderRateLimit(provider) |
|
||||||
latencyVariance := t.getProviderLatencyVariance(provider) |
|
||||||
|
|
||||||
// Check if service is down and should recover
|
|
||||||
if !status.IsUp { |
|
||||||
// Services typically recover after some time
|
|
||||||
if currentTime-status.LastCheck > 30000 { // 30 seconds downtime
|
|
||||||
status.IsUp = true |
|
||||||
status.FailureCount = 0 |
|
||||||
} |
|
||||||
} |
|
||||||
|
|
||||||
// Apply rate limiting - third-party services often have strict limits
|
|
||||||
requestsThisTick := len(queue) |
|
||||||
if requestsThisTick > rateLimitRPS { |
|
||||||
status.RateLimitHits++ |
|
||||||
// Only process up to rate limit
|
|
||||||
queue = queue[:rateLimitRPS] |
|
||||||
} |
|
||||||
|
|
||||||
output := []*Request{} |
|
||||||
|
|
||||||
for _, req := range queue { |
|
||||||
reqCopy := *req |
|
||||||
|
|
||||||
// Simulate service availability
|
|
||||||
if !status.IsUp { |
|
||||||
// Service is down - simulate timeout/error
|
|
||||||
reqCopy.LatencyMS += 10000 // 10 second timeout
|
|
||||||
reqCopy.Path = append(reqCopy.Path, "third-party-timeout") |
|
||||||
status.FailureCount++ |
|
||||||
} else { |
|
||||||
// Service is up - calculate response time
|
|
||||||
serviceLatency := t.calculateServiceLatency(provider, baseLatency, latencyVariance) |
|
||||||
|
|
||||||
// Random failure based on reliability
|
|
||||||
if rand.Float64() > reliability { |
|
||||||
// Service call failed
|
|
||||||
serviceLatency += 5000 // 5 second timeout on failure
|
|
||||||
reqCopy.Path = append(reqCopy.Path, "third-party-failed") |
|
||||||
status.FailureCount++ |
|
||||||
|
|
||||||
// If too many failures, mark service as down
|
|
||||||
if status.FailureCount > 5 { |
|
||||||
status.IsUp = false |
|
||||||
status.LastCheck = currentTime |
|
||||||
} |
|
||||||
} else { |
|
||||||
// Successful service call
|
|
||||||
reqCopy.Path = append(reqCopy.Path, "third-party-success") |
|
||||||
status.SuccessCount++ |
|
||||||
|
|
||||||
// Reset failure count on successful calls
|
|
||||||
if status.FailureCount > 0 { |
|
||||||
status.FailureCount-- |
|
||||||
} |
|
||||||
} |
|
||||||
|
|
||||||
reqCopy.LatencyMS += serviceLatency |
|
||||||
} |
|
||||||
|
|
||||||
output = append(output, &reqCopy) |
|
||||||
} |
|
||||||
|
|
||||||
// Update persistent state
|
|
||||||
props["_serviceStatus"] = status |
|
||||||
|
|
||||||
// Health check: service is healthy if external service is up and not excessively rate limited
|
|
||||||
// Allow some rate limiting but not too much
|
|
||||||
maxRateLimitHits := 10 // Allow up to 10 rate limit hits before considering unhealthy
|
|
||||||
healthy := status.IsUp && status.RateLimitHits < maxRateLimitHits |
|
||||||
|
|
||||||
return output, healthy |
|
||||||
} |
|
||||||
|
|
||||||
// getProviderReliability returns the reliability percentage for different providers
|
|
||||||
func (t ThirdPartyServiceLogic) getProviderReliability(provider string) float64 { |
|
||||||
switch provider { |
|
||||||
case "Stripe": |
|
||||||
return 0.999 // 99.9% uptime
|
|
||||||
case "Twilio": |
|
||||||
return 0.998 // 99.8% uptime
|
|
||||||
case "SendGrid": |
|
||||||
return 0.997 // 99.7% uptime
|
|
||||||
case "AWS": |
|
||||||
return 0.9995 // 99.95% uptime
|
|
||||||
case "Google": |
|
||||||
return 0.9999 // 99.99% uptime
|
|
||||||
case "Slack": |
|
||||||
return 0.995 // 99.5% uptime
|
|
||||||
case "GitHub": |
|
||||||
return 0.996 // 99.6% uptime
|
|
||||||
case "Shopify": |
|
||||||
return 0.998 // 99.8% uptime
|
|
||||||
default: |
|
||||||
return 0.99 // 99% uptime for generic services
|
|
||||||
} |
|
||||||
} |
|
||||||
|
|
||||||
// getProviderRateLimit returns the rate limit (requests per tick) for different providers
|
|
||||||
func (t ThirdPartyServiceLogic) getProviderRateLimit(provider string) int { |
|
||||||
switch provider { |
|
||||||
case "Stripe": |
|
||||||
return 100 // 100 requests per second (per tick in our sim)
|
|
||||||
case "Twilio": |
|
||||||
return 50 // More restrictive
|
|
||||||
case "SendGrid": |
|
||||||
return 200 // Email is typically higher volume
|
|
||||||
case "AWS": |
|
||||||
return 1000 // Very high limits
|
|
||||||
case "Google": |
|
||||||
return 500 // High but controlled
|
|
||||||
case "Slack": |
|
||||||
return 30 // Very restrictive for chat APIs
|
|
||||||
case "GitHub": |
|
||||||
return 60 // GitHub API limits
|
|
||||||
case "Shopify": |
|
||||||
return 80 // E-commerce API limits
|
|
||||||
default: |
|
||||||
return 100 // Default rate limit
|
|
||||||
} |
|
||||||
} |
|
||||||
|
|
||||||
// getProviderLatencyVariance returns the latency variance factor for different providers
|
|
||||||
func (t ThirdPartyServiceLogic) getProviderLatencyVariance(provider string) float64 { |
|
||||||
switch provider { |
|
||||||
case "Stripe": |
|
||||||
return 0.3 // Low variance, consistent performance
|
|
||||||
case "Twilio": |
|
||||||
return 0.5 // Moderate variance
|
|
||||||
case "SendGrid": |
|
||||||
return 0.4 // Email services are fairly consistent
|
|
||||||
case "AWS": |
|
||||||
return 0.2 // Very consistent
|
|
||||||
case "Google": |
|
||||||
return 0.25 // Very consistent
|
|
||||||
case "Slack": |
|
||||||
return 0.6 // Chat services can be variable
|
|
||||||
case "GitHub": |
|
||||||
return 0.4 // Moderate variance
|
|
||||||
case "Shopify": |
|
||||||
return 0.5 // E-commerce can be variable under load
|
|
||||||
default: |
|
||||||
return 0.5 // Default variance
|
|
||||||
} |
|
||||||
} |
|
||||||
|
|
||||||
// calculateServiceLatency computes the actual latency including variance
|
|
||||||
func (t ThirdPartyServiceLogic) calculateServiceLatency(provider string, baseLatency int, variance float64) int { |
|
||||||
// Add random variance to base latency
|
|
||||||
varianceMs := float64(baseLatency) * variance |
|
||||||
randomVariance := (rand.Float64() - 0.5) * 2 * varianceMs // -variance to +variance
|
|
||||||
|
|
||||||
finalLatency := float64(baseLatency) + randomVariance |
|
||||||
|
|
||||||
// Ensure minimum latency (can't be negative or too low)
|
|
||||||
if finalLatency < 10 { |
|
||||||
finalLatency = 10 |
|
||||||
} |
|
||||||
|
|
||||||
// Add provider-specific baseline adjustments
|
|
||||||
switch provider { |
|
||||||
case "AWS", "Google": |
|
||||||
// Cloud providers are typically fast
|
|
||||||
finalLatency *= 0.8 |
|
||||||
case "Slack": |
|
||||||
// Chat APIs can be slower
|
|
||||||
finalLatency *= 1.2 |
|
||||||
case "Twilio": |
|
||||||
// Telecom APIs have processing overhead
|
|
||||||
finalLatency *= 1.1 |
|
||||||
} |
|
||||||
|
|
||||||
return int(finalLatency) |
|
||||||
} |
|
||||||
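For orientation, the variance model above keeps each response within base ± base×variance milliseconds (with a 10 ms floor) before any provider-specific multiplier is applied. A small illustrative program, not part of the package, working out the bounds for the 150 ms Stripe base latency used in the tests that follow:

package main

import "fmt"

// latencyBounds mirrors the math in calculateServiceLatency: the random offset is
// uniform in [-base*variance, +base*variance], clamped to a 10 ms floor, before any
// provider-specific multiplier (Stripe has none).
func latencyBounds(baseMS int, variance float64) (minMS, maxMS float64) {
	spread := float64(baseMS) * variance
	minMS = float64(baseMS) - spread
	if minMS < 10 {
		minMS = 10
	}
	maxMS = float64(baseMS) + spread
	return minMS, maxMS
}

func main() {
	lo, hi := latencyBounds(150, 0.3) // Stripe: 150 ms base latency, 0.3 variance
	fmt.Printf("expected Stripe latency range: %.0f-%.0f ms\n", lo, hi) // 105-195 ms
}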
@ -1,382 +0,0 @@ |
|||||||
package simulation |
|
||||||
|
|
||||||
import ( |
|
||||||
"testing" |
|
||||||
) |
|
||||||
|
|
||||||
func TestThirdPartyServiceLogic_BasicProcessing(t *testing.T) { |
|
||||||
logic := ThirdPartyServiceLogic{} |
|
||||||
|
|
||||||
props := map[string]any{ |
|
||||||
"provider": "Stripe", |
|
||||||
"latency": 150.0, |
|
||||||
} |
|
||||||
|
|
||||||
requests := []*Request{ |
|
||||||
{ID: "1", Type: "POST", LatencyMS: 50, Path: []string{}}, |
|
||||||
{ID: "2", Type: "GET", LatencyMS: 30, Path: []string{}}, |
|
||||||
} |
|
||||||
|
|
||||||
output, healthy := logic.Tick(props, requests, 1) |
|
||||||
|
|
||||||
if !healthy { |
|
||||||
t.Error("Expected third party service to be healthy") |
|
||||||
} |
|
||||||
|
|
||||||
if len(output) != 2 { |
|
||||||
t.Errorf("Expected 2 processed requests, got %d", len(output)) |
|
||||||
} |
|
||||||
|
|
||||||
// Verify latency was added (should be around base latency with some variance)
|
|
||||||
for i, req := range output { |
|
||||||
originalLatency := requests[i].LatencyMS |
|
||||||
if req.LatencyMS <= originalLatency { |
|
||||||
t.Errorf("Expected third party service latency to be added") |
|
||||||
} |
|
||||||
|
|
||||||
// Check that path was updated
|
|
||||||
if len(req.Path) == 0 { |
|
||||||
t.Error("Expected path to be updated") |
|
||||||
} |
|
||||||
|
|
||||||
lastPathElement := req.Path[len(req.Path)-1] |
|
||||||
if lastPathElement != "third-party-success" && lastPathElement != "third-party-failed" { |
|
||||||
t.Errorf("Expected path to indicate success or failure, got %s", lastPathElement) |
|
||||||
} |
|
||||||
} |
|
||||||
} |
|
||||||
|
|
||||||
func TestThirdPartyServiceLogic_ProviderCharacteristics(t *testing.T) { |
|
||||||
logic := ThirdPartyServiceLogic{} |
|
||||||
|
|
||||||
providers := []string{"Stripe", "AWS", "Slack", "Twilio"} |
|
||||||
|
|
||||||
for _, provider := range providers { |
|
||||||
t.Run(provider, func(t *testing.T) { |
|
||||||
props := map[string]any{ |
|
||||||
"provider": provider, |
|
||||||
"latency": 100.0, |
|
||||||
} |
|
||||||
|
|
||||||
requests := []*Request{{ID: "1", Type: "POST", LatencyMS: 0, Path: []string{}}} |
|
||||||
|
|
||||||
output, healthy := logic.Tick(props, requests, 1) |
|
||||||
|
|
||||||
if !healthy { |
|
||||||
t.Errorf("Expected %s service to be healthy", provider) |
|
||||||
} |
|
||||||
|
|
||||||
if len(output) != 1 { |
|
||||||
t.Errorf("Expected 1 processed request for %s", provider) |
|
||||||
} |
|
||||||
|
|
||||||
// Verify latency characteristics
|
|
||||||
addedLatency := output[0].LatencyMS |
|
||||||
if addedLatency <= 0 { |
|
||||||
t.Errorf("Expected %s to add latency", provider) |
|
||||||
} |
|
||||||
|
|
||||||
// AWS and Google should be faster than Slack
|
|
||||||
if provider == "AWS" && addedLatency > 200 { |
|
||||||
t.Errorf("Expected AWS to have lower latency, got %dms", addedLatency) |
|
||||||
} |
|
||||||
}) |
|
||||||
} |
|
||||||
} |
|
||||||
|
|
||||||
func TestThirdPartyServiceLogic_RateLimiting(t *testing.T) { |
|
||||||
logic := ThirdPartyServiceLogic{} |
|
||||||
|
|
||||||
props := map[string]any{ |
|
||||||
"provider": "Slack", // Has low rate limit (30 RPS)
|
|
||||||
"latency": 100.0, |
|
||||||
} |
|
||||||
|
|
||||||
// Send more requests than rate limit
|
|
||||||
requests := make([]*Request, 50) // More than Slack's 30 RPS limit
|
|
||||||
for i := range requests { |
|
||||||
requests[i] = &Request{ID: string(rune('1' + i)), Type: "POST", LatencyMS: 0} |
|
||||||
} |
|
||||||
|
|
||||||
output, healthy := logic.Tick(props, requests, 1) |
|
||||||
|
|
||||||
// Should only process up to rate limit
|
|
||||||
if len(output) != 30 { |
|
||||||
t.Errorf("Expected 30 processed requests due to Slack rate limit, got %d", len(output)) |
|
||||||
} |
|
||||||
|
|
||||||
// Service should still be healthy with rate limiting
|
|
||||||
if !healthy { |
|
||||||
t.Error("Expected service to be healthy despite rate limiting") |
|
||||||
} |
|
||||||
|
|
||||||
// Check that rate limit hits were recorded
|
|
||||||
status, ok := props["_serviceStatus"].(ServiceStatus) |
|
||||||
if !ok { |
|
||||||
t.Error("Expected service status to be recorded") |
|
||||||
} |
|
||||||
|
|
||||||
if status.RateLimitHits != 1 { |
|
||||||
t.Errorf("Expected 1 rate limit hit, got %d", status.RateLimitHits) |
|
||||||
} |
|
||||||
} |
|
||||||
|
|
||||||
func TestThirdPartyServiceLogic_ServiceFailure(t *testing.T) { |
|
||||||
logic := ThirdPartyServiceLogic{} |
|
||||||
|
|
||||||
props := map[string]any{ |
|
||||||
"provider": "Generic", |
|
||||||
"latency": 100.0, |
|
||||||
} |
|
||||||
|
|
||||||
// Set up service as already having failures
|
|
||||||
status := ServiceStatus{ |
|
||||||
IsUp: false, |
|
||||||
LastCheck: 0, |
|
||||||
FailureCount: 6, |
|
||||||
} |
|
||||||
props["_serviceStatus"] = status |
|
||||||
|
|
||||||
requests := []*Request{{ID: "1", Type: "POST", LatencyMS: 50, Path: []string{}}} |
|
||||||
|
|
||||||
output, healthy := logic.Tick(props, requests, 1) |
|
||||||
|
|
||||||
if healthy { |
|
||||||
t.Error("Expected service to be unhealthy when external service is down") |
|
||||||
} |
|
||||||
|
|
||||||
if len(output) != 1 { |
|
||||||
t.Error("Expected request to be processed even when service is down") |
|
||||||
} |
|
||||||
|
|
||||||
// Should have very high latency due to timeout
|
|
||||||
if output[0].LatencyMS < 5000 { |
|
||||||
t.Errorf("Expected high latency for service failure, got %dms", output[0].LatencyMS) |
|
||||||
} |
|
||||||
|
|
||||||
// Check path indicates timeout
|
|
||||||
lastPath := output[0].Path[len(output[0].Path)-1] |
|
||||||
if lastPath != "third-party-timeout" { |
|
||||||
t.Errorf("Expected timeout path, got %s", lastPath) |
|
||||||
} |
|
||||||
} |
|
||||||
|
|
||||||
func TestThirdPartyServiceLogic_ServiceRecovery(t *testing.T) { |
|
||||||
logic := ThirdPartyServiceLogic{} |
|
||||||
|
|
||||||
props := map[string]any{ |
|
||||||
"provider": "Stripe", |
|
||||||
"latency": 100.0, |
|
||||||
} |
|
||||||
|
|
||||||
// Set up service as down but with old timestamp (should recover)
|
|
||||||
status := ServiceStatus{ |
|
||||||
IsUp: false, |
|
||||||
LastCheck: 0, // Very old timestamp
|
|
||||||
FailureCount: 3, |
|
||||||
} |
|
||||||
props["_serviceStatus"] = status |
|
||||||
|
|
||||||
requests := []*Request{{ID: "1", Type: "POST", LatencyMS: 50, Path: []string{}}} |
|
||||||
|
|
||||||
// Run with current tick that's more than 30 seconds later
|
|
||||||
_, healthy := logic.Tick(props, requests, 400) // 40 seconds later
|
|
||||||
|
|
||||||
if !healthy { |
|
||||||
t.Error("Expected service to be healthy after recovery") |
|
||||||
} |
|
||||||
|
|
||||||
// Check that service recovered
|
|
||||||
updatedStatus, ok := props["_serviceStatus"].(ServiceStatus) |
|
||||||
if !ok { |
|
||||||
t.Error("Expected updated service status") |
|
||||||
} |
|
||||||
|
|
||||||
if !updatedStatus.IsUp { |
|
||||||
t.Error("Expected service to have recovered") |
|
||||||
} |
|
||||||
|
|
||||||
if updatedStatus.FailureCount != 0 { |
|
||||||
t.Error("Expected failure count to be reset on recovery") |
|
||||||
} |
|
||||||
} |
|
||||||
|
|
||||||
func TestThirdPartyServiceLogic_ReliabilityDifferences(t *testing.T) { |
|
||||||
logic := ThirdPartyServiceLogic{} |
|
||||||
|
|
||||||
// Test different reliability levels
|
|
||||||
testCases := []struct { |
|
||||||
provider string |
|
||||||
expectedReliability float64 |
|
||||||
}{ |
|
||||||
{"AWS", 0.9995}, |
|
||||||
{"Google", 0.9999}, |
|
||||||
{"Stripe", 0.999}, |
|
||||||
{"Slack", 0.995}, |
|
||||||
{"Generic", 0.99}, |
|
||||||
} |
|
||||||
|
|
||||||
for _, tc := range testCases { |
|
||||||
reliability := logic.getProviderReliability(tc.provider) |
|
||||||
if reliability != tc.expectedReliability { |
|
||||||
t.Errorf("Expected %s reliability %.4f, got %.4f", |
|
||||||
tc.provider, tc.expectedReliability, reliability) |
|
||||||
} |
|
||||||
} |
|
||||||
} |
|
||||||
|
|
||||||
func TestThirdPartyServiceLogic_RateLimitDifferences(t *testing.T) { |
|
||||||
logic := ThirdPartyServiceLogic{} |
|
||||||
|
|
||||||
// Test different rate limits
|
|
||||||
testCases := []struct { |
|
||||||
provider string |
|
||||||
expectedLimit int |
|
||||||
}{ |
|
||||||
{"AWS", 1000}, |
|
||||||
{"Stripe", 100}, |
|
||||||
{"Slack", 30}, |
|
||||||
{"SendGrid", 200}, |
|
||||||
{"Twilio", 50}, |
|
||||||
} |
|
||||||
|
|
||||||
for _, tc := range testCases { |
|
||||||
rateLimit := logic.getProviderRateLimit(tc.provider) |
|
||||||
if rateLimit != tc.expectedLimit { |
|
||||||
t.Errorf("Expected %s rate limit %d, got %d", |
|
||||||
tc.provider, tc.expectedLimit, rateLimit) |
|
||||||
} |
|
||||||
} |
|
||||||
} |
|
||||||
|
|
||||||
func TestThirdPartyServiceLogic_LatencyVariance(t *testing.T) { |
|
||||||
logic := ThirdPartyServiceLogic{} |
|
||||||
|
|
||||||
props := map[string]any{ |
|
||||||
"provider": "Stripe", |
|
||||||
"latency": 100.0, |
|
||||||
} |
|
||||||
|
|
||||||
requests := []*Request{{ID: "1", Type: "POST", LatencyMS: 0, Path: []string{}}} |
|
||||||
|
|
||||||
latencies := []int{} |
|
||||||
|
|
||||||
// Run multiple times to observe variance
|
|
||||||
for i := 0; i < 10; i++ { |
|
||||||
output, _ := logic.Tick(props, requests, i) |
|
||||||
latencies = append(latencies, output[0].LatencyMS) |
|
||||||
} |
|
||||||
|
|
||||||
// Check that we have variance (not all latencies are the same)
|
|
||||||
allSame := true |
|
||||||
firstLatency := latencies[0] |
|
||||||
for _, latency := range latencies[1:] { |
|
||||||
if latency != firstLatency { |
|
||||||
allSame = false |
|
||||||
break |
|
||||||
} |
|
||||||
} |
|
||||||
|
|
||||||
if allSame { |
|
||||||
t.Error("Expected latency variance, but all latencies were the same") |
|
||||||
} |
|
||||||
|
|
||||||
// All latencies should be reasonable (between 50ms and 300ms for Stripe)
|
|
||||||
for _, latency := range latencies { |
|
||||||
if latency < 50 || latency > 300 { |
|
||||||
t.Errorf("Expected reasonable latency for Stripe, got %dms", latency) |
|
||||||
} |
|
||||||
} |
|
||||||
} |
|
||||||
|
|
||||||
func TestThirdPartyServiceLogic_DefaultValues(t *testing.T) { |
|
||||||
logic := ThirdPartyServiceLogic{} |
|
||||||
|
|
||||||
// Empty props should use defaults
|
|
||||||
props := map[string]any{} |
|
||||||
|
|
||||||
requests := []*Request{{ID: "1", Type: "POST", LatencyMS: 0, Path: []string{}}} |
|
||||||
|
|
||||||
output, healthy := logic.Tick(props, requests, 1) |
|
||||||
|
|
||||||
if !healthy { |
|
||||||
t.Error("Expected service to be healthy with default values") |
|
||||||
} |
|
||||||
|
|
||||||
if len(output) != 1 { |
|
||||||
t.Error("Expected 1 processed request with defaults") |
|
||||||
} |
|
||||||
|
|
||||||
// Should have reasonable default latency (around 200ms base)
|
|
||||||
if output[0].LatencyMS < 100 || output[0].LatencyMS > 400 { |
|
||||||
t.Errorf("Expected reasonable default latency, got %dms", output[0].LatencyMS) |
|
||||||
} |
|
||||||
} |
|
||||||
|
|
||||||
func TestThirdPartyServiceLogic_SuccessCountTracking(t *testing.T) { |
|
||||||
logic := ThirdPartyServiceLogic{} |
|
||||||
|
|
||||||
props := map[string]any{ |
|
||||||
"provider": "AWS", // High reliability
|
|
||||||
"latency": 50.0, |
|
||||||
} |
|
||||||
|
|
||||||
requests := []*Request{{ID: "1", Type: "POST", LatencyMS: 0, Path: []string{}}} |
|
||||||
|
|
||||||
// Run multiple successful requests
|
|
||||||
for i := 0; i < 5; i++ { |
|
||||||
logic.Tick(props, requests, i) |
|
||||||
} |
|
||||||
|
|
||||||
status, ok := props["_serviceStatus"].(ServiceStatus) |
|
||||||
if !ok { |
|
||||||
t.Error("Expected service status to be tracked") |
|
||||||
} |
|
||||||
|
|
||||||
// Should have accumulated success count
|
|
||||||
if status.SuccessCount == 0 { |
|
||||||
t.Error("Expected success count to be tracked") |
|
||||||
} |
|
||||||
|
|
||||||
// Should be healthy
|
|
||||||
if !status.IsUp { |
|
||||||
t.Error("Expected service to remain up with successful calls") |
|
||||||
} |
|
||||||
} |
|
||||||
|
|
||||||
func TestThirdPartyServiceLogic_FailureRecovery(t *testing.T) { |
|
||||||
logic := ThirdPartyServiceLogic{} |
|
||||||
|
|
||||||
props := map[string]any{ |
|
||||||
"provider": "Generic", |
|
||||||
"latency": 100.0, |
|
||||||
} |
|
||||||
|
|
||||||
// Set up service with some failures but still up
|
|
||||||
status := ServiceStatus{ |
|
||||||
IsUp: true, |
|
||||||
FailureCount: 3, |
|
||||||
SuccessCount: 0, |
|
||||||
} |
|
||||||
props["_serviceStatus"] = status |
|
||||||
|
|
||||||
requests := []*Request{{ID: "1", Type: "POST", LatencyMS: 0, Path: []string{}}} |
|
||||||
|
|
||||||
// Simulate a successful call (with high probability for Generic service)
|
|
||||||
// We'll run this multiple times to ensure we get at least one success
|
|
||||||
successFound := false |
|
||||||
for i := 0; i < 10 && !successFound; i++ { |
|
||||||
output, _ := logic.Tick(props, requests, i) |
|
||||||
if len(output[0].Path) > 0 && output[0].Path[len(output[0].Path)-1] == "third-party-success" { |
|
||||||
successFound = true |
|
||||||
} |
|
||||||
} |
|
||||||
|
|
||||||
if successFound { |
|
||||||
updatedStatus, _ := props["_serviceStatus"].(ServiceStatus) |
|
||||||
// Failure count should have decreased
|
|
||||||
if updatedStatus.FailureCount >= 3 { |
|
||||||
t.Error("Expected failure count to decrease after successful call") |
|
||||||
} |
|
||||||
} |
|
||||||
} |
|
||||||
@ -1,18 +0,0 @@ |
|||||||
package simulation |
|
||||||
|
|
||||||
// UserLogic represents the behavior of user components in the simulation.
|
|
||||||
// User components serve as traffic sources and don't process requests themselves.
|
|
||||||
// Traffic generation is handled by the simulation engine at the entry point.
|
|
||||||
type UserLogic struct{} |
|
||||||
|
|
||||||
// Tick implements the NodeLogic interface for User components.
|
|
||||||
// User components don't process requests - they just pass them through.
|
|
||||||
// The simulation engine handles traffic generation at entry points.
|
|
||||||
func (u UserLogic) Tick(props map[string]any, queue []*Request, tick int) ([]*Request, bool) { |
|
||||||
// User components just pass through any requests they receive
|
|
||||||
// In practice, User components are typically entry points so they
|
|
||||||
// receive requests from the simulation engine itself
|
|
||||||
return queue, true |
|
||||||
} |
|
||||||
|
|
||||||
|
|
||||||
@ -1,93 +0,0 @@ |
|||||||
package handlers |
|
||||||
|
|
||||||
import ( |
|
||||||
"context" |
|
||||||
"encoding/json" |
|
||||||
"fmt" |
|
||||||
"log" |
|
||||||
"net/http" |
|
||||||
"os" |
|
||||||
|
|
||||||
"github.com/gorilla/websocket" |
|
||||||
claude "github.com/potproject/claude-sdk-go" |
|
||||||
) |
|
||||||
|
|
||||||
var upgrader = websocket.Upgrader{ |
|
||||||
CheckOrigin: func(r *http.Request) bool { |
|
||||||
return true |
|
||||||
}, |
|
||||||
} |
|
||||||
|
|
||||||
type MessageReceived struct { |
|
||||||
Message string `json:"message"` |
|
||||||
DesignPayload string `json:"designPayload"` |
|
||||||
} |
|
||||||
|
|
||||||
func Messages(w http.ResponseWriter, r *http.Request) { |
|
||||||
conn, err := upgrader.Upgrade(w, r, nil) |
|
||||||
if err != nil { |
|
||||||
log.Printf("WebSocket upgrade failed: %v", err) |
|
||||||
return |
|
||||||
} |
|
||||||
defer conn.Close() |
|
||||||
|
|
||||||
client := claude.NewClient(os.Getenv("CLAUDE_API_KEY")) |
|
||||||
|
|
||||||
for { |
|
||||||
messageType, message, err := conn.ReadMessage() |
|
||||||
if err != nil { |
|
||||||
if websocket.IsUnexpectedCloseError(err, websocket.CloseGoingAway, websocket.CloseAbnormalClosure) { |
|
||||||
log.Printf("WebSocket error: %v", err) |
|
||||||
} |
|
||||||
break |
|
||||||
} |
|
||||||
|
|
||||||
var messageReceived MessageReceived |
|
||||||
err = json.Unmarshal(message, &messageReceived) |
|
||||||
if err != nil { |
|
||||||
fmt.Printf("error unmarshalling response: %v", err) |
|
||||||
continue |
|
||||||
} |
|
||||||
|
|
||||||
if messageReceived.Message == "" { |
|
||||||
messageReceived.Message = "<user did not send text>" |
|
||||||
} |
|
||||||
// Note: messageReceived.Message is already properly parsed from JSON, no need to overwrite it
|
|
||||||
|
|
||||||
prompt := fmt.Sprintf("You are a tutor that helps people learn system design. You will be given a JSON payload that looks like %s. The nodes are the components a user can put into their design and the connections will tell you how they are connected. The level name identifies what problem they are working on as well as a difficulty level. Each level has an easy, medium or hard setting. Also in the payload, there is a list of components that a user can use to build their design. Your hints and responses should only refer to these components and not refer to things that the user cannot use. Always refer to the nodes by their type. Please craft your response as if you're talking to the user. And do not reference the payload as \"payload\" but as their design. Also, please do not show the payload in your response. Do not refer to components as node-0 or whatever. Always refer to the type of component they are. Always assume that the source of traffic for any system is a user. The user component will not be visible in teh payload. Also make sure you use html to format your answer. Do not over format your response. Only use p tags. Format lists using proper lists html. Anytime the user sends a different payload back to you, make note of what is correct. Never give the actual answer, only helpful hints. If the available components do not allow the user to feasibly solve the system design problem, you should mention it and then tell them what exactly is missing from the list.", messageReceived.DesignPayload) |
|
||||||
|
|
||||||
m := claude.RequestBodyMessages{ |
|
||||||
Model: "claude-3-7-sonnet-20250219", |
|
||||||
MaxTokens: 1024, |
|
||||||
SystemTypeText: []claude.RequestBodySystemTypeText{ |
|
||||||
claude.UseSystemCacheEphemeral(prompt), |
|
||||||
}, |
|
||||||
Messages: []claude.RequestBodyMessagesMessages{ |
|
||||||
{ |
|
||||||
Role: claude.MessagesRoleUser, |
|
||||||
ContentTypeText: []claude.RequestBodyMessagesMessagesContentTypeText{ |
|
||||||
{ |
|
||||||
Text: messageReceived.Message, |
|
||||||
CacheControl: claude.UseCacheEphemeral(), |
|
||||||
}, |
|
||||||
}, |
|
||||||
}, |
|
||||||
}, |
|
||||||
} |
|
||||||
|
|
||||||
ctx := context.Background() |
|
||||||
res, err := client.CreateMessages(ctx, m) |
|
||||||
if err != nil { |
|
||||||
fmt.Printf("error creating messages: %v", err) |
|
||||||
} |
|
||||||
|
|
||||||
// Send the assistant's response back to the client
|
|
||||||
err = conn.WriteMessage(messageType, []byte(res.Content[0].Text)) |
|
||||||
if err != nil { |
|
||||||
log.Printf("Write error: %v", err) |
|
||||||
break |
|
||||||
} |
|
||||||
} |
|
||||||
|
|
||||||
log.Println("Client disconnected") |
|
||||||
} |
|
||||||
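This handler upgrades the connection itself, so it registers like any other http.HandlerFunc. A minimal wiring sketch is below; the module path and listen address are placeholders, while the /ws route matches the WebSocket URL opened by the front-end code further down.

package main

import (
	"log"
	"net/http"

	"example.com/systemdesign/handlers" // placeholder import path for the handlers package above
)

func main() {
	// CLAUDE_API_KEY must be set in the environment before the first message arrives.
	http.HandleFunc("/ws", handlers.Messages)
	log.Fatal(http.ListenAndServe(":8080", nil)) // listen address is arbitrary for this sketch
}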
@ -1,306 +0,0 @@ |
|||||||
{{ define "canvas" }} |
|
||||||
<div id="canvas-wrapper"> |
|
||||||
<input class="tabinput" type="radio" id="tab1" name="tab" checked> |
|
||||||
<input class="tabinput" type="radio" id="tab2" name="tab"> |
|
||||||
<!-- <input class="tabinput" type="radio" id="tab3" name="tab"> --> |
|
||||||
|
|
||||||
<div class="tabs"> |
|
||||||
<div class="tab-labels"> |
|
||||||
<label for="tab1">Requirements</label> |
|
||||||
<label for="tab2">Design</label> |
|
||||||
<!-- <label for="tab3">Resources</label> --> |
|
||||||
</div> |
|
||||||
|
|
||||||
<!-- Requirements --> |
|
||||||
<div id="content1" class="tab-content"> |
|
||||||
{{ if .Level.InterviewerRequirements }} |
|
||||||
<div class="requirements-section"> |
|
||||||
<h3>Interviewer Requirements</h3> |
|
||||||
<ul class="requirements-list"> |
|
||||||
{{ range .Level.InterviewerRequirements }} |
|
||||||
<li class="requirement-item">{{ . }}</li> |
|
||||||
{{ end }} |
|
||||||
</ul> |
|
||||||
</div> |
|
||||||
{{ end }} |
|
||||||
|
|
||||||
{{ if .Level.FunctionalRequirements }} |
|
||||||
<div class="requirements-section"> |
|
||||||
<h3>Functional Requirements</h3> |
|
||||||
<ul class="requirements-list"> |
|
||||||
{{ range .Level.FunctionalRequirements }} |
|
||||||
<li class="requirement-item">{{ . }}</li> |
|
||||||
{{ end }} |
|
||||||
</ul> |
|
||||||
</div> |
|
||||||
{{ end }} |
|
||||||
|
|
||||||
{{ if .Level.NonFunctionalRequirements }} |
|
||||||
<div class="requirements-section"> |
|
||||||
<h3>Non-Functional Requirements</h3> |
|
||||||
<ul class="requirements-list"> |
|
||||||
{{ range .Level.NonFunctionalRequirements }} |
|
||||||
<li class="requirement-item">{{ . }}</li> |
|
||||||
{{ end }} |
|
||||||
</ul> |
|
||||||
</div> |
|
||||||
{{ end }} |
|
||||||
|
|
||||||
<div class="continue-section"> |
|
||||||
<button class="continue-button" id="create-design-button">Create your design</button> |
|
||||||
<button class="continue-button" id="learn-more-button">Learn more</button> |
|
||||||
</div> |
|
||||||
</div> |
|
||||||
|
|
||||||
<!-- Design--> |
|
||||||
<div id="content2" class="tab-content"> |
|
||||||
<div id="sidebar"> |
|
||||||
<div class="component-icon" draggable="true" data-type="user"> |
|
||||||
user |
|
||||||
</div> |
|
||||||
|
|
||||||
<div class="component-icon" draggable="true" data-type="loadBalancer"> |
|
||||||
load balancer |
|
||||||
</div> |
|
||||||
|
|
||||||
<div class="component-icon" draggable="true" data-type="webserver"> |
|
||||||
webserver |
|
||||||
</div> |
|
||||||
<div class="component-icon" draggable="true" data-type="database"> |
|
||||||
database |
|
||||||
</div> |
|
||||||
|
|
||||||
<div class="component-icon" draggable="true" data-type="cache"> |
|
||||||
cache |
|
||||||
</div> |
|
||||||
|
|
||||||
<div class="component-icon" draggable="true" data-type="messageQueue"> |
|
||||||
message queue |
|
||||||
</div> |
|
||||||
|
|
||||||
<div class="component-icon" draggable="true" data-type="cdn"> |
|
||||||
CDN |
|
||||||
</div> |
|
||||||
|
|
||||||
<div class="component-icon" draggable="true" data-type="microservice"> |
|
||||||
microservice node |
|
||||||
</div> |
|
||||||
|
|
||||||
<div class="component-icon" draggable="true" data-type="data pipeline"> |
|
||||||
data pipeline |
|
||||||
</div> |
|
||||||
|
|
||||||
<div class="component-icon" draggable="true" data-type="monitoring/alerting"> |
|
||||||
monitoring/alerting |
|
||||||
</div> |
|
||||||
|
|
||||||
<div class="component-icon" draggable="true" data-type="third party service"> |
|
||||||
third-party service |
|
||||||
</div> |
|
||||||
</div> |
|
||||||
|
|
||||||
<div id="canvas-container"> |
|
||||||
<div id="connection-modal" style="display: none;" class="modal"> |
|
||||||
<div class="modal-content"> |
|
||||||
<h3>Create Connection</h3> |
|
||||||
<label> |
|
||||||
Label: |
|
||||||
<input type="text" id="connection-label" value="Read traffic"> |
|
||||||
</label> |
|
||||||
<label> |
|
||||||
Protocol: |
|
||||||
<select id="connection-protocol"> |
|
||||||
<option>HTTP</option> |
|
||||||
<option>HTTPS</option> |
|
||||||
<option>Database</option> |
|
||||||
<option>Redis</option> |
|
||||||
</select> |
|
||||||
</label> |
|
||||||
<label style="margin-top: 10px;"> |
|
||||||
<input type="checkbox" id="connection-tls"> |
|
||||||
Enable TLS (encryption) |
|
||||||
</label> |
|
||||||
<label for="connection-capacity">Capacity Limit (RPS):</label> |
|
||||||
<input type="number" id="connection-capacity" value="1000" min="1" /> |
|
||||||
<div class="modal-actions"> |
|
||||||
<button id="connection-save">Save</button> |
|
||||||
<button id="connection-cancel">Cancel</button> |
|
||||||
</div> |
|
||||||
</div> |
|
||||||
</div> |
|
||||||
<div id="canvas-toolbar"> |
|
||||||
<button id="arrow-tool-btn" class="toolbar-btn">Arrow Tool</button> |
|
||||||
</div> |
|
||||||
|
|
||||||
<svg id="canvas"> |
|
||||||
<defs> |
|
||||||
<marker id="arrowhead-start" markerWidth="10" markerHeight="7" refX="0" refY="3.5" orient="auto" |
|
||||||
markerUnits="strokeWidth"> |
|
||||||
<path d="M10 0 L0 3.5 L10 7" fill="#ccc" /> |
|
||||||
</marker> |
|
||||||
<marker id="arrowhead-end" markerWidth="10" markerHeight="7" refX="10" refY="3.5" orient="auto" |
|
||||||
markerUnits="strokeWidth"> |
|
||||||
<path d="M0 0 L10 3.5 L0 7" fill="#ccc" /> |
|
||||||
</marker> |
|
||||||
</defs> |
|
||||||
</svg> |
|
||||||
<div id="node-props-panel"> |
|
||||||
<h3>node properties</h3> |
|
||||||
<div id="label-group" data-group="label-group"> |
|
||||||
<label>label:</label> |
|
||||||
<input type="text" name="label" /> |
|
||||||
</div> |
|
||||||
<div id="db-group" class="prop-group" data-group="db-group"> |
|
||||||
<label>replication factor:<input type="number" name="replication" min="1" step="1" /></label> |
|
||||||
</div> |
|
||||||
<div id="cache-group" class="prop-group" data-group="cache-group"> |
|
||||||
<label>cache ttl (secs):<input type="number" name="cacheTTL" min="0" step="60" /></label> |
|
||||||
<label>Max Entries: <input name="maxEntries" type="number" /></label> |
|
||||||
<label>Eviction Policy: |
|
||||||
<select name="evictionPolicy"> |
|
||||||
<option value="LRU">LRU (Least Recently Used)</option> |
|
||||||
<option value="LFU">LFU (Least Frequently Used)</option> |
|
||||||
</select> |
|
||||||
</label> |
|
||||||
</div> |
|
||||||
<div id="compute-group" data-group="compute-group" class="prop-group"> |
|
||||||
<label>RPS Capacity:</label> |
|
||||||
<input type="number" name="rpsCapacity" min="1" /> |
|
||||||
|
|
||||||
<label>Base Latency (ms):</label> |
|
||||||
<input type="number" name="baseLatencyMs" min="1" /> |
|
||||||
</div> |
|
||||||
<div id="lb-group" data-group="lb-group" class="prop-group"> |
|
||||||
<label>Algorithm</label> |
|
||||||
<select name="algorithm"> |
|
||||||
<option value="round-robin">Round Robin</option> |
|
||||||
<option value="least-connection">Least Connection</option> |
|
||||||
</select> |
|
||||||
</div> |
|
||||||
<div id="mq-group" data-group="mq-group" class="prop-group"> |
|
||||||
<label>Queue Capacity (max messages that can be held in the queue)</label> |
|
||||||
<input type="number" name="queueCapacity" min="1" /> |
|
||||||
|
|
||||||
<label>Retention Time (seconds)</label> |
|
||||||
<input type="number" name="retentionSeconds" min="1" /> |
|
||||||
</div> |
|
||||||
<div id="cdn-group" data-group="cdn-group" class="prop-group"> |
|
||||||
<label>TTL (seconds)</label> |
|
||||||
<input type="number" name="ttl" min="1" /> |
|
||||||
|
|
||||||
<label>Geo Replication</label> |
|
||||||
<select name="geoReplication"> |
|
||||||
<option value="global">Global</option> |
|
||||||
<option value="regional">Regional</option> |
|
||||||
<option value="custom">Custom</option> |
|
||||||
</select> |
|
||||||
|
|
||||||
<label>Caching Strategy</label> |
|
||||||
<select name="cachingStrategy"> |
|
||||||
<option value="cache-first">Cache First</option> |
|
||||||
<option value="network-first">Network First</option> |
|
||||||
<option value="stale-while-revalidate">Stale While Revalidate</option> |
|
||||||
</select> |
|
||||||
|
|
||||||
<label>Compression</label> |
|
||||||
<select name="compression"> |
|
||||||
<option value="brotli">Brotli</option> |
|
||||||
<option value="gzip">Gzip</option> |
|
||||||
<option value="none">None</option> |
|
||||||
</select> |
|
||||||
|
|
||||||
<label>HTTP/2 Support</label> |
|
||||||
<select name="http2"> |
|
||||||
<option value="enabled">Enabled</option> |
|
||||||
<option value="disabled">Disabled</option> |
|
||||||
</select> |
|
||||||
</div> |
|
||||||
<div id="microservice-group" data-group="microservice-group" class="prop-group"> |
|
||||||
<label> |
|
||||||
Instance Count: |
|
||||||
<input type="number" name="instanceCount" value="3" min="1" /> |
|
||||||
</label> |
|
||||||
|
|
||||||
<label> |
|
||||||
CPU (vCPUs): |
|
||||||
<input type="number" name="cpu" value="2" min="1" /> |
|
||||||
</label> |
|
||||||
|
|
||||||
<label> |
|
||||||
RAM (GB): |
|
||||||
<input type="number" name="ramGb" value="4" min="1" /> |
|
||||||
</label> |
|
||||||
|
|
||||||
<label> |
|
||||||
RPS Capacity: |
|
||||||
<input type="number" name="rpsCapacity" value="150" min="1" /> |
|
||||||
</label> |
|
||||||
|
|
||||||
<label> |
|
||||||
Scaling Strategy: |
|
||||||
<select name="scalingStrategy"> |
|
||||||
<option value="auto" selected>Auto</option> |
|
||||||
<option value="manual">Manual</option> |
|
||||||
</select> |
|
||||||
</label> |
|
||||||
</div> |
|
||||||
<div id="datapipeline-group" data-group="pipeline-group" class="prop-group"> |
|
||||||
<label>Batch Size</label> |
|
||||||
<input type="number" name="batchSize" min="1" /> |
|
||||||
|
|
||||||
<label>Schedule</label> |
|
||||||
<select name="schedule"> |
|
||||||
<option value="realtime">Real-time</option> |
|
||||||
<option value="hourly">Hourly</option> |
|
||||||
<option value="daily">Daily</option> |
|
||||||
<option value="weekly">Weekly</option> |
|
||||||
</select> |
|
||||||
|
|
||||||
<label>Transformations</label> |
|
||||||
<select name="transformations"> |
|
||||||
<option value="normalize">Normalize</option> |
|
||||||
<option value="dedupe">Dedupe</option> |
|
||||||
<option value="filter">Filter</option> |
|
||||||
<option value="enrich">Enrich</option> |
|
||||||
<option value="aggregate">Aggregate</option> |
|
||||||
</select> |
|
||||||
|
|
||||||
<label>Destination</label> |
|
||||||
<input type="text" name="destination" placeholder="e.g. data warehouse" /> |
|
||||||
</div> |
|
||||||
<div id="monitor-group" data-group="monitor-group" class="prop-group"> |
|
||||||
<label>Monitoring Tool</label> |
|
||||||
<select name="tool"> |
|
||||||
<option value="Prometheus">Prometheus</option> |
|
||||||
<option value="Datadog">Datadog</option> |
|
||||||
<option value="New Relic">New Relic</option> |
|
||||||
<option value="Grafana Cloud">Grafana Cloud</option> |
|
||||||
</select> |
|
||||||
|
|
||||||
<label>Alert Threshold (%)</label> |
|
||||||
<input type="number" name="alertThreshold" min="0" max="100" /> |
|
||||||
</div> |
|
||||||
<div id="third-party-group" data-group="third-party-group" class="prop-group"> |
|
||||||
<label>Provider</label> |
|
||||||
<input type="text" name="provider" /> |
|
||||||
|
|
||||||
<label>Latency (ms)</label> |
|
||||||
<input type="number" name="latency" min="0" /> |
|
||||||
</div> |
|
||||||
|
|
||||||
|
|
||||||
<!-- PUT NEW COMPONENTS BEFORE THIS BUTTON --> |
|
||||||
<button id="node-props-save" disabled>save</button> |
|
||||||
</div> |
|
||||||
<div id="bottom-panel"> |
|
||||||
<button id="run-button" disabled>Test Design</button> |
|
||||||
</div> |
|
||||||
</div> |
|
||||||
</div> |
|
||||||
<!-- Metrics--> |
|
||||||
<!-- <div id="content3" class="tab-content">This is Tab 3 content.</div> --> |
|
||||||
</div> |
|
||||||
</div> |
|
||||||
</div> |
|
||||||
</div> |
|
||||||
{{ end }} |
|
||||||
@ -1,12 +0,0 @@ |
|||||||
{{ define "challenges" }} |
|
||||||
<div id="challenge-container"> |
|
||||||
<h2 class="sidebar-title">Challenges</h2> |
|
||||||
<ul class="challenge-list"> |
|
||||||
{{range .Levels}} |
|
||||||
<li class="challenge-item {{if eq .ID $.Level.ID}}active{{end}}"> |
|
||||||
<div class="challenge-name"><a href="/play/{{.ID}}">{{.Name}}</a></div> |
|
||||||
</li> |
|
||||||
{{end}} |
|
||||||
</ul> |
|
||||||
</div> |
|
||||||
{{ end }} |
|
||||||
@ -1,41 +0,0 @@ |
|||||||
{{ define "chat" }} |
|
||||||
<label for="chat-checkbox"> |
|
||||||
<div aria-label="Send message" id="start-chat"> |
|
||||||
<svg class="chat-bubble" width="32" height="32" viewBox="0 0 64 64" xmlns="http://www.w3.org/2000/svg" |
|
||||||
fill="none" stroke="white" stroke-width="4"> |
|
||||||
<path |
|
||||||
d="M4 12C4 7.58 8.03 4 13 4h38c4.97 0 9 3.58 9 8v24c0 4.42-4.03 8-9 8H22l-12 12v-12H13c-4.97 0-9-3.58-9-8V12z" /> |
|
||||||
</svg> |
|
||||||
|
|
||||||
</div> |
|
||||||
</label> |
|
||||||
<input type="checkbox" name="chat-checkbox" id="chat-checkbox" class="chat-checkbox" /> |
|
||||||
<div class="chat" id="chat-box"> |
|
||||||
<div id="chat-header"> |
|
||||||
<p class="chat-title">System Design Assistant</p> |
|
||||||
<p class="powered-by">Powered by AI</p> |
|
||||||
</div> |
|
||||||
<section id="messages"> |
|
||||||
<div class="loading-indicator" id="loading-indicator"> |
|
||||||
<div class="loading-dots"> |
|
||||||
<div class="loading-dot"></div> |
|
||||||
<div class="loading-dot"></div> |
|
||||||
<div class="loading-dot"></div> |
|
||||||
</div> |
|
||||||
<span>loading...</span> |
|
||||||
</div> |
|
||||||
</section> |
|
||||||
<footer> |
|
||||||
<textarea name="chat-message" placeholder="Type your message here..." disabled id="chat-message-box"></textarea> |
|
||||||
<button aria-label="Send message"> |
|
||||||
<svg xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 24 24" stroke-width="1.5" |
|
||||||
stroke="currentColor"> |
|
||||||
<path stroke-linecap="round" stroke-linejoin="round" |
|
||||||
d="M6 12 3.269 3.125A59.769 59.769 0 0 1 21.485 12 59.768 59.768 0 0 1 3.27 20.875L5.999 12Zm0 0h7.5" /> |
|
||||||
</svg> |
|
||||||
</button> |
|
||||||
</footer> |
|
||||||
</div> |
|
||||||
|
|
||||||
|
|
||||||
{{ end }} |
|
||||||
@ -1,389 +0,0 @@ |
|||||||
/** |
|
||||||
* Command Pattern Implementation |
|
||||||
*
|
|
||||||
* This system encapsulates user actions as command objects, making the codebase |
|
||||||
* more maintainable and providing a foundation for features like undo/redo. |
|
||||||
*/ |
|
||||||
|
|
||||||
import { PluginRegistry } from './pluginRegistry.js'; |
|
||||||
import { generateDefaultProps } from './utils.js'; |
|
||||||
import { ComponentNode } from './node.js'; |
|
||||||
|
|
||||||
// Base Command interface
|
|
||||||
export class Command { |
|
||||||
/** |
|
||||||
* Execute the command |
|
||||||
* @param {CanvasApp} app - The application context |
|
||||||
*/ |
|
||||||
execute(app) { |
|
||||||
throw new Error('Command must implement execute() method'); |
|
||||||
} |
|
||||||
|
|
||||||
/** |
|
||||||
* Optional: Undo the command (for future undo/redo system) |
|
||||||
* @param {CanvasApp} app - The application context |
|
||||||
*/ |
|
||||||
undo(app) { |
|
||||||
// Optional implementation - most commands won't need this initially
|
|
||||||
} |
|
||||||
|
|
||||||
/** |
|
||||||
* Optional: Get command description for logging/debugging |
|
||||||
*/ |
|
||||||
getDescription() { |
|
||||||
return this.constructor.name; |
|
||||||
} |
|
||||||
} |
|
||||||
|
|
||||||
// Command Invoker - manages command execution and history
|
|
||||||
export class CommandInvoker { |
|
||||||
constructor(app) { |
|
||||||
this.app = app; |
|
||||||
this.history = []; |
|
||||||
this.maxHistorySize = 100; // Prevent memory leaks
|
|
||||||
} |
|
||||||
|
|
||||||
/** |
|
||||||
* Execute a command and add it to history |
|
||||||
* @param {Command} command
|
|
||||||
*/ |
|
||||||
execute(command) {
command.execute(this.app);

// Add to history (for future undo system)
this.history.push(command);
if (this.history.length > this.maxHistorySize) {
this.history.shift();
}
}
|
||||||
|
|
||||||
/** |
|
||||||
* Future: Undo last command |
|
||||||
*/ |
|
||||||
undo() { |
|
||||||
if (this.history.length === 0) return; |
|
||||||
|
|
||||||
const command = this.history.pop(); |
|
||||||
if (command.undo) { |
|
||||||
command.undo(this.app); |
|
||||||
} |
|
||||||
} |
|
||||||
|
|
||||||
/** |
|
||||||
* Get command history for debugging |
|
||||||
*/ |
|
||||||
getHistory() { |
|
||||||
return this.history.map(cmd => cmd.getDescription()); |
|
||||||
} |
|
||||||
} |
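// A minimal sketch of how a concrete command with undo support could be wired through
// the CommandInvoker above. ExampleRenameCommand is hypothetical; the node.props.label /
// node.updateLabel() calls follow the usage in SaveNodePropertiesCommand later in this file.
export class ExampleRenameCommand extends Command {
  constructor(node, newLabel) {
    super();
    this.node = node;
    this.newLabel = newLabel;
    this.previousLabel = null;
  }

  execute(app) {
    // Capture the old value so undo() can restore it.
    this.previousLabel = this.node.props.label;
    this.node.props.label = this.newLabel;
    this.node.updateLabel(this.newLabel);
  }

  undo(app) {
    // Restoring the captured state is what makes a future undo/redo system possible.
    this.node.props.label = this.previousLabel;
    this.node.updateLabel(this.previousLabel);
  }
}

// Typical wiring: one invoker per app, every user action funnelled through execute().
// const invoker = new CommandInvoker(app);
// invoker.execute(new ExampleRenameCommand(node, 'API Gateway'));
// invoker.undo(); // label reverts to its previous value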
|
||||||
|
|
||||||
// =============================================================================
|
|
||||||
// TAB NAVIGATION COMMANDS
|
|
||||||
// =============================================================================
|
|
||||||
|
|
||||||
export class SwitchToResourcesTabCommand extends Command { |
|
||||||
execute(app) { |
|
||||||
const requirementstab = app.tabs[1]; |
|
||||||
const resourcestab = app.tabs[2]; |
|
||||||
|
|
||||||
requirementstab.checked = false; |
|
||||||
resourcestab.checked = true; |
|
||||||
} |
|
||||||
} |
|
||||||
|
|
||||||
export class SwitchToDesignTabCommand extends Command {
execute(app) {
const requirementstab = app.tabs[1];
// tabs[1] is the requirements tab and tabs[2] the resources tab (see
// SwitchToResourcesTabCommand above), so the design tab is assumed to be tabs[0]
const designtab = app.tabs[0];

requirementstab.checked = false;
designtab.checked = true;
}
}
|
||||||
|
|
||||||
// =============================================================================
|
|
||||||
// TOOL COMMANDS
|
|
||||||
// =============================================================================
|
|
||||||
|
|
||||||
export class ToggleArrowModeCommand extends Command { |
|
||||||
execute(app) { |
|
||||||
app.arrowMode = !app.arrowMode; |
|
||||||
|
|
||||||
if (app.arrowMode) { |
|
||||||
app.arrowToolBtn.classList.add('active'); |
|
||||||
// Use observer to notify that arrow mode is enabled (will hide props panel)
|
|
||||||
app.connectionModeSubject.notifyConnectionModeChanged(true); |
|
||||||
} else { |
|
||||||
app.arrowToolBtn.classList.remove('active'); |
|
||||||
if (app.connectionStart) { |
|
||||||
app.connectionStart.group.classList.remove('selected'); |
|
||||||
app.connectionStart = null; |
|
||||||
} |
|
||||||
// Use observer to notify that arrow mode is disabled
|
|
||||||
app.connectionModeSubject.notifyConnectionModeChanged(false); |
|
||||||
} |
|
||||||
} |
|
||||||
} |
|
||||||
|
|
||||||
// =============================================================================
|
|
||||||
// CHAT COMMANDS
|
|
||||||
// =============================================================================
|
|
||||||
|
|
||||||
export class StartChatCommand extends Command { |
|
||||||
execute(app) { |
|
||||||
const scheme = location.protocol === "https:" ? "wss://" : "ws://"; |
|
||||||
|
|
||||||
app.ws = new WebSocket(scheme + location.host + "/ws"); |
|
||||||
|
|
||||||
app.ws.onopen = () => { |
|
||||||
app.ws.send(JSON.stringify({ |
|
||||||
'designPayload': JSON.stringify(app.exportDesign()), |
|
||||||
'message': '' |
|
||||||
})); |
|
||||||
}; |
|
||||||
|
|
||||||
app.ws.onmessage = (e) => { |
|
||||||
app.chatLoadingIndicator.style.display = 'none'; |
|
||||||
app.chatTextField.disabled = false; |
|
||||||
app.chatTextField.focus(); |
|
||||||
const message = document.createElement('p'); |
|
||||||
message.innerHTML = e.data; |
|
||||||
message.className = "other"; |
|
||||||
app.chatMessages.insertBefore(message, app.chatLoadingIndicator); |
|
||||||
}; |
|
||||||
|
|
||||||
app.ws.onerror = (err) => { |
|
||||||
console.log("ws error:", err); |
|
||||||
app._scheduleReconnect(); |
|
||||||
}; |
|
||||||
|
|
||||||
app.ws.onclose = () => { |
|
||||||
console.log("leaving chat..."); |
|
||||||
app.ws = null; |
|
||||||
app._scheduleReconnect(); |
|
||||||
}; |
|
||||||
} |
|
||||||
} |
|
||||||
|
|
||||||
export class SendChatMessageCommand extends Command { |
|
||||||
constructor(message) { |
|
||||||
super(); |
|
||||||
this.message = message; |
|
||||||
} |
|
||||||
|
|
||||||
execute(app) { |
|
||||||
const messageElement = document.createElement('p'); |
|
||||||
messageElement.innerHTML = this.message; |
|
||||||
messageElement.className = "me"; |
|
||||||
app.chatMessages.insertBefore(messageElement, app.chatLoadingIndicator); |
|
||||||
|
|
||||||
app.ws.send(JSON.stringify({ |
|
||||||
'message': this.message, |
|
||||||
'designPayload': JSON.stringify(app.exportDesign()), |
|
||||||
})); |
|
||||||
|
|
||||||
app.chatTextField.value = ''; |
|
||||||
app.chatLoadingIndicator.style.display = 'block'; |
|
||||||
} |
|
||||||
} |
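// A possible refactor sketch: StartChatCommand and SendChatMessageCommand both build the
// same websocket envelope, so the duplication could be factored into a helper like this.
// Hypothetical; not used by the classes above. Field names match the JSON.stringify calls.
function buildChatEnvelope(app, message) {
  return JSON.stringify({
    message,
    designPayload: JSON.stringify(app.exportDesign()),
  });
}

// Usage sketch: app.ws.send(buildChatEnvelope(app, this.message));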
|
||||||
|
|
||||||
// =============================================================================
|
|
||||||
// DRAG & DROP COMMANDS
|
|
||||||
// =============================================================================
|
|
||||||
|
|
||||||
export class HandleDragStartCommand extends Command { |
|
||||||
constructor(event) { |
|
||||||
super(); |
|
||||||
this.event = event; |
|
||||||
} |
|
||||||
|
|
||||||
execute(app) { |
|
||||||
const type = this.event.target.getAttribute('data-type'); |
|
||||||
const plugin = PluginRegistry.get(type); |
|
||||||
|
|
||||||
if (!plugin) return; |
|
||||||
|
|
||||||
this.event.dataTransfer.setData('text/plain', type); |
|
||||||
} |
|
||||||
} |
|
||||||
|
|
||||||
export class HandleDragEndCommand extends Command { |
|
||||||
constructor(event) { |
|
||||||
super(); |
|
||||||
this.event = event; |
|
||||||
} |
|
||||||
|
|
||||||
execute(app) { |
|
||||||
if (this.event.target.classList.contains('component-icon')) { |
|
||||||
this.event.target.classList.remove('dragging'); |
|
||||||
} |
|
||||||
} |
|
||||||
} |
|
||||||
|
|
||||||
export class DropComponentCommand extends Command { |
|
||||||
constructor(event) { |
|
||||||
super(); |
|
||||||
this.event = event; |
|
||||||
} |
|
||||||
|
|
||||||
execute(app) { |
|
||||||
const type = this.event.dataTransfer.getData('text/plain'); |
|
||||||
const plugin = PluginRegistry.get(type); |
|
||||||
if (!plugin) return; |
|
||||||
|
|
||||||
const pt = app.canvas.createSVGPoint(); |
|
||||||
pt.x = this.event.clientX; |
|
||||||
pt.y = this.event.clientY; |
|
||||||
|
|
||||||
const svgP = pt.matrixTransform(app.canvas.getScreenCTM().inverse()); |
|
||||||
const x = svgP.x - app.componentSize.width / 2; |
|
||||||
const y = svgP.y - app.componentSize.height / 2; |
|
||||||
|
|
||||||
const props = generateDefaultProps(plugin); |
|
||||||
const node = new ComponentNode(type, x, y, app, props); |
|
||||||
node.x = x; |
|
||||||
node.y = y; |
|
||||||
} |
|
||||||
} |
|
||||||
|
|
||||||
// =============================================================================
|
|
||||||
// SIMULATION COMMANDS
|
|
||||||
// =============================================================================
|
|
||||||
|
|
||||||
export class RunSimulationCommand extends Command { |
|
||||||
async execute(app) { |
|
||||||
const designData = app.exportDesign(); |
|
||||||
|
|
||||||
// Try to get level info from URL or page context
|
|
||||||
const levelInfo = app.getLevelInfo(); |
|
||||||
|
|
||||||
const requestBody = { |
|
||||||
design: designData, |
|
||||||
...levelInfo |
|
||||||
}; |
|
||||||
|
|
||||||
console.log('Sending design to simulation:', JSON.stringify(requestBody)); |
|
||||||
|
|
||||||
// Disable button and show loading state
|
|
||||||
app.runButton.disabled = true; |
|
||||||
app.runButton.textContent = 'Running Simulation...'; |
|
||||||
|
|
||||||
try { |
|
||||||
const response = await fetch('/simulate', { |
|
||||||
method: 'POST', |
|
||||||
headers: { |
|
||||||
'Content-Type': 'application/json', |
|
||||||
}, |
|
||||||
body: JSON.stringify(requestBody) |
|
||||||
}); |
|
||||||
|
|
||||||
if (!response.ok) { |
|
||||||
throw new Error(`HTTP ${response.status}: ${response.statusText}`); |
|
||||||
} |
|
||||||
|
|
||||||
// Check if response is a redirect (status 303)
|
|
||||||
if (response.redirected || response.status === 303) { |
|
||||||
// Follow the redirect to the result page
|
|
||||||
window.location.href = response.url; |
|
||||||
return; |
|
||||||
} |
|
||||||
|
|
||||||
// If we get here, something went wrong - the server should always redirect
|
|
||||||
console.error('Unexpected response from server - expected redirect but got:', response.status); |
|
||||||
app.showError('Unexpected server response. Please try again.'); |
|
||||||
|
|
||||||
} catch (error) { |
|
||||||
console.error('Network error:', error); |
|
||||||
app.showError('Failed to run simulation: ' + error.message); |
|
||||||
} finally { |
|
||||||
// Re-enable button
|
|
||||||
app.runButton.disabled = false; |
|
||||||
app.runButton.textContent = 'Test Design'; |
|
||||||
} |
|
||||||
} |
|
||||||
} |
|
||||||
|
|
||||||
// =============================================================================
|
|
||||||
// CANVAS INTERACTION COMMANDS
|
|
||||||
// =============================================================================
|
|
||||||
|
|
||||||
export class HandleCanvasClickCommand extends Command { |
|
||||||
constructor(event) { |
|
||||||
super(); |
|
||||||
this.event = event; |
|
||||||
} |
|
||||||
|
|
||||||
execute(app) { |
|
||||||
// Delegate to current state
|
|
||||||
app.stateMachine.handleCanvasClick(this.event); |
|
||||||
} |
|
||||||
} |
|
||||||
|
|
||||||
export class SaveNodePropertiesCommand extends Command { |
|
||||||
execute(app) { |
|
||||||
if (!app.activeNode) return; |
|
||||||
|
|
||||||
const node = app.activeNode; |
|
||||||
const panel = app.nodePropsPanel; |
|
||||||
const plugin = PluginRegistry.get(node.type); |
|
||||||
|
|
||||||
if (!plugin || !plugin.props) { |
|
||||||
return; |
|
||||||
} |
|
||||||
|
|
||||||
// Loop through plugin-defined props and update the node
|
|
||||||
for (const prop of plugin.props) { |
|
||||||
const input = panel.querySelector(`[name='${prop.name}']`); |
|
||||||
if (!input) continue; |
|
||||||
|
|
||||||
let value; |
|
||||||
if (prop.type === 'number') { |
|
||||||
value = parseFloat(input.value); |
|
||||||
if (isNaN(value)) value = prop.default ?? 0; |
|
||||||
} else { |
|
||||||
value = input.value; |
|
||||||
} |
|
||||||
|
|
||||||
node.props[prop.name] = value; |
|
||||||
if (prop.name === 'label') { |
|
||||||
node.updateLabel(value); |
|
||||||
} |
|
||||||
} |
|
||||||
} |
|
||||||
} |
|
||||||
|
|
||||||
export class DeleteSelectionCommand extends Command { |
|
||||||
constructor(key) { |
|
||||||
super(); |
|
||||||
this.key = key; |
|
||||||
} |
|
||||||
|
|
||||||
execute(app) { |
|
||||||
if (this.key === 'Backspace' || this.key === 'Delete') { |
|
||||||
if (app.selectedConnection) { |
|
||||||
app.canvas.removeChild(app.selectedConnection.line); |
|
||||||
app.canvas.removeChild(app.selectedConnection.text); |
|
||||||
const index = app.connections.indexOf(app.selectedConnection); |
|
||||||
if (index !== -1) app.connections.splice(index, 1); |
|
||||||
app.selectedConnection = null; |
|
||||||
} else if (app.selectedNode) { |
|
||||||
app.canvas.removeChild(app.selectedNode.group); |
|
||||||
app.placedComponents = app.placedComponents.filter(n => n !== app.selectedNode); |
|
||||||
app.connections = app.connections.filter(conn => { |
|
||||||
if (conn.start === app.selectedNode || conn.end === app.selectedNode) { |
|
||||||
app.canvas.removeChild(conn.line); |
|
||||||
app.canvas.removeChild(conn.text); |
|
||||||
return false; |
|
||||||
} |
|
||||||
return true; |
|
||||||
}); |
|
||||||
app.selectedNode = null; |
|
||||||
app.activeNode = null; |
|
||||||
} |
|
||||||
} |
|
||||||
} |
|
||||||
} |
|
||||||
@ -1,19 +0,0 @@
{{ define "header" }}
<div id="sd-header">
<div class="header-logo-container">
<h1 class="header-text">System Design Game</h1>
<div class="beta-pill">BETA</div>
</div>
{{ if and .Username .Avatar }}
<div class="userbox">
<img src="{{ .Avatar }}" class="avatar" />
<span class="username">{{ .Username }}</span>
</div>
{{ else }}
<a href="/login" id="github-login-btn">
<img src="https://cdn.jsdelivr.net/gh/devicons/devicon/icons/github/github-original.svg" alt="GitHub Logo">
Login with GitHub
</a>
{{ end }}
</div>
{{ end }}
@ -1,411 +0,0 @@
/** |
|
||||||
* Dedicated Observer Pattern Implementation |
|
||||||
*
|
|
||||||
* Each observer is dedicated to a particular concern and is type-safe. |
|
||||||
* This provides clean separation of concerns and maintainable event handling. |
|
||||||
*/ |
|
||||||
|
|
||||||
import { PluginRegistry } from './pluginRegistry.js'; |
|
||||||
|
|
||||||
/** |
|
||||||
* NodeSelectionObserver - Dedicated to node selection events only |
|
||||||
*/ |
|
||||||
export class NodeSelectionObserver { |
|
||||||
constructor() { |
|
||||||
this.observers = []; |
|
||||||
} |
|
||||||
|
|
||||||
// Add a specific observer for node selection changes
|
|
||||||
addObserver(observer) { |
|
||||||
if (typeof observer.onNodeSelected !== 'function' || |
|
||||||
typeof observer.onNodeDeselected !== 'function') { |
|
||||||
throw new Error('Observer must implement onNodeSelected and onNodeDeselected methods'); |
|
||||||
} |
|
||||||
this.observers.push(observer); |
|
||||||
} |
|
||||||
|
|
||||||
removeObserver(observer) { |
|
||||||
const index = this.observers.indexOf(observer); |
|
||||||
if (index !== -1) { |
|
||||||
this.observers.splice(index, 1); |
|
||||||
} |
|
||||||
} |
|
||||||
|
|
||||||
// Notify all observers about node selection
|
|
||||||
notifyNodeSelected(node) { |
|
||||||
for (const observer of this.observers) { |
|
||||||
observer.onNodeSelected(node); |
|
||||||
} |
|
||||||
} |
|
||||||
|
|
||||||
// Notify all observers about node deselection
|
|
||||||
notifyNodeDeselected(node) { |
|
||||||
for (const observer of this.observers) { |
|
||||||
observer.onNodeDeselected(node); |
|
||||||
} |
|
||||||
} |
|
||||||
} |
|
||||||
|
|
||||||
/** |
|
||||||
* PropertiesPanelObserver - Dedicated to properties panel events only |
|
||||||
*/ |
|
||||||
export class PropertiesPanelObserver { |
|
||||||
constructor() { |
|
||||||
this.observers = []; |
|
||||||
} |
|
||||||
|
|
||||||
addObserver(observer) { |
|
||||||
if (typeof observer.onPropertiesPanelRequested !== 'function') { |
|
||||||
throw new Error('Observer must implement onPropertiesPanelRequested method'); |
|
||||||
} |
|
||||||
this.observers.push(observer); |
|
||||||
} |
|
||||||
|
|
||||||
removeObserver(observer) { |
|
||||||
const index = this.observers.indexOf(observer); |
|
||||||
if (index !== -1) { |
|
||||||
this.observers.splice(index, 1); |
|
||||||
} |
|
||||||
} |
|
||||||
|
|
||||||
notifyPropertiesPanelRequested(node) { |
|
||||||
for (const observer of this.observers) { |
|
||||||
observer.onPropertiesPanelRequested(node); |
|
||||||
} |
|
||||||
} |
|
||||||
|
|
||||||
notifyPropertiesPanelClosed(node) { |
|
||||||
for (const observer of this.observers) { |
|
||||||
if (observer.onPropertiesPanelClosed) { |
|
||||||
observer.onPropertiesPanelClosed(node); |
|
||||||
} |
|
||||||
} |
|
||||||
} |
|
||||||
} |
|
||||||
|
|
||||||
/** |
|
||||||
* ConnectionModeObserver - Dedicated to connection/arrow mode events |
|
||||||
*/ |
|
||||||
export class ConnectionModeObserver { |
|
||||||
constructor() { |
|
||||||
this.observers = []; |
|
||||||
} |
|
||||||
|
|
||||||
addObserver(observer) { |
|
||||||
if (typeof observer.onConnectionModeChanged !== 'function') { |
|
||||||
throw new Error('Observer must implement onConnectionModeChanged method'); |
|
||||||
} |
|
||||||
this.observers.push(observer); |
|
||||||
} |
|
||||||
|
|
||||||
removeObserver(observer) { |
|
||||||
const index = this.observers.indexOf(observer); |
|
||||||
if (index !== -1) { |
|
||||||
this.observers.splice(index, 1); |
|
||||||
} |
|
||||||
} |
|
||||||
|
|
||||||
notifyConnectionModeChanged(isEnabled) { |
|
||||||
for (const observer of this.observers) { |
|
||||||
observer.onConnectionModeChanged(isEnabled); |
|
||||||
} |
|
||||||
} |
|
||||||
} |
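// A sketch of a possible shared base for the three subjects above: the add/remove
// bookkeeping is identical in NodeSelectionObserver, PropertiesPanelObserver and
// ConnectionModeObserver, while each subclass would keep its own type-checked
// addObserver override and dedicated notify methods. Hypothetical; not used below.
export class BaseSubject {
  constructor() {
    this.observers = [];
  }

  addObserver(observer) {
    this.observers.push(observer);
  }

  removeObserver(observer) {
    const index = this.observers.indexOf(observer);
    if (index !== -1) {
      this.observers.splice(index, 1);
    }
  }
}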
|
||||||
|
|
||||||
/** |
|
||||||
* Properties Panel Manager - Implements observer interfaces |
|
||||||
*/ |
|
||||||
export class PropertiesPanelManager { |
|
||||||
constructor(panelElement, saveButton) { |
|
||||||
this.panel = panelElement; |
|
||||||
this.saveButton = saveButton; |
|
||||||
this.activeNode = null; |
|
||||||
|
|
||||||
this.setupDOMEventListeners(); |
|
||||||
} |
|
||||||
|
|
||||||
// Implement the observer interface for properties panel events
|
|
||||||
onPropertiesPanelRequested(node) { |
|
||||||
const plugin = PluginRegistry.get(node.type); |
|
||||||
|
|
||||||
if (!plugin) { |
|
||||||
this.hidePanel(); |
|
||||||
return; |
|
||||||
} |
|
||||||
|
|
||||||
this.showPanel(node, plugin); |
|
||||||
} |
|
||||||
|
|
||||||
onPropertiesPanelClosed(node) { |
|
||||||
if (this.activeNode === node) { |
|
||||||
this.hidePanel(); |
|
||||||
} |
|
||||||
} |
|
||||||
|
|
||||||
// Implement the observer interface for connection mode events
|
|
||||||
onConnectionModeChanged(isEnabled) { |
|
||||||
if (isEnabled && this.activeNode) { |
|
||||||
this.hidePanel(); |
|
||||||
} |
|
||||||
} |
|
||||||
|
|
||||||
// Implement the observer interface for node selection events
|
|
||||||
onNodeSelected(node) { |
|
||||||
// Properties panel doesn't need to do anything special when nodes are selected
|
|
||||||
// The double-click handler takes care of showing the panel
|
|
||||||
} |
|
||||||
|
|
||||||
onNodeDeselected(node) { |
|
||||||
// When a node is deselected, close the properties panel if it's for that node
|
|
||||||
if (this.activeNode === node) { |
|
||||||
this.hidePanel(); |
|
||||||
} |
|
||||||
} |
|
||||||
|
|
||||||
setupDOMEventListeners() { |
|
||||||
this.saveButton.addEventListener('click', () => { |
|
||||||
this.saveProperties(); |
|
||||||
}); |
|
||||||
|
|
||||||
this.panel.addEventListener('click', (e) => { |
|
||||||
e.stopPropagation(); |
|
||||||
}); |
|
||||||
} |
|
||||||
|
|
||||||
showPanel(node, plugin) { |
|
||||||
this.activeNode = node; |
|
||||||
|
|
||||||
// Calculate position for optimal placement
|
|
||||||
const nodeRect = node.group.getBoundingClientRect(); |
|
||||||
const panelWidth = 220; |
|
||||||
const panelHeight = 400; |
|
||||||
|
|
||||||
let dialogX = nodeRect.right + 10; |
|
||||||
let dialogY = nodeRect.top; |
|
||||||
|
|
||||||
if (dialogX + panelWidth > window.innerWidth) { |
|
||||||
dialogX = nodeRect.left - panelWidth - 10; |
|
||||||
} |
|
||||||
|
|
||||||
if (dialogY + panelHeight > window.innerHeight) { |
|
||||||
dialogY = window.innerHeight - panelHeight - 10; |
|
||||||
} |
|
||||||
|
|
||||||
if (dialogY < 10) { |
|
||||||
dialogY = 10; |
|
||||||
} |
|
||||||
|
|
||||||
this.panel.style.left = dialogX + 'px'; |
|
||||||
this.panel.style.top = dialogY + 'px'; |
|
||||||
|
|
||||||
// Hide all groups first
|
|
||||||
const allGroups = this.panel.querySelectorAll('.prop-group, #label-group, #compute-group, #lb-group'); |
|
||||||
allGroups.forEach(g => g.style.display = 'none'); |
|
||||||
|
|
||||||
const shownGroups = new Set(); |
|
||||||
|
|
||||||
// Set up properties based on plugin definition
|
|
||||||
for (const prop of plugin.props) { |
|
||||||
const group = this.panel.querySelector(`[data-group='${prop.group}']`); |
|
||||||
const input = this.panel.querySelector(`[name='${prop.name}']`); |
|
||||||
|
|
||||||
// Show group once
|
|
||||||
if (group && !shownGroups.has(group)) { |
|
||||||
group.style.display = 'block'; |
|
||||||
shownGroups.add(group); |
|
||||||
} |
|
||||||
|
|
||||||
// Set value
|
|
||||||
if (input) { |
|
||||||
input.value = node.props[prop.name] ?? prop.default; |
|
||||||
} |
|
||||||
} |
|
||||||
|
|
||||||
this.saveButton.disabled = false; |
|
||||||
this.panel.style.display = 'block'; |
|
||||||
|
|
||||||
setTimeout(() => { |
|
||||||
this.panel.classList.add('visible'); |
|
||||||
}, 10); |
|
||||||
} |
|
||||||
|
|
||||||
hidePanel() { |
|
||||||
if (!this.activeNode) return; |
|
||||||
|
|
||||||
this.panel.classList.remove('visible'); |
|
||||||
|
|
||||||
setTimeout(() => { |
|
||||||
this.panel.style.display = 'none'; |
|
||||||
}, 200); |
|
||||||
|
|
||||||
this.activeNode = null; |
|
||||||
} |
|
||||||
|
|
||||||
saveProperties() { |
|
||||||
if (!this.activeNode) return; |
|
||||||
|
|
||||||
const node = this.activeNode; |
|
||||||
const panel = this.panel; |
|
||||||
const plugin = PluginRegistry.get(node.type); |
|
||||||
|
|
||||||
if (!plugin || !plugin.props) { |
|
||||||
this.hidePanel(); |
|
||||||
return; |
|
||||||
} |
|
||||||
|
|
||||||
// Loop through plugin-defined props and update the node
|
|
||||||
for (const prop of plugin.props) { |
|
||||||
const input = panel.querySelector(`[name='${prop.name}']`); |
|
||||||
if (!input) continue; |
|
||||||
|
|
||||||
let value; |
|
||||||
if (prop.type === 'number') { |
|
||||||
value = parseFloat(input.value); |
|
||||||
if (isNaN(value)) value = prop.default ?? 0; |
|
||||||
} else { |
|
||||||
value = input.value; |
|
||||||
} |
|
||||||
|
|
||||||
node.props[prop.name] = value; |
|
||||||
if (prop.name === 'label') { |
|
||||||
node.updateLabel(value); |
|
||||||
} |
|
||||||
} |
|
||||||
|
|
||||||
this.hidePanel(); |
|
||||||
} |
|
||||||
} |
|
||||||
|
|
||||||
/** |
|
||||||
* Selection Manager - Implements node selection observer interface |
|
||||||
*/ |
|
||||||
export class SelectionManager { |
|
||||||
constructor() { |
|
||||||
this.selectedNode = null; |
|
||||||
this.selectedConnection = null; |
|
||||||
} |
|
||||||
|
|
||||||
// Implement the observer interface for node selection
|
|
||||||
onNodeSelected(node) { |
|
||||||
this.clearSelection(); |
|
||||||
this.selectedNode = node; |
|
||||||
node.select(); // Visual feedback
|
|
||||||
} |
|
||||||
|
|
||||||
onNodeDeselected(node) { |
|
||||||
if (this.selectedNode === node) { |
|
||||||
node.deselect(); |
|
||||||
this.selectedNode = null; |
|
||||||
} |
|
||||||
} |
|
||||||
|
|
||||||
clearSelection() { |
|
||||||
if (this.selectedNode) { |
|
||||||
this.selectedNode.deselect(); |
|
||||||
this.selectedNode = null; |
|
||||||
} |
|
||||||
|
|
||||||
if (this.selectedConnection) { |
|
||||||
this.selectedConnection.deselect(); |
|
||||||
this.selectedConnection = null; |
|
||||||
} |
|
||||||
} |
|
||||||
} |
|
||||||
|
|
||||||
/** |
|
||||||
* Initialize the observer system |
|
||||||
*/ |
|
||||||
export function initializeObservers(nodePropsPanel, propsSaveBtn) { |
|
||||||
// Create the specific observers (subjects)
|
|
||||||
const nodeSelectionSubject = new NodeSelectionObserver(); |
|
||||||
const propertiesPanelSubject = new PropertiesPanelObserver(); |
|
||||||
const connectionModeSubject = new ConnectionModeObserver(); |
|
||||||
|
|
||||||
// Create the specific observers (listeners)
|
|
||||||
const propertiesPanel = new PropertiesPanelManager(nodePropsPanel, propsSaveBtn); |
|
||||||
|
|
||||||
const selectionManager = new SelectionManager(); |
|
||||||
|
|
||||||
// Wire them together - each observer registers for what it cares about
|
|
||||||
nodeSelectionSubject.addObserver(selectionManager); |
|
||||||
nodeSelectionSubject.addObserver(propertiesPanel); // Properties panel cares about selection too
|
|
||||||
|
|
||||||
propertiesPanelSubject.addObserver(propertiesPanel); |
|
||||||
|
|
||||||
connectionModeSubject.addObserver(propertiesPanel); // Panel hides when arrow mode enabled
|
|
||||||
|
|
||||||
// Return the subjects so the main app can notify them
|
|
||||||
return { |
|
||||||
nodeSelection: nodeSelectionSubject, |
|
||||||
propertiesPanel: propertiesPanelSubject, |
|
||||||
connectionMode: connectionModeSubject |
|
||||||
}; |
|
||||||
} |
|
||||||
|
|
||||||
/** |
|
||||||
* How the main CanvasApp would use these observers |
|
||||||
*/ |
|
||||||
export class CanvasAppWithObservers { |
|
||||||
constructor() { |
|
||||||
// Initialize observers
|
|
||||||
// initializeObservers expects the properties-panel element and its save button;
// nodePropsPanel and propsSaveBtn are assumed to have been looked up during app setup
const observers = initializeObservers(this.nodePropsPanel, this.propsSaveBtn);
|
||||||
this.nodeSelectionSubject = observers.nodeSelection; |
|
||||||
this.propertiesPanelSubject = observers.propertiesPanel; |
|
||||||
this.connectionModeSubject = observers.connectionMode; |
|
||||||
|
|
||||||
this.selectedNode = null; |
|
||||||
this.arrowMode = false; |
|
||||||
|
|
||||||
this.setupEventListeners(); |
|
||||||
} |
|
||||||
|
|
||||||
setupEventListeners() { |
|
||||||
// Canvas click - clear selection
|
|
||||||
this.canvas.addEventListener('click', (e) => { |
|
||||||
if (e.detail > 1) return; // Ignore double-clicks
|
|
||||||
|
|
||||||
if (this.selectedNode) { |
|
||||||
this.nodeSelectionSubject.notifyNodeDeselected(this.selectedNode); |
|
||||||
this.selectedNode = null; |
|
||||||
} |
|
||||||
}); |
|
||||||
|
|
||||||
// Arrow mode toggle
|
|
||||||
this.arrowToolBtn.addEventListener('click', () => { |
|
||||||
this.arrowMode = !this.arrowMode; |
|
||||||
this.connectionModeSubject.notifyConnectionModeChanged(this.arrowMode); |
|
||||||
}); |
|
||||||
} |
|
||||||
|
|
||||||
// When a node is double-clicked
|
|
||||||
onNodeDoubleClick(node) { |
|
||||||
if (!this.arrowMode) { |
|
||||||
this.propertiesPanelSubject.notifyPropertiesPanelRequested(node); |
|
||||||
} |
|
||||||
} |
|
||||||
|
|
||||||
// When a node is single-clicked
|
|
||||||
onNodeSingleClick(node) { |
|
||||||
if (this.selectedNode !== node) { |
|
||||||
if (this.selectedNode) { |
|
||||||
this.nodeSelectionSubject.notifyNodeDeselected(this.selectedNode); |
|
||||||
} |
|
||||||
this.selectedNode = node; |
|
||||||
this.nodeSelectionSubject.notifyNodeSelected(node); |
|
||||||
} |
|
||||||
} |
|
||||||
} |
|
||||||
|
|
||||||
/** |
|
||||||
* Key Benefits of This Approach: |
|
||||||
*
|
|
||||||
* 1. TYPE SAFETY: Each observer has a specific interface |
|
||||||
* 2. SINGLE RESPONSIBILITY: Each observer handles ONE concern
|
|
||||||
* 3. NO MAGIC STRINGS: No event type constants that can be mistyped |
|
||||||
* 4. COMPILE-TIME CHECKING: TypeScript/IDE can validate observer interfaces |
|
||||||
* 5. FOCUSED: PropertiesPanelObserver only knows about properties panels |
|
||||||
* 6. TESTABLE: Each observer can be tested with mock implementations |
|
||||||
*
|
|
||||||
* This approach is more verbose but much safer and clearer about |
|
||||||
* what each component is responsible for. |
|
||||||
*/ |
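// A minimal test sketch for point 6 above: any plain object that implements the
// NodeSelectionObserver contract can stand in for PropertiesPanelManager or
// SelectionManager in a unit test. The fake node object here is hypothetical.
export function exampleMockObserverTest() {
  const events = [];
  const mockObserver = {
    onNodeSelected: (node) => events.push(['selected', node]),
    onNodeDeselected: (node) => events.push(['deselected', node]),
  };

  const subject = new NodeSelectionObserver();
  subject.addObserver(mockObserver); // passes the duck-type check in addObserver

  const fakeNode = { type: 'cache', props: {} };
  subject.notifyNodeSelected(fakeNode);
  subject.notifyNodeDeselected(fakeNode);

  return events.length === 2; // both notifications reached the mock
}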
|
||||||
@ -1,156 +0,0 @@ |
|||||||
/** |
|
||||||
* Base Canvas State - Nystrom's State Pattern Implementation |
|
||||||
*
|
|
||||||
* This abstract base class defines the interface that all canvas states must implement. |
|
||||||
* Each state handles user interactions differently, eliminating the need for mode checking. |
|
||||||
*/ |
|
||||||
|
|
||||||
export class CanvasState { |
|
||||||
/** |
|
||||||
* Called when entering this state |
|
||||||
* @param {CanvasApp} app - The canvas application context |
|
||||||
*/ |
|
||||||
enter(app) { |
|
||||||
// Override in concrete states
|
|
||||||
} |
|
||||||
|
|
||||||
/** |
|
||||||
* Called when exiting this state
|
|
||||||
* @param {CanvasApp} app - The canvas application context |
|
||||||
*/ |
|
||||||
exit(app) { |
|
||||||
// Override in concrete states
|
|
||||||
} |
|
||||||
|
|
||||||
/** |
|
||||||
* Handle clicks on the canvas background |
|
||||||
* @param {CanvasApp} app - The canvas application context |
|
||||||
* @param {MouseEvent} event - The click event |
|
||||||
*/ |
|
||||||
handleCanvasClick(app, event) { |
|
||||||
// Default: clear selections
|
|
||||||
if (event.detail > 1) return; // Ignore double-clicks
|
|
||||||
|
|
||||||
// Clear any connection start
|
|
||||||
if (app.connectionStart) { |
|
||||||
app.connectionStart.group.classList.remove('selected'); |
|
||||||
app.connectionStart = null; |
|
||||||
} |
|
||||||
|
|
||||||
// Clear node selection via observer
|
|
||||||
if (app.selectedNode) { |
|
||||||
app.nodeSelectionSubject.notifyNodeDeselected(app.selectedNode); |
|
||||||
app.selectedNode = null; |
|
||||||
} |
|
||||||
|
|
||||||
// Clear connection selection
|
|
||||||
if (app.selectedConnection) { |
|
||||||
app.selectedConnection.deselect(); |
|
||||||
app.selectedConnection = null; |
|
||||||
} |
|
||||||
} |
|
||||||
|
|
||||||
/** |
|
||||||
* Handle single clicks on nodes |
|
||||||
* @param {CanvasApp} app - The canvas application context
|
|
||||||
* @param {ComponentNode} node - The clicked node |
|
||||||
* @param {MouseEvent} event - The click event |
|
||||||
*/ |
|
||||||
handleNodeClick(app, node, event) { |
|
||||||
// Override in concrete states
|
|
||||||
throw new Error(`${this.constructor.name} must implement handleNodeClick()`); |
|
||||||
} |
|
||||||
|
|
||||||
/** |
|
||||||
* Handle double clicks on nodes |
|
||||||
* @param {CanvasApp} app - The canvas application context |
|
||||||
* @param {ComponentNode} node - The double-clicked node
|
|
||||||
*/ |
|
||||||
handleNodeDoubleClick(app, node) { |
|
||||||
// Override in concrete states
|
|
||||||
throw new Error(`${this.constructor.name} must implement handleNodeDoubleClick()`); |
|
||||||
} |
|
||||||
|
|
||||||
/** |
|
||||||
* Handle component drops from sidebar |
|
||||||
* @param {CanvasApp} app - The canvas application context |
|
||||||
* @param {DragEvent} event - The drop event |
|
||||||
*/ |
|
||||||
async handleDrop(app, event) { |
|
||||||
// Default implementation - most states allow dropping
|
|
||||||
const type = event.dataTransfer.getData('text/plain'); |
|
||||||
|
|
||||||
// Import PluginRegistry dynamically to avoid circular imports
|
|
||||||
const { PluginRegistry } = await import('../pluginRegistry.js'); |
|
||||||
const plugin = PluginRegistry.get(type); |
|
||||||
if (!plugin) return; |
|
||||||
|
|
||||||
const pt = app.canvas.createSVGPoint(); |
|
||||||
pt.x = event.clientX; |
|
||||||
pt.y = event.clientY; |
|
||||||
|
|
||||||
const svgP = pt.matrixTransform(app.canvas.getScreenCTM().inverse()); |
|
||||||
const x = svgP.x - app.componentSize.width / 2; |
|
||||||
const y = svgP.y - app.componentSize.height / 2; |
|
||||||
|
|
||||||
const { generateDefaultProps } = await import('../utils.js'); |
|
||||||
const { ComponentNode } = await import('../node.js'); |
|
||||||
|
|
||||||
const props = generateDefaultProps(plugin); |
|
||||||
const node = new ComponentNode(type, x, y, app, props); |
|
||||||
node.x = x; |
|
||||||
node.y = y; |
|
||||||
} |
|
||||||
|
|
||||||
/** |
|
||||||
* Handle keyboard events |
|
||||||
* @param {CanvasApp} app - The canvas application context |
|
||||||
* @param {KeyboardEvent} event - The keyboard event |
|
||||||
*/ |
|
||||||
handleKeyDown(app, event) { |
|
||||||
// Default: handle delete key
|
|
||||||
if (event.key === 'Backspace' || event.key === 'Delete') { |
|
||||||
if (app.selectedConnection) { |
|
||||||
app.canvas.removeChild(app.selectedConnection.line); |
|
||||||
app.canvas.removeChild(app.selectedConnection.text); |
|
||||||
const index = app.connections.indexOf(app.selectedConnection); |
|
||||||
if (index !== -1) app.connections.splice(index, 1); |
|
||||||
app.selectedConnection = null; |
|
||||||
} else if (app.selectedNode) { |
|
||||||
app.canvas.removeChild(app.selectedNode.group); |
|
||||||
app.placedComponents = app.placedComponents.filter(n => n !== app.selectedNode); |
|
||||||
app.connections = app.connections.filter(conn => { |
|
||||||
if (conn.start === app.selectedNode || conn.end === app.selectedNode) { |
|
||||||
app.canvas.removeChild(conn.line); |
|
||||||
app.canvas.removeChild(conn.text); |
|
||||||
return false; |
|
||||||
} |
|
||||||
return true; |
|
||||||
}); |
|
||||||
app.selectedNode = null; |
|
||||||
app.activeNode = null; |
|
||||||
} |
|
||||||
} |
|
||||||
} |
|
||||||
|
|
||||||
/** |
|
||||||
* Get the display name of this state |
|
||||||
*/ |
|
||||||
getStateName() { |
|
||||||
return this.constructor.name.replace('State', ''); |
|
||||||
} |
|
||||||
|
|
||||||
/** |
|
||||||
* Get the cursor style for this state |
|
||||||
*/ |
|
||||||
getCursor() { |
|
||||||
return 'default'; |
|
||||||
} |
|
||||||
|
|
||||||
/** |
|
||||||
* Whether this state allows properties panel to open |
|
||||||
*/ |
|
||||||
allowsPropertiesPanel() { |
|
||||||
return true; |
|
||||||
} |
|
||||||
} |
|
||||||
@ -1,161 +0,0 @@ |
|||||||
/** |
|
||||||
* Canvas State Machine - Manages state transitions for the canvas |
|
||||||
*
|
|
||||||
* This class coordinates state changes and ensures proper enter/exit calls. |
|
||||||
* It follows Nystrom's State Pattern implementation guidelines. |
|
||||||
*/ |
|
||||||
|
|
||||||
import { DesignState } from './DesignState.js'; |
|
||||||
import { ConnectionState } from './ConnectionState.js'; |
|
||||||
|
|
||||||
export class CanvasStateMachine { |
|
||||||
constructor(app) { |
|
||||||
this.app = app; |
|
||||||
this.currentState = null; |
|
||||||
|
|
||||||
// Pre-create state instances for reuse
|
|
||||||
this.states = { |
|
||||||
design: new DesignState(), |
|
||||||
connection: new ConnectionState() |
|
||||||
}; |
|
||||||
|
|
||||||
// Start in design state
|
|
||||||
this.changeState('design'); |
|
||||||
} |
|
||||||
|
|
||||||
/** |
|
||||||
* Change to a new state |
|
||||||
* @param {string} stateName - Name of the state to change to |
|
||||||
*/ |
|
||||||
changeState(stateName) { |
|
||||||
const newState = this.states[stateName]; |
|
||||||
|
|
||||||
if (!newState) { |
|
||||||
return; |
|
||||||
} |
|
||||||
|
|
||||||
if (this.currentState === newState) { |
|
||||||
return; |
|
||||||
} |
|
||||||
|
|
||||||
// Exit current state
|
|
||||||
if (this.currentState) { |
|
||||||
this.currentState.exit(this.app); |
|
||||||
} |
|
||||||
|
|
||||||
// Enter new state
|
|
||||||
const previousState = this.currentState; |
|
||||||
this.currentState = newState; |
|
||||||
this.currentState.enter(this.app); |
|
||||||
|
|
||||||
// Notify any listeners about state change
|
|
||||||
this.onStateChanged(previousState, newState); |
|
||||||
} |
|
||||||
|
|
||||||
/** |
|
||||||
* Toggle between design and connection states |
|
||||||
*/ |
|
||||||
toggleConnectionMode() { |
|
||||||
const currentStateName = this.getCurrentStateName(); |
|
||||||
|
|
||||||
if (currentStateName === 'design') { |
|
||||||
this.changeState('connection'); |
|
||||||
} else { |
|
||||||
this.changeState('design'); |
|
||||||
} |
|
||||||
} |
|
||||||
|
|
||||||
/** |
|
||||||
* Get the current state name |
|
||||||
*/ |
|
||||||
getCurrentStateName() { |
|
||||||
return this.currentState ? this.currentState.getStateName().toLowerCase() : 'none'; |
|
||||||
} |
|
||||||
|
|
||||||
/** |
|
||||||
* Get the current state instance |
|
||||||
*/ |
|
||||||
getCurrentState() { |
|
||||||
return this.currentState; |
|
||||||
} |
|
||||||
|
|
||||||
/** |
|
||||||
* Check if currently in a specific state |
|
||||||
* @param {string} stateName
|
|
||||||
*/ |
|
||||||
isInState(stateName) { |
|
||||||
return this.getCurrentStateName() === stateName.toLowerCase(); |
|
||||||
} |
|
||||||
|
|
||||||
/** |
|
||||||
* Delegate canvas click to current state |
|
||||||
*/ |
|
||||||
handleCanvasClick(event) { |
|
||||||
if (this.currentState) { |
|
||||||
this.currentState.handleCanvasClick(this.app, event); |
|
||||||
} |
|
||||||
} |
|
||||||
|
|
||||||
/** |
|
||||||
* Delegate node click to current state |
|
||||||
*/ |
|
||||||
handleNodeClick(node, event) { |
|
||||||
if (this.currentState) { |
|
||||||
this.currentState.handleNodeClick(this.app, node, event); |
|
||||||
} |
|
||||||
} |
|
||||||
|
|
||||||
/** |
|
||||||
* Delegate node double-click to current state |
|
||||||
*/ |
|
||||||
handleNodeDoubleClick(node) { |
|
||||||
if (this.currentState) { |
|
||||||
this.currentState.handleNodeDoubleClick(this.app, node); |
|
||||||
} |
|
||||||
} |
|
||||||
|
|
||||||
/** |
|
||||||
* Delegate drop event to current state |
|
||||||
*/ |
|
||||||
handleDrop(event) { |
|
||||||
if (this.currentState) { |
|
||||||
this.currentState.handleDrop(this.app, event); |
|
||||||
} |
|
||||||
} |
|
||||||
|
|
||||||
/** |
|
||||||
* Delegate keyboard event to current state |
|
||||||
*/ |
|
||||||
handleKeyDown(event) { |
|
||||||
if (this.currentState) { |
|
||||||
this.currentState.handleKeyDown(this.app, event); |
|
||||||
} |
|
||||||
} |
|
||||||
|
|
||||||
/** |
|
||||||
* Called when state changes - override for custom behavior |
|
||||||
*/ |
|
||||||
onStateChanged(previousState, newState) { |
|
||||||
// Could emit events, update analytics, etc.
|
|
||||||
|
|
||||||
// Update any debug UI
|
|
||||||
if (this.app.debugStateDisplay) { |
|
||||||
this.app.debugStateDisplay.textContent = `State: ${newState.getStateName()}`; |
|
||||||
} |
|
||||||
} |
|
||||||
|
|
||||||
/** |
|
||||||
* Get available states for debugging/UI |
|
||||||
*/ |
|
||||||
getAvailableStates() { |
|
||||||
return Object.keys(this.states); |
|
||||||
} |
|
||||||
|
|
||||||
/** |
|
||||||
* Force change to design state (safe reset) |
|
||||||
*/ |
|
||||||
resetToDesignState() { |
|
||||||
this.changeState('design'); |
|
||||||
} |
|
||||||
} |
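// A minimal usage sketch, assuming `app` is a fully initialised CanvasApp (with
// arrowToolBtn, canvas and the observer subjects that DesignState/ConnectionState touch
// in their enter()/exit() hooks). Only the state names registered in `this.states`
// ('design' and 'connection') are valid arguments to changeState().
export function exampleStateMachineUsage(app) {
  const machine = new CanvasStateMachine(app);           // starts in the design state
  machine.toggleConnectionMode();                         // design -> connection
  const inConnection = machine.isInState('connection');   // true
  machine.resetToDesignState();                           // safe reset back to design
  return { inConnection, finalState: machine.getCurrentStateName() }; // 'design'
}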
|
||||||
|
|
||||||
@ -1,113 +0,0 @@ |
|||||||
/** |
|
||||||
* Connection State - Arrow mode for connecting components |
|
||||||
*
|
|
||||||
* In this state, users can: |
|
||||||
* - Click nodes to start/end connections |
|
||||||
* - See visual feedback for connection process |
|
||||||
* - Cannot edit properties (properties panel disabled) |
|
||||||
*/ |
|
||||||
|
|
||||||
import { CanvasState } from './CanvasState.js'; |
|
||||||
import { Connection } from '../connection.js'; |
|
||||||
|
|
||||||
export class ConnectionState extends CanvasState { |
|
||||||
enter(app) { |
|
||||||
super.enter(app); |
|
||||||
|
|
||||||
// Update UI to reflect connection mode
|
|
||||||
app.arrowToolBtn.classList.add('active'); |
|
||||||
app.canvas.style.cursor = this.getCursor(); |
|
||||||
|
|
||||||
// Hide properties panel (connection mode disables editing)
|
|
||||||
if (app.selectedNode) { |
|
||||||
app.propertiesPanelSubject.notifyPropertiesPanelClosed(app.selectedNode); |
|
||||||
} |
|
||||||
|
|
||||||
// Notify observers that connection mode is enabled
|
|
||||||
app.connectionModeSubject.notifyConnectionModeChanged(true); |
|
||||||
} |
|
||||||
|
|
||||||
exit(app) { |
|
||||||
super.exit(app); |
|
||||||
|
|
||||||
// Clear any pending connection
|
|
||||||
if (app.connectionStart) { |
|
||||||
app.connectionStart.group.classList.remove('selected'); |
|
||||||
app.connectionStart = null; |
|
||||||
} |
|
||||||
} |
|
||||||
|
|
||||||
handleNodeClick(app, node, event) { |
|
||||||
event.stopPropagation(); |
|
||||||
|
|
||||||
// Clear any selected connection when starting a new connection
|
|
||||||
if (app.selectedConnection) { |
|
||||||
app.selectedConnection.deselect(); |
|
||||||
app.selectedConnection = null; |
|
||||||
} |
|
||||||
|
|
||||||
if (!app.connectionStart) { |
|
||||||
// First click - start connection
|
|
||||||
app.connectionStart = node; |
|
||||||
node.group.classList.add('selected'); |
|
||||||
|
|
||||||
} else if (app.connectionStart === node) { |
|
||||||
// Clicked same node - cancel connection
|
|
||||||
app.connectionStart.group.classList.remove('selected'); |
|
||||||
app.connectionStart = null; |
|
||||||
|
|
||||||
} else { |
|
||||||
// Second click - complete connection
|
|
||||||
this.createConnection(app, app.connectionStart, node); |
|
||||||
app.connectionStart.group.classList.remove('selected'); |
|
||||||
app.connectionStart = null; |
|
||||||
} |
|
||||||
} |
|
||||||
|
|
||||||
handleNodeDoubleClick(app, node) { |
|
||||||
// In connection mode, double-click does nothing
|
|
||||||
// Properties panel is disabled in this state
|
|
||||||
} |
|
||||||
|
|
||||||
handleCanvasClick(app, event) { |
|
||||||
// Cancel any pending connection when clicking canvas
|
|
||||||
if (app.connectionStart) { |
|
||||||
app.connectionStart.group.classList.remove('selected'); |
|
||||||
app.connectionStart = null; |
|
||||||
} |
|
||||||
|
|
||||||
// Don't clear node selections in connection mode
|
|
||||||
// Users should be able to see what's selected while connecting
|
|
||||||
} |
|
||||||
|
|
||||||
/** |
|
||||||
* Create a connection between two nodes |
|
||||||
* @param {CanvasApp} app
|
|
||||||
* @param {ComponentNode} startNode
|
|
||||||
* @param {ComponentNode} endNode
|
|
||||||
*/ |
|
||||||
createConnection(app, startNode, endNode) { |
|
||||||
// Set up pending connection for modal
|
|
||||||
app.pendingConnection = { start: startNode, end: endNode }; |
|
||||||
|
|
||||||
// Setup connection modal (reuse existing modal logic)
|
|
||||||
Connection.setupModal(app); |
|
||||||
Connection.labelInput.value = 'Read traffic'; |
|
||||||
Connection.protocolInput.value = 'HTTP'; |
|
||||||
Connection.tlsCheckbox.checked = false; |
|
||||||
Connection.capacityInput.value = '1000'; |
|
||||||
Connection.modal.style.display = 'block'; |
|
||||||
} |
|
||||||
|
|
||||||
getCursor() { |
|
||||||
return 'crosshair'; |
|
||||||
} |
|
||||||
|
|
||||||
allowsPropertiesPanel() { |
|
||||||
return false; // Disable properties panel in connection mode
|
|
||||||
} |
|
||||||
|
|
||||||
getStateName() { |
|
||||||
return 'Connection'; |
|
||||||
} |
|
||||||
} |
|
||||||
@ -1,79 +0,0 @@ |
|||||||
/** |
|
||||||
* Design State - Default canvas interaction mode |
|
||||||
*
|
|
||||||
* In this state, users can: |
|
||||||
* - Place components from sidebar |
|
||||||
* - Select/deselect components |
|
||||||
* - Edit component properties |
|
||||||
* - Delete components |
|
||||||
*/ |
|
||||||
|
|
||||||
import { CanvasState } from './CanvasState.js'; |
|
||||||
|
|
||||||
export class DesignState extends CanvasState { |
|
||||||
enter(app) { |
|
||||||
super.enter(app); |
|
||||||
|
|
||||||
// Update UI to reflect design mode
|
|
||||||
app.arrowToolBtn.classList.remove('active'); |
|
||||||
app.canvas.style.cursor = this.getCursor(); |
|
||||||
|
|
||||||
// Clear any connection state
|
|
||||||
if (app.connectionStart) { |
|
||||||
app.connectionStart.group.classList.remove('selected'); |
|
||||||
app.connectionStart = null; |
|
||||||
} |
|
||||||
|
|
||||||
// Notify observers that connection mode is disabled
|
|
||||||
app.connectionModeSubject.notifyConnectionModeChanged(false); |
|
||||||
} |
|
||||||
|
|
||||||
handleNodeClick(app, node, event) { |
|
||||||
event.stopPropagation(); |
|
||||||
|
|
||||||
// Clear any selected connection when clicking a node
|
|
||||||
if (app.selectedConnection) { |
|
||||||
app.selectedConnection.deselect(); |
|
||||||
app.selectedConnection = null; |
|
||||||
} |
|
||||||
|
|
||||||
// Clear previous node selection and select this node
|
|
||||||
if (app.selectedNode && app.selectedNode !== node) { |
|
||||||
app.nodeSelectionSubject.notifyNodeDeselected(app.selectedNode); |
|
||||||
} |
|
||||||
|
|
||||||
// Select the clicked node
|
|
||||||
node.select(); |
|
||||||
app.nodeSelectionSubject.notifyNodeSelected(node); |
|
||||||
} |
|
||||||
|
|
||||||
handleNodeDoubleClick(app, node) { |
|
||||||
// Show properties panel for the node
|
|
||||||
app.propertiesPanelSubject.notifyPropertiesPanelRequested(node); |
|
||||||
} |
|
||||||
|
|
||||||
handleCanvasClick(app, event) { |
|
||||||
// Don't hide props panel if clicking on it
|
|
||||||
if (!app.nodePropsPanel.contains(event.target)) { |
|
||||||
// Use observer to notify that properties panel should be closed
|
|
||||||
if (app.selectedNode) { |
|
||||||
app.propertiesPanelSubject.notifyPropertiesPanelClosed(app.selectedNode); |
|
||||||
} |
|
||||||
} |
|
||||||
|
|
||||||
// Use base implementation for other clearing logic
|
|
||||||
super.handleCanvasClick(app, event); |
|
||||||
} |
|
||||||
|
|
||||||
getCursor() { |
|
||||||
return 'default'; |
|
||||||
} |
|
||||||
|
|
||||||
allowsPropertiesPanel() { |
|
||||||
return true; |
|
||||||
} |
|
||||||
|
|
||||||
getStateName() { |
|
||||||
return 'Design'; |
|
||||||
} |
|
||||||
} |