Browse Source

added a bunch of comments for understanding

pull/1/head
Stephanie Gredell 7 months ago
parent
commit
cfcb154a7d
  1. 71
      internal/simulation/engine.go
  2. 36
      internal/simulation/loadbalancer.go

71
internal/simulation/engine.go

@ -6,46 +6,65 @@ import (
"systemdesigngame/internal/design" "systemdesigngame/internal/design"
) )
// Request is the unit of traffic that flows through the simulated system.
type Request struct {
	ID string
	// Timestamp is the tick at which the request was created.
	Timestamp int
	// LatencyMS is the total time this request has spent in the system.
	LatencyMS int
	// Origin is the node ID where the request entered the system.
	Origin string
	// Type is the request kind, e.g. GET or POST.
	Type string
	// Path records every node visited so far (used to prevent routing loops).
	Path []string
}
// Every node implements this interface and is used by the engine to operate all nodes in a uniform way.
type SimulationNode interface { type SimulationNode interface {
GetID() string GetID() string
Type() string Type() string
Tick(tick int, currentTimeMs int) Tick(tick int, currentTimeMs int) // Advance the node's state
Receive(req *Request) Receive(req *Request) // Accept new requests
Emit() []*Request Emit() []*Request
IsAlive() bool IsAlive() bool
GetTargets() []string GetTargets() []string // Connection to other nodes
GetQueue() []*Request GetQueue() []*Request // Requests currently pending
} }
type Engine struct { type Engine struct {
Nodes map[string]SimulationNode // Map of Node ID -> actual node, Represents all nodes in the graph
Nodes map[string]SimulationNode
// all tick snapshots
Timeline []*TickSnapshot Timeline []*TickSnapshot
// how many ticks to run
Duration int Duration int
TickMs int // no used here but we could use it if we want it configurable
TickMs int
} }
// what hte system looks like given a tick
type TickSnapshot struct { type TickSnapshot struct {
TickMs int TickMs int
// Queue size at each node
QueueSizes map[string]int QueueSizes map[string]int
NodeHealth map[string]NodeState NodeHealth map[string]NodeState
Emitted map[string][]*Request // what each node output that tick before routing
Emitted map[string][]*Request
} }
// NodeState records one node's health at a tick, for tracking and debugging.
type NodeState struct {
	QueueSize int
	Alive     bool
}
// Takes a level design and produces a runnable engine from it.
func NewEngineFromDesign(design design.Design, duration int, tickMs int) *Engine { func NewEngineFromDesign(design design.Design, duration int, tickMs int) *Engine {
// Iterate over each nodes and then construct the simulation nodes
// Each constructed simulation node is then stored in the nodeMap
nodeMap := make(map[string]SimulationNode) nodeMap := make(map[string]SimulationNode)
for _, n := range design.Nodes { for _, n := range design.Nodes {
@ -179,17 +198,21 @@ func NewEngineFromDesign(design design.Design, duration int, tickMs int) *Engine
} }
func (e *Engine) Run() { func (e *Engine) Run() {
// clear and set defaults
const tickMS = 100 const tickMS = 100
currentTimeMs := 0 currentTimeMs := 0
e.Timeline = e.Timeline[:0] e.Timeline = e.Timeline[:0]
// start ticking. This is really where the simulation begins
for tick := 0; tick < e.Duration; tick++ { for tick := 0; tick < e.Duration; tick++ {
// find the entry points (where traffic enters) or else print a warning
entries := e.findEntryPoints() entries := e.findEntryPoints()
if len(entries) == 0 { if len(entries) == 0 {
fmt.Println("[ERROR] No entry points found! Simulation will not inject requests.") fmt.Println("[ERROR] No entry points found! Simulation will not inject requests.")
} }
// inject new requests // inject new requests of each entry node every tick
for _, node := range entries { for _, node := range entries {
if shouldInject(tick) { if shouldInject(tick) {
req := &Request{ req := &Request{
@ -204,22 +227,26 @@ func (e *Engine) Run() {
} }
} }
// snapshot for this tick // snapshot to record what happened this tick
snapshot := &TickSnapshot{ snapshot := &TickSnapshot{
TickMs: tick, TickMs: tick,
NodeHealth: make(map[string]NodeState), NodeHealth: make(map[string]NodeState),
} }
for id, node := range e.Nodes { for id, node := range e.Nodes {
// capture health data before processing
snapshot.NodeHealth[id] = NodeState{ snapshot.NodeHealth[id] = NodeState{
QueueSize: len(node.GetQueue()), QueueSize: len(node.GetQueue()),
Alive: node.IsAlive(), Alive: node.IsAlive(),
} }
// tick all nodes // tick all nodes
node.Tick(tick, currentTimeMs) node.Tick(tick, currentTimeMs)
// emit and forward requests to connected nodes // get all processed requets and fan it out to all connected targets
for _, req := range node.Emit() { for _, req := range node.Emit() {
snapshot.Emitted[node.GetID()] = append(snapshot.Emitted[node.GetID()], req)
for _, targetID := range node.GetTargets() { for _, targetID := range node.GetTargets() {
if target, ok := e.Nodes[targetID]; ok && target.IsAlive() && !hasVisited(req, targetID) { if target, ok := e.Nodes[targetID]; ok && target.IsAlive() && !hasVisited(req, targetID) {
// Deep copy request and update path // Deep copy request and update path
@ -233,6 +260,7 @@ func (e *Engine) Run() {
} }
// store the snapshot and advance time
e.Timeline = append(e.Timeline, snapshot) e.Timeline = append(e.Timeline, snapshot)
currentTimeMs += tickMS currentTimeMs += tickMS
} }
@ -264,11 +292,18 @@ func generateRequestID(tick int) string {
} }
// asFloat64 coerces a dynamically-typed value (e.g. a field decoded from
// JSON) to float64. Nil and unsupported types yield 0.
func asFloat64(v interface{}) float64 {
	if f, ok := v.(float64); ok {
		return f
	}
	if i, ok := v.(int); ok {
		return float64(i)
	}
	if i, ok := v.(int64); ok {
		return float64(i)
	}
	if f, ok := v.(float32); ok {
		return float64(f)
	}
	// nil or anything else falls back to zero.
	return 0
}
func asString(v interface{}) string { func asString(v interface{}) string {

36
internal/simulation/loadbalancer.go

@ -5,13 +5,21 @@ import (
) )
type LoadBalancerNode struct { type LoadBalancerNode struct {
ID string // unique identifier for the node
Label string ID string
// human readable name
Label string
// load balancing strategy
Algorithm string Algorithm string
Queue []*Request // list of incoming requests to be processed
Targets []string Queue []*Request
Counter int // IDs of downstream nodes (e.g. webservers)
Alive bool Targets []string
// use to track round-robin state (i.e. which target is next)
Counter int
// bool for health check
Alive bool
// requests that this node has handled (ready to be emitted)
Processed []*Request Processed []*Request
} }
@ -27,39 +35,47 @@ func (lb *LoadBalancerNode) IsAlive() bool {
return lb.Alive return lb.Alive
} }
// Acceps an incoming request by adding it to the Queue which will be processed on the next tick
func (lb *LoadBalancerNode) Receive(req *Request) { func (lb *LoadBalancerNode) Receive(req *Request) {
lb.Queue = append(lb.Queue, req) lb.Queue = append(lb.Queue, req)
} }
func (lb *LoadBalancerNode) Tick(tick int, currentTimeMs int) { func (lb *LoadBalancerNode) Tick(tick int, currentTimeMs int) {
// clear out the process so it starts fresh
lb.Processed = nil lb.Processed = nil
// for each pending request...
for _, req := range lb.Queue { for _, req := range lb.Queue {
// if there are no targets to forward to, skip processing
if len(lb.Targets) == 0 { if len(lb.Targets) == 0 {
continue continue
} }
var target string // placeholder for algorithm-specific logic. TODO.
switch lb.Algorithm { switch lb.Algorithm {
case "random": case "random":
target = lb.Targets[rand.Intn(len(lb.Targets))] fallthrough
case "round-robin": case "round-robin":
fallthrough fallthrough
default: default:
target = lb.Targets[lb.Counter%len(lb.Targets)]
lb.Counter++ lb.Counter++
} }
req.Path = append([]string{target}, req.Path...) // Append the load balancer's ID to the request's path to record it's journey through the system
req.Path = append(req.Path, lb.ID)
// Simulate networking delay
req.LatencyMS += 10 req.LatencyMS += 10
// Mark the request as processed so it can be emitted to targets
lb.Processed = append(lb.Processed, req) lb.Processed = append(lb.Processed, req)
} }
// clear the queue after processing. Ready for next tick.
lb.Queue = lb.Queue[:0] lb.Queue = lb.Queue[:0]
} }
// return the list of process requests and then clear the processed requests
func (lb *LoadBalancerNode) Emit() []*Request { func (lb *LoadBalancerNode) Emit() []*Request {
out := lb.Processed out := lb.Processed
lb.Processed = nil lb.Processed = nil

Loading…
Cancel
Save