This guide covers performance optimization strategies for Ion components.
Ion is designed for high performance. To verify that on your own workloads, use Go's built-in benchmarking:
# Run all benchmarks
go test -bench=. -benchmem ./...
# Run specific benchmark
go test -bench=BenchmarkCircuitBreaker_Execute -benchmem ./circuit
# Run with race detector
go test -bench=. -race ./...
Example output:
BenchmarkCircuitBreaker_Execute-8    10000000    150 ns/op    0 B/op    0 allocs/op
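As a starting point for your own measurements, a benchmark of the circuit breaker fast path might look like the sketch below. It assumes the circuit.New and Execute signatures used later in this guide, plus the standard testing and context packages.
// Sketch: benchmark the breaker's closed-state fast path.
func BenchmarkCircuitBreaker_Execute(b *testing.B) {
    cb := circuit.New("bench")
    ctx := context.Background()
    b.ReportAllocs()
    b.ResetTimer()
    for i := 0; i < b.N; i++ {
        _, _ = cb.Execute(ctx, func(ctx context.Context) (any, error) {
            return nil, nil
        })
    }
}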
Circuit breakers use atomic operations for state checks, which are very fast. However, you can optimize further:
// Good: Fast path check
if !cb.allowRequest() {
return nil, NewCircuitOpenError(cb.name)
}
Keep failure predicates simple:
// Good: Simple predicate
circuit.WithFailurePredicate(func(err error) bool {
return err != nil && isServerError(err)
})
// Bad: Complex predicate with allocations
circuit.WithFailurePredicate(func(err error) bool {
return strings.Contains(err.Error(), "timeout") // Allocates
})
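If you genuinely need to detect timeouts, a typed error check avoids the err.Error() allocation. One possible alternative using the standard errors and net packages (isServerError above is assumed to be an application helper):
// Typed check: no string formatting or substring search on the hot path.
circuit.WithFailurePredicate(func(err error) bool {
    var netErr net.Error
    return errors.As(err, &netErr) && netErr.Timeout()
})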
Disable metrics if not needed:
// For high-throughput scenarios, use no-op metrics
obs := observe.New() // No-op by default
cb := circuit.New("service", circuit.WithObservability(obs))
The token bucket limiter uses a mutex for synchronization. For very high throughput, consider sharding:
// Use multiple limiters for sharding
limiters := make([]ratelimit.Limiter, shardCount)
for i := range limiters {
limiters[i] = ratelimit.NewTokenBucket(rate/shardCount, burst/shardCount)
}
// Route requests to different limiters
limiter := limiters[hash(key)%shardCount]
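The hash helper is not defined in this guide; any cheap, stable hash works. One possible implementation using hash/fnv from the standard library:
// FNV-1a shard selector; the mask keeps the result non-negative for modulo.
func hash(key string) int {
    h := fnv.New32a()
    _, _ = h.Write([]byte(key))
    return int(h.Sum32() & 0x7fffffff)
}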
The leaky bucket limiter is optimized for steady-state processing:
// Good: Appropriate capacity
limiter := ratelimit.NewLeakyBucket(
ratelimit.PerSecond(1000),
2000, // 2x rate for burst capacity
)
The multi-tier limiter uses sync.Map for route/resource lookups:
// Pre-warm routes for better performance
for route := range routeConfigs {
limiter.GetRouteLimiter(route) // Pre-create
}
Fairness mode affects performance:
// Maximum performance: None fairness
sem := semaphore.NewWeighted(100, semaphore.WithFairness(semaphore.None))
// Balanced: FIFO fairness (default)
sem := semaphore.NewWeighted(100, semaphore.WithFairness(semaphore.FIFO))
Performance order: None > LIFO > FIFO.
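If fairness overhead matters for your workload, measure it rather than guessing. The sketch below assumes Acquire/Release methods in the style of x/sync's semaphore.Weighted and a Fairness type for the mode constants; check the semaphore package for the exact API.
// Rough comparison of fairness modes under contention (API assumed).
func BenchmarkSemaphoreFairness(b *testing.B) {
    modes := map[string]semaphore.Fairness{
        "none": semaphore.None,
        "fifo": semaphore.FIFO,
    }
    for name, mode := range modes {
        b.Run(name, func(b *testing.B) {
            sem := semaphore.NewWeighted(100, semaphore.WithFairness(mode))
            ctx := context.Background()
            b.RunParallel(func(pb *testing.PB) {
                for pb.Next() {
                    if err := sem.Acquire(ctx, 1); err == nil {
                        sem.Release(1)
                    }
                }
            })
        })
    }
}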
Use multiple semaphores for different resource types:
// Instead of one large semaphore
dbSem := semaphore.NewWeighted(10)
fileSem := semaphore.NewWeighted(5)
apiSem := semaphore.NewWeighted(20)
Size workers based on workload:
// CPU-bound: Number of CPUs
pool := workerpool.New(runtime.NumCPU(), 100)
// I/O-bound: More workers
pool := workerpool.New(runtime.NumCPU()*2, 200)
Size the queue based on expected backlog:
// Too small: Frequent rejections
pool := workerpool.New(10, 5) // Bad
// Too large: Memory waste
pool := workerpool.New(10, 10000) // Bad
// Balanced: Based on workload
pool := workerpool.New(10, 100) // Good
Minimize task wrapper overhead:
// Good: Minimal wrapper
pool := workerpool.New(10, 100,
workerpool.WithTaskWrapper(func(task workerpool.Task) workerpool.Task {
return task // Minimal overhead
}),
)
// Bad: Heavy wrapper
pool := workerpool.New(10, 100,
workerpool.WithTaskWrapper(func(task workerpool.Task) workerpool.Task {
return func(ctx context.Context) error {
// Heavy instrumentation
start := time.Now()
defer func() {
// Complex logging/metrics using time.Since(start)
_ = time.Since(start)
}()
return task(ctx)
}
}),
)
No-op implementations have zero overhead:
// Zero overhead when not configured
obs := observe.New() // No-op logger, metrics, tracer
Use structured logging efficiently:
// Good: Minimal allocations
logger.Info("message", "key", value)
// Bad: String formatting allocates
logger.Info(fmt.Sprintf("message: %v", value))
Batch metrics when possible:
// Instead of individual increments
for i := 0; i < 100; i++ {
metrics.Inc("counter") // 100 calls
}
// Batch if your metrics system supports it
metrics.Add("counter", 100) // 1 call
Reuse components instead of creating new ones:
// Good: Reuse
var globalCB circuit.CircuitBreaker
func init() {
globalCB = circuit.New("service")
}
// Bad: Create new each time
func handler() {
cb := circuit.New("service") // Allocates
}
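On Go 1.21+, sync.OnceValue is a tidy alternative to init() for sharing a single instance:
// Lazily create one shared breaker; every call returns the same instance.
var serviceBreaker = sync.OnceValue(func() circuit.CircuitBreaker {
    return circuit.New("service")
})
// In handlers: cb := serviceBreaker()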
Use object pooling for high-frequency allocations:
var requestPool = sync.Pool{
New: func() any {
return &Request{}
},
}
func getRequest() *Request {
return requestPool.Get().(*Request)
}
func putRequest(r *Request) {
r.Reset()
requestPool.Put(r)
}
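Typical usage pairs getRequest with a deferred putRequest so the object always returns to the pool; Reset must clear all fields so state never leaks between uses. A hypothetical handler:
func handleWithPool(ctx context.Context) error {
    req := getRequest()
    defer putRequest(req)
    // ... populate and process req ...
    return nil
}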
Use multiple components to reduce contention:
// Instead of one large component
limiters := make([]ratelimit.Limiter, shardCount)
for i := range limiters {
limiters[i] = ratelimit.NewTokenBucket(rate/shardCount, burst/shardCount)
}
Check conditions before expensive operations:
// Good: Fast path check
if !limiter.AllowN(time.Now(), 1) {
return errors.New("rate limited")
}
// Expensive operation only if allowed
// Bad: Always do expensive work
result := expensiveOperation()
if !limiter.AllowN(time.Now(), 1) {
return errors.New("rate limited")
}
Identify CPU bottlenecks:
go test -cpuprofile=cpu.prof -bench=. ./...
go tool pprof cpu.prof
Identify memory allocations:
go test -memprofile=mem.prof -bench=. ./...
go tool pprof mem.prof
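Once a profile is loaded in go tool pprof, a few built-in commands cover most investigations:
# Inside the interactive pprof session
top              # heaviest functions by flat/cumulative cost
list <funcname>  # annotated source for a function
web              # call graph visualization (requires Graphviz)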
Analyze execution traces:
go test -trace=trace.out -bench=. ./...
go tool trace trace.out
Putting these techniques together in a single gateway:
// Optimized configuration for high throughput
type OptimizedGateway struct {
// Sharded rate limiters
limiters []ratelimit.Limiter
// Per-route circuit breakers (reused)
circuits map[string]circuit.CircuitBreaker
// Worker pool for request processing
pool *workerpool.Pool
}
func NewOptimizedGateway() *OptimizedGateway {
// Shard rate limiters
shardCount := runtime.NumCPU()
limiters := make([]ratelimit.Limiter, shardCount)
for i := range limiters {
limiters[i] = ratelimit.NewTokenBucket(
ratelimit.PerSecond(1000/shardCount),
2000/shardCount,
)
}
// Reuse circuit breakers
circuits := make(map[string]circuit.CircuitBreaker)
// Sized worker pool
pool := workerpool.New(runtime.NumCPU()*2, 200)
return &OptimizedGateway{
limiters: limiters,
circuits: circuits,
pool: pool,
}
}
func (g *OptimizedGateway) HandleRequest(ctx context.Context, req *Request) error {
// Shard rate limiting
limiter := g.limiters[hash(req.UserID)%len(g.limiters)]
if !limiter.AllowN(time.Now(), 1) {
return errors.New("rate limited")
}
// Get or create circuit breaker
cb := g.getCircuitBreaker(req.Route)
// Process in worker pool
return g.pool.Submit(ctx, func(ctx context.Context) error {
_, err := cb.Execute(ctx, func(ctx context.Context) (any, error) {
return g.processRequest(ctx, req)
})
return err
})
}
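The gateway example leaves getCircuitBreaker (and processRequest) undefined. Because circuits is a plain map written from concurrent requests, any implementation needs synchronization; one possible sketch, assuming a mu sync.Mutex field is added to OptimizedGateway (sync.Map is an alternative):
func (g *OptimizedGateway) getCircuitBreaker(route string) circuit.CircuitBreaker {
    g.mu.Lock()
    defer g.mu.Unlock()
    cb, ok := g.circuits[route]
    if !ok {
        // Created once per route, then reused for every request.
        cb = circuit.New(route)
        g.circuits[route] = cb
    }
    return cb
}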