# ratelimit — API Documentation
Complete API documentation for the ratelimit package.
Import Path: github.com/kolosys/ion/ratelimit
Package ratelimit provides local process rate limiters for controlling function and I/O throughput. It includes token bucket and leaky bucket implementations with configurable options.
Clock abstracts time operations for testability.
// Example implementation of Clock
type MyClock struct {
// Add your fields here
}
func (m MyClock) Now() time.Time {
// Implement your logic here
return
}
func (m MyClock) Sleep(param1 time.Duration) {
// Implement your logic here
return
}
func (m MyClock) AfterFunc(param1 time.Duration, param2 func()) Timer {
// Implement your logic here
return
}
type Clock interface {
Now() time.Time
Sleep(time.Duration)
AfterFunc(time.Duration, func()) Timer
}| Method | Description |
|---|---|
LeakyBucket implements a leaky bucket rate limiter. Requests are added to the bucket, and the bucket leaks at a constant rate. If the bucket is full, requests are denied or must wait.
// Create a new LeakyBucket
leakybucket := LeakyBucket{
}type LeakyBucket struct {
}NewLeakyBucket creates a new leaky bucket rate limiter. rate determines how fast the bucket leaks (processes requests). capacity is the maximum number of requests the bucket can hold.
func NewLeakyBucket(rate Rate, capacity int, opts ...Option) *LeakyBucketParameters:
rate (Rate)capacity (int)opts (...Option)Returns:
AllowN reports whether n requests can be added to the bucket at time now. It returns true if the requests were accepted, false otherwise.
func (*LeakyBucket) AllowN(now time.Time, n int) boolParameters:
now (time.Time)n (int)Returns:
Available returns the number of requests that can be immediately accepted.
func (*LeakyBucket) Available() intParameters: None
Returns:
Capacity returns the bucket capacity.
func (*LeakyBucket) Capacity() intParameters: None
Returns:
Level returns the current level of the bucket.
func (*LeakyBucket) Level() float64Parameters: None
Returns:
Rate returns the current leak rate.
func (*LeakyBucket) Rate() RateParameters: None
Returns:
WaitN blocks until n requests can be added to the bucket or the context is canceled.
func (*LeakyBucket) WaitN(ctx context.Context, n int) errorParameters:
ctx (context.Context)n (int)Returns:
Limiter represents a rate limiter that controls the rate at which events are allowed to occur.
// Example implementation of Limiter
type MyLimiter struct {
// Add your fields here
}
func (m MyLimiter) AllowN(param1 time.Time, param2 int) bool {
// Implement your logic here
return
}
func (m MyLimiter) WaitN(param1 context.Context, param2 int) error {
// Implement your logic here
return
}
type Limiter interface {
AllowN(now time.Time, n int) bool
WaitN(ctx context.Context, n int) error
}| Method | Description |
|---|---|
MultiTierConfig holds configuration for multi-tier rate limiting.
// Create a new MultiTierConfig
multitierconfig := MultiTierConfig{
GlobalRate: Rate{},
GlobalBurst: 42,
DefaultRouteRate: Rate{},
DefaultRouteBurst: 42,
DefaultResourceRate: Rate{},
DefaultResourceBurst: 42,
QueueSize: 42,
EnablePreemptive: true,
EnableBucketMapping: true,
BucketTTL: /* value */,
RoutePatterns: map[string]RouteConfig{},
}type MultiTierConfig struct {
GlobalRate Rate
GlobalBurst int
DefaultRouteRate Rate
DefaultRouteBurst int
DefaultResourceRate Rate
DefaultResourceBurst int
QueueSize int
EnablePreemptive bool
EnableBucketMapping bool
BucketTTL time.Duration
RoutePatterns map[string]RouteConfig
}| Field | Type | Description |
|---|---|---|
| GlobalRate | Rate | Global rate limit configuration |
| GlobalBurst | int | |
| DefaultRouteRate | Rate | Default rate limits for routes and resources |
| DefaultRouteBurst | int | |
| DefaultResourceRate | Rate | |
| DefaultResourceBurst | int | |
| QueueSize | int | Queue configuration for request management |
| EnablePreemptive | bool | |
| EnableBucketMapping | bool | Bucket management |
| BucketTTL | time.Duration | |
| RoutePatterns | map[string]RouteConfig | Route pattern matching |
DefaultMultiTierConfig returns a default configuration for multi-tier rate limiting. Applications should customize this configuration for their specific needs.
func DefaultMultiTierConfig() *MultiTierConfigParameters: None
Returns:
MultiTierLimiter implements a sophisticated multi-tier rate limiting system. It supports global, per-route, and per-resource rate limiting with intelligent bucket management and flexible API compatibility.
// Create a new MultiTierLimiter
multitierlimiter := MultiTierLimiter{
}type MultiTierLimiter struct {
}NewMultiTierLimiter creates a new multi-tier rate limiter.
func NewMultiTierLimiter(config *MultiTierConfig, opts ...Option) *MultiTierLimiterParameters:
config (*MultiTierConfig)opts (...Option)Returns:
Allow checks if a request is allowed without blocking.
func (*MultiTierLimiter) Allow(req *Request) boolParameters:
req (*Request)Returns:
AllowN checks if n requests are allowed without blocking.
func (*MultiTierLimiter) AllowN(now time.Time, n int) boolParameters:
now (time.Time)n (int)Returns:
GetMetrics returns current rate limiting metrics.
func (*MultiTierLimiter) GetMetrics() *MultiTierMetricsParameters: None
Returns:
Reset resets all rate limit buckets (useful for testing).
func (*MultiTierLimiter) Reset()Parameters: None
Returns: None
UpdateRateLimitFromHeaders updates rate limit information from API response headers. This is designed for APIs that provide rate limit information in response headers.
func (*MultiTierLimiter) UpdateRateLimitFromHeaders(req *Request, headers map[string]string) errorParameters:
req (*Request)headers (map[string]string)Returns:
Wait blocks until the request is allowed or context is canceled.
func (*MultiTierLimiter) Wait(req *Request) errorParameters:
req (*Request)Returns:
WaitN blocks until n requests are allowed or context is canceled.
func (*MultiTierLimiter) WaitN(ctx context.Context, n int) errorParameters:
ctx (context.Context)n (int)Returns:
MultiTierMetrics tracks metrics for multi-tier rate limiting.
// Create a new MultiTierMetrics
multitiermetrics := MultiTierMetrics{
TotalRequests: 42,
GlobalLimitHits: 42,
RouteLimitHits: 42,
ResourceLimitHits: 42,
QueuedRequests: 42,
DroppedRequests: 42,
AvgWaitTime: /* value */,
MaxWaitTime: /* value */,
BucketsActive: 42,
}type MultiTierMetrics struct {
TotalRequests int64
GlobalLimitHits int64
RouteLimitHits int64
ResourceLimitHits int64
QueuedRequests int64
DroppedRequests int64
AvgWaitTime time.Duration
MaxWaitTime time.Duration
BucketsActive int64
}| Field | Type | Description |
|---|---|---|
| TotalRequests | int64 | |
| GlobalLimitHits | int64 | |
| RouteLimitHits | int64 | |
| ResourceLimitHits | int64 | |
| QueuedRequests | int64 | |
| DroppedRequests | int64 | |
| AvgWaitTime | time.Duration | |
| MaxWaitTime | time.Duration | |
| BucketsActive | int64 |
Option configures rate limiter behavior.
// Example usage of Option
var value Option
// Initialize with appropriate valuetype Option func(*config)WithClock sets a custom clock implementation (useful for testing).
func WithClock(clock Clock) OptionParameters:
clock (Clock)Returns:
WithJitter sets the jitter factor for WaitN operations (0.0 to 1.0). Jitter helps prevent thundering herd problems by randomizing wait times.
func WithJitter(jitter float64) OptionParameters:
jitter (float64)Returns:
WithLogger sets the logger for observability.
func WithLogger(logger observe.Logger) OptionParameters:
logger (observe.Logger)Returns:
WithMetrics sets the metrics recorder for observability.
func WithMetrics(metrics observe.Metrics) OptionParameters:
metrics (observe.Metrics)Returns:
WithName sets the rate limiter name for observability and error reporting.
func WithName(name string) OptionParameters:
name (string)Returns:
WithTracer sets the tracer for observability.
func WithTracer(tracer observe.Tracer) OptionParameters:
tracer (observe.Tracer)Returns:
Rate represents the rate at which tokens are added to the bucket.
// Create a new Rate
rate := Rate{
TokensPerSec: 3.14,
}type Rate struct {
TokensPerSec float64
}| Field | Type | Description |
|---|---|---|
| TokensPerSec | float64 |
NewRate creates a new Rate from the given number of tokens per time duration.
func NewRate(tokens int, duration time.Duration) RateParameters:
tokens (int)duration (time.Duration)Returns:
Per is a convenience function for creating rates. For example: Per(100, time.Second) creates a rate of 100 tokens per second.
func Per(tokens int, duration time.Duration) RateParameters:
tokens (int)duration (time.Duration)Returns:
PerHour creates a rate of the given number of tokens per hour.
func PerHour(tokens int) RateParameters:
tokens (int)Returns:
PerMinute creates a rate of the given number of tokens per minute.
func PerMinute(tokens int) RateParameters:
tokens (int)Returns:
PerSecond creates a rate of the given number of tokens per second.
func PerSecond(tokens int) RateParameters:
tokens (int)Returns:
String returns a string representation of the rate.
func (Rate) String() stringParameters: None
Returns:
RateLimitError represents rate limiting specific errors with context
// Create a new RateLimitError
ratelimiterror := RateLimitError{
Op: "example",
LimiterName: "example",
Err: errors.New("example"),
RetryAfter: /* value */,
Global: true,
Bucket: "example",
Remaining: 42,
Limit: 42,
}type RateLimitError struct {
Op string
LimiterName string
Err error
RetryAfter time.Duration
Global bool
Bucket string
Remaining int
Limit int
}| Field | Type | Description |
|---|---|---|
| Op | string | operation that failed |
| LimiterName | string | name of the rate limiter |
| Err | error | underlying error |
| RetryAfter | time.Duration | suggested retry delay |
| Global | bool | whether this is a global rate limit |
| Bucket | string | rate limit bucket identifier |
| Remaining | int | remaining requests in bucket |
| Limit | int | total limit for bucket |
func (*RateLimitError) Error() stringParameters: None
Returns:
IsRetryable returns true if the rate limit error suggests retrying.
func (*RateLimitError) IsRetryable() boolParameters: None
Returns:
func (*RateLimitError) Unwrap() errorParameters: None
Returns:
Request represents a request for rate limiting evaluation.
// Create a new Request
request := Request{
Method: "example",
Endpoint: "example",
ResourceID: "example",
SubResourceID: "example",
UserID: "example",
MajorParameters: map[string]string{},
Priority: 42,
Context: /* value */,
}type Request struct {
Method string
Endpoint string
ResourceID string
SubResourceID string
UserID string
MajorParameters map[string]string
Priority int
Context context.Context
}| Field | Type | Description |
|---|---|---|
| Method | string | Route information |
| Endpoint | string | |
| ResourceID | string | Resource identifiers (generic - applications define their own) |
| SubResourceID | string | Secondary resource identifier |
| UserID | string | User/actor identifier |
| MajorParameters | map[string]string | Major parameters for bucket identification |
| Priority | int | Request metadata |
| Context | context.Context |
RouteConfig defines rate limiting for specific route patterns.
// Create a new RouteConfig
routeconfig := RouteConfig{
Rate: Rate{},
Burst: 42,
MajorParameters: []string{},
}type RouteConfig struct {
Rate Rate
Burst int
MajorParameters []string
}| Field | Type | Description |
|---|---|---|
| Rate | Rate | |
| Burst | int | |
| MajorParameters | []string | Major parameters that affect rate limiting (e.g., org_id, project_id) |
Timer represents a timer that can be stopped.
// Example implementation of Timer
type MyTimer struct {
// Add your fields here
}
func (m MyTimer) Stop() bool {
// Implement your logic here
return
}
type Timer interface {
Stop() bool
}| Method | Description |
|---|---|
TokenBucket implements a token bucket rate limiter. Tokens are added to the bucket at a fixed rate, and requests consume tokens. If no tokens are available, requests must wait or are denied.
// Create a new TokenBucket
tokenbucket := TokenBucket{
}type TokenBucket struct {
}NewTokenBucket creates a new token bucket rate limiter. rate determines how fast tokens are added to the bucket. burst is the maximum number of tokens the bucket can hold.
func NewTokenBucket(rate Rate, burst int, opts ...Option) *TokenBucketParameters:
rate (Rate)burst (int)opts (...Option)Returns:
AllowN reports whether n tokens are available at time now. It returns true if the tokens were consumed, false otherwise.
func (*TokenBucket) AllowN(now time.Time, n int) boolParameters:
now (time.Time)n (int)Returns:
Burst returns the bucket capacity.
func (*TokenBucket) Burst() intParameters: None
Returns:
Rate returns the current token refill rate.
func (*TokenBucket) Rate() RateParameters: None
Returns:
Tokens returns the current number of available tokens.
func (*TokenBucket) Tokens() float64Parameters: None
Returns:
WaitN blocks until n tokens are available or the context is canceled.
func (*TokenBucket) WaitN(ctx context.Context, n int) errorParameters:
ctx (context.Context)n (int)Returns:
NewBucketLimitError creates an error for bucket-specific rate limits
func NewBucketLimitError(limiterName, bucket string, remaining, limit int, retryAfter time.Duration) errorParameters:
| Parameter | Type | Description |
|---|---|---|
limiterName | string | |
bucket | string | |
remaining | int | |
limit | int | |
retryAfter | time.Duration |
Returns:
| Type | Description |
|---|---|
error |
Example:
// Example usage of NewBucketLimitError
result := NewBucketLimitError(/* parameters */)NewGlobalRateLimitError creates an error for global rate limit hits
func NewGlobalRateLimitError(limiterName string, retryAfter time.Duration) errorParameters:
| Parameter | Type | Description |
|---|---|---|
limiterName | string | |
retryAfter | time.Duration |
Returns:
| Type | Description |
|---|---|
error |
Example:
// Example usage of NewGlobalRateLimitError
result := NewGlobalRateLimitError(/* parameters */)NewRateLimitExceededError creates an error indicating rate limit was exceeded
func NewRateLimitExceededError(limiterName string, retryAfter time.Duration) errorParameters:
| Parameter | Type | Description |
|---|---|---|
limiterName | string | |
retryAfter | time.Duration |
Returns:
| Type | Description |
|---|---|
error |
Example:
// Example usage of NewRateLimitExceededError
result := NewRateLimitExceededError(/* parameters */)Complete API documentation for the ratelimit package.
Import Path: github.com/kolosys/ion/ratelimit
Package ratelimit provides local process rate limiters for controlling function and I/O throughput. It includes token bucket and leaky bucket implementations with configurable options.
Clock abstracts time operations for testability.
// Example implementation of Clock
type MyClock struct {
// Add your fields here
}
func (m MyClock) Now() time.Time {
// Implement your logic here
return
}
func (m MyClock) Sleep(param1 time.Duration) {
// Implement your logic here
return
}
func (m MyClock) AfterFunc(param1 time.Duration, param2 func()) Timer {
// Implement your logic here
return
}
type Clock interface {
Now() time.Time
Sleep(time.Duration)
AfterFunc(time.Duration, func()) Timer
}| Method | Description |
|---|---|
LeakyBucket implements a leaky bucket rate limiter. Requests are added to the bucket, and the bucket leaks at a constant rate. If the bucket is full, requests are denied or must wait.
// Create a new LeakyBucket
leakybucket := LeakyBucket{
}type LeakyBucket struct {
}NewLeakyBucket creates a new leaky bucket rate limiter. rate determines how fast the bucket leaks (processes requests). capacity is the maximum number of requests the bucket can hold.
func NewLeakyBucket(rate Rate, capacity int, opts ...Option) *LeakyBucketParameters:
rate (Rate)capacity (int)opts (...Option)Returns:
AllowN reports whether n requests can be added to the bucket at time now. It returns true if the requests were accepted, false otherwise.
func (*LeakyBucket) AllowN(now time.Time, n int) boolParameters:
now (time.Time)n (int)Returns:
Available returns the number of requests that can be immediately accepted.
func (*LeakyBucket) Available() intParameters: None
Returns:
Capacity returns the bucket capacity.
func (*LeakyBucket) Capacity() intParameters: None
Returns:
Level returns the current level of the bucket.
func (*LeakyBucket) Level() float64Parameters: None
Returns:
Rate returns the current leak rate.
func (*LeakyBucket) Rate() RateParameters: None
Returns:
WaitN blocks until n requests can be added to the bucket or the context is canceled.
func (*LeakyBucket) WaitN(ctx context.Context, n int) errorParameters:
ctx (context.Context)n (int)Returns:
Limiter represents a rate limiter that controls the rate at which events are allowed to occur.
// Example implementation of Limiter
type MyLimiter struct {
// Add your fields here
}
func (m MyLimiter) AllowN(param1 time.Time, param2 int) bool {
// Implement your logic here
return
}
func (m MyLimiter) WaitN(param1 context.Context, param2 int) error {
// Implement your logic here
return
}
type Limiter interface {
AllowN(now time.Time, n int) bool
WaitN(ctx context.Context, n int) error
}| Method | Description |
|---|---|
MultiTierConfig holds configuration for multi-tier rate limiting.
// Create a new MultiTierConfig
multitierconfig := MultiTierConfig{
GlobalRate: Rate{},
GlobalBurst: 42,
DefaultRouteRate: Rate{},
DefaultRouteBurst: 42,
DefaultResourceRate: Rate{},
DefaultResourceBurst: 42,
QueueSize: 42,
EnablePreemptive: true,
EnableBucketMapping: true,
BucketTTL: /* value */,
RoutePatterns: map[],
}type MultiTierConfig struct {
GlobalRate Rate
GlobalBurst int
DefaultRouteRate Rate
DefaultRouteBurst int
DefaultResourceRate Rate
DefaultResourceBurst int
QueueSize int
EnablePreemptive bool
EnableBucketMapping bool
BucketTTL time.Duration
RoutePatterns map[string]RouteConfig
}| Field | Type | Description |
|---|---|---|
| GlobalRate | Rate | Global rate limit configuration |
| GlobalBurst | int | |
| DefaultRouteRate | Rate | Default rate limits for routes and resources |
| DefaultRouteBurst | int | |
| DefaultResourceRate | Rate | |
| DefaultResourceBurst | int | |
| QueueSize | int | Queue configuration for request management |
| EnablePreemptive | bool | |
| EnableBucketMapping | bool | Bucket management |
| BucketTTL | time.Duration | |
| RoutePatterns | map[string]RouteConfig | Route pattern matching |
DefaultMultiTierConfig returns a default configuration for multi-tier rate limiting. Applications should customize this configuration for their specific needs.
func DefaultMultiTierConfig() *MultiTierConfigParameters: None
Returns:
MultiTierLimiter implements a sophisticated multi-tier rate limiting system. It supports global, per-route, and per-resource rate limiting with intelligent bucket management and flexible API compatibility.
// Create a new MultiTierLimiter
multitierlimiter := MultiTierLimiter{
}type MultiTierLimiter struct {
}NewMultiTierLimiter creates a new multi-tier rate limiter.
func NewMultiTierLimiter(config *MultiTierConfig, opts ...Option) *MultiTierLimiterParameters:
config (*MultiTierConfig)opts (...Option)Returns:
Allow checks if a request is allowed without blocking.
func (*MultiTierLimiter) Allow(req *Request) boolParameters:
req (*Request)Returns:
AllowN checks if n requests are allowed without blocking.
func (*MultiTierLimiter) AllowN(now time.Time, n int) boolParameters:
now (time.Time)n (int)Returns:
GetMetrics returns current rate limiting metrics.
func (*MultiTierLimiter) GetMetrics() *MultiTierMetricsParameters: None
Returns:
Reset resets all rate limit buckets (useful for testing).
func (*MultiTierLimiter) Reset()Parameters: None
Returns: None
UpdateRateLimitFromHeaders updates rate limit information from API response headers. This is designed for APIs that provide rate limit information in response headers.
func (*MultiTierLimiter) UpdateRateLimitFromHeaders(req *Request, headers map[string]string) errorParameters:
req (*Request)headers (map[string]string)Returns:
Wait blocks until the request is allowed or context is canceled.
func (*MultiTierLimiter) Wait(req *Request) errorParameters:
req (*Request)Returns:
WaitN blocks until n requests are allowed or context is canceled.
func (*MultiTierLimiter) WaitN(ctx context.Context, n int) errorParameters:
ctx (context.Context)n (int)Returns:
MultiTierMetrics tracks metrics for multi-tier rate limiting.
// Create a new MultiTierMetrics
multitiermetrics := MultiTierMetrics{
TotalRequests: 42,
GlobalLimitHits: 42,
RouteLimitHits: 42,
ResourceLimitHits: 42,
QueuedRequests: 42,
DroppedRequests: 42,
AvgWaitTime: /* value */,
MaxWaitTime: /* value */,
BucketsActive: 42,
}type MultiTierMetrics struct {
TotalRequests int64
GlobalLimitHits int64
RouteLimitHits int64
ResourceLimitHits int64
QueuedRequests int64
DroppedRequests int64
AvgWaitTime time.Duration
MaxWaitTime time.Duration
BucketsActive int64
}| Field | Type | Description |
|---|---|---|
| TotalRequests | int64 | |
| GlobalLimitHits | int64 | |
| RouteLimitHits | int64 | |
| ResourceLimitHits | int64 | |
| QueuedRequests | int64 | |
| DroppedRequests | int64 | |
| AvgWaitTime | time.Duration | |
| MaxWaitTime | time.Duration | |
| BucketsActive | int64 |
Option configures rate limiter behavior.
// Example usage of Option
var value Option
// Initialize with appropriate valuetype Option func(*config)WithClock sets a custom clock implementation (useful for testing).
func WithClock(clock Clock) OptionParameters:
clock (Clock)Returns:
WithJitter sets the jitter factor for WaitN operations (0.0 to 1.0). Jitter helps prevent thundering herd problems by randomizing wait times.
func WithJitter(jitter float64) OptionParameters:
jitter (float64)Returns:
WithLogger sets the logger for observability.
func WithLogger(logger observe.Logger) OptionParameters:
logger (observe.Logger)Returns:
WithMetrics sets the metrics recorder for observability.
func WithMetrics(metrics observe.Metrics) OptionParameters:
metrics (observe.Metrics)Returns:
WithName sets the rate limiter name for observability and error reporting.
func WithName(name string) OptionParameters:
name (string)Returns:
WithTracer sets the tracer for observability.
func WithTracer(tracer observe.Tracer) OptionParameters:
tracer (observe.Tracer)Returns:
Rate represents the rate at which tokens are added to the bucket.
// Create a new Rate
rate := Rate{
TokensPerSec: 3.14,
}type Rate struct {
TokensPerSec float64
}| Field | Type | Description |
|---|---|---|
| TokensPerSec | float64 |
NewRate creates a new Rate from the given number of tokens per time duration.
func NewRate(tokens int, duration time.Duration) RateParameters:
tokens (int)duration (time.Duration)Returns:
Per is a convenience function for creating rates. For example: Per(100, time.Second) creates a rate of 100 tokens per second.
func Per(tokens int, duration time.Duration) RateParameters:
tokens (int)duration (time.Duration)Returns:
PerHour creates a rate of the given number of tokens per hour.
func PerHour(tokens int) RateParameters:
tokens (int)Returns:
PerMinute creates a rate of the given number of tokens per minute.
func PerMinute(tokens int) RateParameters:
tokens (int)Returns:
PerSecond creates a rate of the given number of tokens per second.
func PerSecond(tokens int) RateParameters:
tokens (int)Returns:
String returns a string representation of the rate.
func (Rate) String() stringParameters: None
Returns:
RateLimitError represents rate limiting specific errors with context
// Create a new RateLimitError
ratelimiterror := RateLimitError{
Op: "example",
LimiterName: "example",
Err: error{},
RetryAfter: /* value */,
Global: true,
Bucket: "example",
Remaining: 42,
Limit: 42,
}type RateLimitError struct {
Op string
LimiterName string
Err error
RetryAfter time.Duration
Global bool
Bucket string
Remaining int
Limit int
}| Field | Type | Description |
|---|---|---|
| Op | string | operation that failed |
| LimiterName | string | name of the rate limiter |
| Err | error | underlying error |
| RetryAfter | time.Duration | suggested retry delay |
| Global | bool | whether this is a global rate limit |
| Bucket | string | rate limit bucket identifier |
| Remaining | int | remaining requests in bucket |
| Limit | int | total limit for bucket |
func (*RateLimitError) Error() stringParameters: None
Returns:
IsRetryable returns true if the rate limit error suggests retrying.
func (*RateLimitError) IsRetryable() boolParameters: None
Returns:
func (*RateLimitError) Unwrap() errorParameters: None
Returns:
Request represents a request for rate limiting evaluation.
// Create a new Request
request := Request{
Method: "example",
Endpoint: "example",
ResourceID: "example",
SubResourceID: "example",
UserID: "example",
MajorParameters: map[],
Priority: 42,
Context: /* value */,
}type Request struct {
Method string
Endpoint string
ResourceID string
SubResourceID string
UserID string
MajorParameters map[string]string
Priority int
Context context.Context
}| Field | Type | Description |
|---|---|---|
| Method | string | Route information |
| Endpoint | string | |
| ResourceID | string | Resource identifiers (generic - applications define their own) |
| SubResourceID | string | Secondary resource identifier |
| UserID | string | User/actor identifier |
| MajorParameters | map[string]string | Major parameters for bucket identification |
| Priority | int | Request metadata |
| Context | context.Context |
RouteConfig defines rate limiting for specific route patterns.
// Create a new RouteConfig
routeconfig := RouteConfig{
Rate: Rate{},
Burst: 42,
MajorParameters: [],
}type RouteConfig struct {
Rate Rate
Burst int
MajorParameters []string
}| Field | Type | Description |
|---|---|---|
| Rate | Rate | |
| Burst | int | |
| MajorParameters | []string | Major parameters that affect rate limiting (e.g., org_id, project_id) |
Timer represents a timer that can be stopped.
// Example implementation of Timer
type MyTimer struct {
// Add your fields here
}
func (m MyTimer) Stop() bool {
// Implement your logic here
return
}
type Timer interface {
Stop() bool
}| Method | Description |
|---|---|
TokenBucket implements a token bucket rate limiter. Tokens are added to the bucket at a fixed rate, and requests consume tokens. If no tokens are available, requests must wait or are denied.
// Create a new TokenBucket
tokenbucket := TokenBucket{
}type TokenBucket struct {
}NewTokenBucket creates a new token bucket rate limiter. rate determines how fast tokens are added to the bucket. burst is the maximum number of tokens the bucket can hold.
func NewTokenBucket(rate Rate, burst int, opts ...Option) *TokenBucketParameters:
rate (Rate)burst (int)opts (...Option)Returns:
AllowN reports whether n tokens are available at time now. It returns true if the tokens were consumed, false otherwise.
func (*TokenBucket) AllowN(now time.Time, n int) boolParameters:
now (time.Time)n (int)Returns:
Burst returns the bucket capacity.
func (*TokenBucket) Burst() intParameters: None
Returns:
Rate returns the current token refill rate.
func (*TokenBucket) Rate() RateParameters: None
Returns:
Tokens returns the current number of available tokens.
func (*TokenBucket) Tokens() float64Parameters: None
Returns:
WaitN blocks until n tokens are available or the context is canceled.
func (*TokenBucket) WaitN(ctx context.Context, n int) errorParameters:
ctx (context.Context)n (int)Returns:
NewBucketLimitError creates an error for bucket-specific rate limits
func NewBucketLimitError(limiterName, bucket string, remaining, limit int, retryAfter time.Duration) errorParameters:
| Parameter | Type | Description |
|---|---|---|
limiterName | string | |
bucket | string | |
remaining | int | |
limit | int | |
retryAfter | time.Duration |
Returns:
| Type | Description |
|---|---|
error |
Example:
// Example usage of NewBucketLimitError
result := NewBucketLimitError(/* parameters */)NewGlobalRateLimitError creates an error for global rate limit hits
func NewGlobalRateLimitError(limiterName string, retryAfter time.Duration) errorParameters:
| Parameter | Type | Description |
|---|---|---|
limiterName | string | |
retryAfter | time.Duration |
Returns:
| Type | Description |
|---|---|
error |
Example:
// Example usage of NewGlobalRateLimitError
result := NewGlobalRateLimitError(/* parameters */)NewRateLimitExceededError creates an error indicating rate limit was exceeded
func NewRateLimitExceededError(limiterName string, retryAfter time.Duration) errorParameters:
| Parameter | Type | Description |
|---|---|---|
limiterName | string | |
retryAfter | time.Duration |
Returns:
| Type | Description |
|---|---|
error |
Example:
// Example usage of NewRateLimitExceededError
result := NewRateLimitExceededError(/* parameters */)// Example implementation of Clock
type MyClock struct {
// Add your fields here
}
func (m MyClock) Now() time.Time {
// Implement your logic here
return
}
func (m MyClock) Sleep(param1 time.Duration) {
// Implement your logic here
return
}
func (m MyClock) AfterFunc(param1 time.Duration, param2 func()) Timer {
// Implement your logic here
return
}
type Clock interface {
Now() time.Time
Sleep(time.Duration)
AfterFunc(time.Duration, func()) Timer
}// Create a new LeakyBucket
leakybucket := LeakyBucket{
}type LeakyBucket struct {
}func NewLeakyBucket(rate Rate, capacity int, opts ...Option) *LeakyBucketfunc (*LeakyBucket) AllowN(now time.Time, n int) boolfunc (*LeakyBucket) Available() intfunc (*LeakyBucket) Capacity() intfunc (*LeakyBucket) Level() float64func (*LeakyBucket) Rate() Ratefunc (*LeakyBucket) WaitN(ctx context.Context, n int) error// Example implementation of Limiter
type MyLimiter struct {
// Add your fields here
}
func (m MyLimiter) AllowN(param1 time.Time, param2 int) bool {
// Implement your logic here
return
}
func (m MyLimiter) WaitN(param1 context.Context, param2 int) error {
// Implement your logic here
return
}
type Limiter interface {
AllowN(now time.Time, n int) bool
WaitN(ctx context.Context, n int) error
}// Create a new MultiTierConfig
multitierconfig := MultiTierConfig{
GlobalRate: Rate{},
GlobalBurst: 42,
DefaultRouteRate: Rate{},
DefaultRouteBurst: 42,
DefaultResourceRate: Rate{},
DefaultResourceBurst: 42,
QueueSize: 42,
EnablePreemptive: true,
EnableBucketMapping: true,
BucketTTL: /* value */,
RoutePatterns: map[],
}type MultiTierConfig struct {
GlobalRate Rate
GlobalBurst int
DefaultRouteRate Rate
DefaultRouteBurst int
DefaultResourceRate Rate
DefaultResourceBurst int
QueueSize int
EnablePreemptive bool
EnableBucketMapping bool
BucketTTL time.Duration
RoutePatterns map[string]RouteConfig
}func DefaultMultiTierConfig() *MultiTierConfig// Create a new MultiTierLimiter
multitierlimiter := MultiTierLimiter{
}type MultiTierLimiter struct {
}func NewMultiTierLimiter(config *MultiTierConfig, opts ...Option) *MultiTierLimiterfunc (*MultiTierLimiter) Allow(req *Request) boolfunc (*MultiTierLimiter) AllowN(now time.Time, n int) boolfunc (*MultiTierLimiter) GetMetrics() *MultiTierMetricsfunc (*MultiTierLimiter) Reset()func (*MultiTierLimiter) UpdateRateLimitFromHeaders(req *Request, headers map[string]string) errorfunc (*MultiTierLimiter) Wait(req *Request) errorfunc (*MultiTierLimiter) WaitN(ctx context.Context, n int) error// Create a new MultiTierMetrics
multitiermetrics := MultiTierMetrics{
TotalRequests: 42,
GlobalLimitHits: 42,
RouteLimitHits: 42,
ResourceLimitHits: 42,
QueuedRequests: 42,
DroppedRequests: 42,
AvgWaitTime: /* value */,
MaxWaitTime: /* value */,
BucketsActive: 42,
}type MultiTierMetrics struct {
TotalRequests int64
GlobalLimitHits int64
RouteLimitHits int64
ResourceLimitHits int64
QueuedRequests int64
DroppedRequests int64
AvgWaitTime time.Duration
MaxWaitTime time.Duration
BucketsActive int64
}// Example usage of Option
var value Option
// Initialize with appropriate valuetype Option func(*config)func WithClock(clock Clock) Optionfunc WithJitter(jitter float64) Optionfunc WithLogger(logger observe.Logger) Optionfunc WithMetrics(metrics observe.Metrics) Optionfunc WithName(name string) Optionfunc WithTracer(tracer observe.Tracer) Option// Create a new Rate
rate := Rate{
TokensPerSec: 3.14,
}type Rate struct {
TokensPerSec float64
}func NewRate(tokens int, duration time.Duration) Ratefunc Per(tokens int, duration time.Duration) Ratefunc PerHour(tokens int) Ratefunc PerMinute(tokens int) Ratefunc PerSecond(tokens int) Ratefunc (Rate) String() string// Create a new RateLimitError
ratelimiterror := RateLimitError{
Op: "example",
LimiterName: "example",
Err: errors.New("example"),
RetryAfter: time.Second,
Global: true,
Bucket: "example",
Remaining: 42,
Limit: 42,
}type RateLimitError struct {
Op string
LimiterName string
Err error
RetryAfter time.Duration
Global bool
Bucket string
Remaining int
Limit int
}func (*RateLimitError) Error() stringfunc (*RateLimitError) IsRetryable() boolfunc (*RateLimitError) Unwrap() error// Create a new Request
request := Request{
Method: "example",
Endpoint: "example",
ResourceID: "example",
SubResourceID: "example",
UserID: "example",
MajorParameters: map[string]string{},
Priority: 42,
Context: context.Background(),
}type Request struct {
Method string
Endpoint string
ResourceID string
SubResourceID string
UserID string
MajorParameters map[string]string
Priority int
Context context.Context
}// Create a new RouteConfig
routeconfig := RouteConfig{
Rate: Rate{},
Burst: 42,
MajorParameters: []string{},
}type RouteConfig struct {
Rate Rate
Burst int
MajorParameters []string
}// Example implementation of Timer
type MyTimer struct {
// Add your fields here
}
func (m MyTimer) Stop() bool {
// Implement your logic here
return false
}
type Timer interface {
Stop() bool
}// Create a new TokenBucket
tokenbucket := TokenBucket{
}type TokenBucket struct {
}func NewTokenBucket(rate Rate, burst int, opts ...Option) *TokenBucketfunc (*TokenBucket) AllowN(now time.Time, n int) boolfunc (*TokenBucket) Burst() intfunc (*TokenBucket) Rate() Ratefunc (*TokenBucket) Tokens() float64func (*TokenBucket) WaitN(ctx context.Context, n int) errorfunc NewBucketLimitError(limiterName, bucket string, remaining, limit int, retryAfter time.Duration) error// Example usage of NewBucketLimitError
result := NewBucketLimitError("api", "messages", 0, 5, time.Second)func NewGlobalRateLimitError(limiterName string, retryAfter time.Duration) error// Example usage of NewGlobalRateLimitError
result := NewGlobalRateLimitError("api", time.Second)func NewRateLimitExceededError(limiterName string, retryAfter time.Duration) error// Example usage of NewRateLimitExceededError
result := NewRateLimitExceededError("api", time.Second)// Example implementation of Clock
type MyClock struct {
// Add your fields here
}
func (m MyClock) Now() time.Time {
// Implement your logic here
return time.Now()
}
func (m MyClock) Sleep(param1 time.Duration) {
// Implement your logic here
return
}
func (m MyClock) AfterFunc(param1 time.Duration, param2 func()) Timer {
// Implement your logic here
return nil
}
type Clock interface {
Now() time.Time
Sleep(time.Duration)
AfterFunc(time.Duration, func()) Timer
}// Create a new LeakyBucket
leakybucket := LeakyBucket{
}type LeakyBucket struct {
}func NewLeakyBucket(rate Rate, capacity int, opts ...Option) *LeakyBucketfunc (*LeakyBucket) AllowN(now time.Time, n int) boolfunc (*LeakyBucket) Available() intfunc (*LeakyBucket) Capacity() intfunc (*LeakyBucket) Level() float64func (*LeakyBucket) Rate() Ratefunc (*LeakyBucket) WaitN(ctx context.Context, n int) error// Example implementation of Limiter
type MyLimiter struct {
// Add your fields here
}
func (m MyLimiter) AllowN(param1 time.Time, param2 int) bool {
// Implement your logic here
return false
}
func (m MyLimiter) WaitN(param1 context.Context, param2 int) error {
// Implement your logic here
return nil
}
type Limiter interface {
AllowN(now time.Time, n int) bool
WaitN(ctx context.Context, n int) error
}// Create a new MultiTierConfig
multitierconfig := MultiTierConfig{
GlobalRate: Rate{},
GlobalBurst: 42,
DefaultRouteRate: Rate{},
DefaultRouteBurst: 42,
DefaultResourceRate: Rate{},
DefaultResourceBurst: 42,
QueueSize: 42,
EnablePreemptive: true,
EnableBucketMapping: true,
BucketTTL: 5 * time.Minute,
RoutePatterns: map[string]RouteConfig{},
}type MultiTierConfig struct {
GlobalRate Rate
GlobalBurst int
DefaultRouteRate Rate
DefaultRouteBurst int
DefaultResourceRate Rate
DefaultResourceBurst int
QueueSize int
EnablePreemptive bool
EnableBucketMapping bool
BucketTTL time.Duration
RoutePatterns map[string]RouteConfig
}func DefaultMultiTierConfig() *MultiTierConfig// Create a new MultiTierLimiter
multitierlimiter := MultiTierLimiter{
}type MultiTierLimiter struct {
}func NewMultiTierLimiter(config *MultiTierConfig, opts ...Option) *MultiTierLimiterfunc (*MultiTierLimiter) Allow(req *Request) boolfunc (*MultiTierLimiter) AllowN(now time.Time, n int) boolfunc (*MultiTierLimiter) GetMetrics() *MultiTierMetricsfunc (*MultiTierLimiter) Reset()func (*MultiTierLimiter) UpdateRateLimitFromHeaders(req *Request, headers map[string]string) errorfunc (*MultiTierLimiter) Wait(req *Request) errorfunc (*MultiTierLimiter) WaitN(ctx context.Context, n int) error// Create a new MultiTierMetrics
multitiermetrics := MultiTierMetrics{
TotalRequests: 42,
GlobalLimitHits: 42,
RouteLimitHits: 42,
ResourceLimitHits: 42,
QueuedRequests: 42,
DroppedRequests: 42,
AvgWaitTime: 100 * time.Millisecond,
MaxWaitTime: time.Second,
BucketsActive: 42,
}type MultiTierMetrics struct {
TotalRequests int64
GlobalLimitHits int64
RouteLimitHits int64
ResourceLimitHits int64
QueuedRequests int64
DroppedRequests int64
AvgWaitTime time.Duration
MaxWaitTime time.Duration
BucketsActive int64
}// Example usage of Option
var value Option
// Initialize with appropriate valuetype Option func(*config)func WithClock(clock Clock) Optionfunc WithJitter(jitter float64) Optionfunc WithLogger(logger observe.Logger) Optionfunc WithMetrics(metrics observe.Metrics) Optionfunc WithName(name string) Optionfunc WithTracer(tracer observe.Tracer) Option// Create a new Rate
rate := Rate{
TokensPerSec: 3.14,
}type Rate struct {
TokensPerSec float64
}func NewRate(tokens int, duration time.Duration) Ratefunc Per(tokens int, duration time.Duration) Ratefunc PerHour(tokens int) Ratefunc PerMinute(tokens int) Ratefunc PerSecond(tokens int) Ratefunc (Rate) String() string// Create a new RateLimitError
ratelimiterror := RateLimitError{
Op: "example",
LimiterName: "example",
Err: errors.New("example"),
RetryAfter: time.Second,
Global: true,
Bucket: "example",
Remaining: 42,
Limit: 42,
}type RateLimitError struct {
Op string
LimiterName string
Err error
RetryAfter time.Duration
Global bool
Bucket string
Remaining int
Limit int
}func (*RateLimitError) Error() stringfunc (*RateLimitError) IsRetryable() boolfunc (*RateLimitError) Unwrap() error// Create a new Request
request := Request{
Method: "example",
Endpoint: "example",
ResourceID: "example",
SubResourceID: "example",
UserID: "example",
MajorParameters: map[string]string{},
Priority: 42,
Context: context.Background(),
}type Request struct {
Method string
Endpoint string
ResourceID string
SubResourceID string
UserID string
MajorParameters map[string]string
Priority int
Context context.Context
}// Create a new RouteConfig
routeconfig := RouteConfig{
Rate: Rate{},
Burst: 42,
MajorParameters: []string{},
}type RouteConfig struct {
Rate Rate
Burst int
MajorParameters []string
}// Example implementation of Timer
type MyTimer struct {
// Add your fields here
}
func (m MyTimer) Stop() bool {
// Implement your logic here
return false
}
type Timer interface {
Stop() bool
}// Create a new TokenBucket
tokenbucket := TokenBucket{
}type TokenBucket struct {
}func NewTokenBucket(rate Rate, burst int, opts ...Option) *TokenBucketfunc (*TokenBucket) AllowN(now time.Time, n int) boolfunc (*TokenBucket) Burst() intfunc (*TokenBucket) Rate() Ratefunc (*TokenBucket) Tokens() float64func (*TokenBucket) WaitN(ctx context.Context, n int) errorfunc NewBucketLimitError(limiterName, bucket string, remaining, limit int, retryAfter time.Duration) error// Example usage of NewBucketLimitError
result := NewBucketLimitError("api", "messages", 0, 5, time.Second)func NewGlobalRateLimitError(limiterName string, retryAfter time.Duration) error// Example usage of NewGlobalRateLimitError
result := NewGlobalRateLimitError("api", time.Second)func NewRateLimitExceededError(limiterName string, retryAfter time.Duration) error// Example usage of NewRateLimitExceededError
result := NewRateLimitExceededError("api", time.Second)