Synapse Best Practices
This guide covers recommended patterns and practices for using Synapse effectively in production environments.
Use meaningful, consistent key formats:
// Good: Structured, predictable keys
cache.Set(ctx, "user:12345:profile", userData)
cache.Set(ctx, "product:sku-abc:details", productData)
// Avoid: Inconsistent or opaque keys
cache.Set(ctx, "u12345", userData)
cache.Set(ctx, "abc_product_data", productData)

For similarity search, keys should have semantic meaning:
// Good: Similar queries have similar keys
cache.Set(ctx, "search:red running shoes", results)
cache.Set(ctx, "search:blue running shoes", results)
// These will match with similarity search
value, _, _, _ := cache.GetSimilar(ctx, "search:red runing shoes")

The similarity threshold should match your use case:
// High threshold (0.9+): Strict matching, fewer false positives
cache := synapse.New[string, string](
synapse.WithThreshold(0.9),
)
// Good for: Typo correction, near-exact matching
// Medium threshold (0.7-0.8): Balanced
cache := synapse.New[string, string](
synapse.WithThreshold(0.8),
)
// Good for: General fuzzy matching
// Low threshold (0.5-0.6): Loose matching, more results
cache := synapse.New[string, string](
synapse.WithThreshold(0.6),
)
// Good for: Semantic similarity, broad matching

Consider memory usage when sizing:
// Each entry stores:
// - Key (size depends on type)
// - Value (size depends on type)
// - Metadata: timestamps, counters, namespace, metadata map
// - Overhead: map entries, slice elements
// For string keys/values averaging 100 bytes each:
// ~300-400 bytes per entry including overhead
// 10,000 entries ≈ 3-4 MB
cache := synapse.New[string, string](
synapse.WithMaxSize(10000),
)

Exact lookups are O(1), similarity search is O(n):
func getCached(ctx context.Context, cache *synapse.Cache[string, string], key string) (string, bool) {
// Fast path: exact match
if value, found := cache.Get(ctx, key); found {
return value, true
}
// Slow path: similarity search
if value, _, score, found := cache.GetSimilar(ctx, key); found && score > 0.7 {
return value, true
}
return "", false
}

Balance concurrency vs. overhead:
// Low concurrency (< 10 goroutines)
synapse.WithShards(4)
// Medium concurrency (10-100 goroutines)
synapse.WithShards(16) // Default
// High concurrency (100+ goroutines)
synapse.WithShards(64)
// Very high concurrency (1000+ goroutines)
synapse.WithShards(128)

More shards reduce lock contention but increase memory and similarity search overhead.
For large caches, consider partitioning:
// Use namespaces to limit similarity search scope
userCtx := synapse.WithNamespace(ctx, "user-queries")
productCtx := synapse.WithNamespace(ctx, "product-queries")
// Searches only scan entries in the same namespace
cache.GetSimilar(userCtx, "search term")

Match the algorithm to your data:
// For text with typos: Levenshtein or Damerau-Levenshtein
cache.WithSimilarity(algorithms.Levenshtein)
// For fixed-length identifiers: Hamming
cache.WithSimilarity(algorithms.Hamming)
// For numeric vectors: Euclidean or Manhattan
cache.WithSimilarity(func(a, b []float64) float64 {
return algorithms.Euclidean(a, b)
})

Pass context for cancellation and tracing:
func handler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context() // Request context with deadline
value, found := cache.Get(ctx, key)
if !found {
// Context cancellation handled internally
value, _, _, found = cache.GetSimilar(ctx, key)
}
}

Similarity search can be slow for large caches:
ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond)
defer cancel()
value, key, score, found := cache.GetSimilar(ctx, query)
if ctx.Err() == context.DeadlineExceeded {
// Search took too long, use fallback
}

Isolate data between tenants:
func getTenantCache(ctx context.Context, tenantID string) context.Context {
return synapse.WithNamespace(ctx, fmt.Sprintf("tenant:%s", tenantID))
}
// Each tenant's operations are isolated
tenant1Ctx := getTenantCache(ctx, "tenant-1")
cache.Set(tenant1Ctx, "config", config)
// Similarity search only sees tenant-1's entries
cache.GetSimilar(tenant1Ctx, "confg")

Handle errors from cache operations explicitly:

err := cache.Set(ctx, key, value)
if err != nil {
if errors.Is(err, context.Canceled) {
return fmt.Errorf("operation cancelled: %w", err)
}
if errors.Is(err, context.DeadlineExceeded) {
return fmt.Errorf("operation timeout: %w", err)
}
return fmt.Errorf("cache set failed: %w", err)
}

Design for cache misses:
func getData(ctx context.Context, id string) (Data, error) {
// Try cache first
if cached, found := cache.Get(ctx, id); found {
return cached, nil
}
// Fall back to source
data, err := fetchFromSource(ctx, id)
if err != nil {
return Data{}, err
}
// Best-effort cache population
_ = cache.Set(ctx, id, data)
return data, nil
}

Choose TTLs based on how quickly data changes:

// Short TTL for frequently changing data
cache := synapse.New[string, MarketData](
synapse.WithTTL(5 * time.Second),
)
// Longer TTL for stable data
cache := synapse.New[string, UserProfile](
synapse.WithTTL(15 * time.Minute),
)

Choose an eviction policy suited to your access pattern:

// LRU for general workloads with temporal locality
cache := synapse.New[string, string](
synapse.WithMaxSize(10000),
synapse.WithEviction(eviction.NewLRU(10000)),
)

Keep the eviction policy capacity in sync with the cache's maximum size:

maxSize := 10000
cache := synapse.New[string, string](
synapse.WithMaxSize(maxSize),
synapse.WithEviction(eviction.NewLRU(maxSize)), // Same size
)

Run tests with the race detector enabled:

go test -race ./...

Test cancellation behavior explicitly:

func TestCacheContextCancellation(t *testing.T) {
cache := synapse.New[string, string]()
cache.WithSimilarity(algorithms.Levenshtein)
// Populate cache
ctx := context.Background()
for i := 0; i < 10000; i++ {
cache.Set(ctx, fmt.Sprintf("key-%d", i), "value")
}
// Test cancellation
cancelCtx, cancel := context.WithCancel(ctx)
cancel() // Cancel immediately
_, _, _, found := cache.GetSimilar(cancelCtx, "search")
if found {
t.Error("expected no result after cancellation")
}
}

Benchmark the hot paths:

func BenchmarkCacheGet(b *testing.B) {
cache := synapse.New[string, string](
synapse.WithMaxSize(10000),
)
ctx := context.Background()
cache.Set(ctx, "key", "value")
b.ResetTimer()
for i := 0; i < b.N; i++ {
cache.Get(ctx, "key")
}
}
func BenchmarkCacheGetSimilar(b *testing.B) {
cache := synapse.New[string, string](
synapse.WithMaxSize(1000),
synapse.WithThreshold(0.8),
)
cache.WithSimilarity(algorithms.Levenshtein)
ctx := context.Background()
for i := 0; i < 1000; i++ {
cache.Set(ctx, fmt.Sprintf("key-%d", i), "value")
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
cache.GetSimilar(ctx, "key-500")
}
}

Summary checklist:
- Set MaxSize based on memory constraints
- Set Shards based on expected concurrency
- Choose a Threshold appropriate for your similarity requirements
- Set a TTL for time-sensitive data
- Choose an eviction policy (LRU recommended for most cases)
- Pass context.Context to operations
- Test with the -race flag

This guide covers recommended patterns and practices for using Synapse effectively in production environments.
Use meaningful, consistent key formats:
// Good: Structured, predictable keys
cache.Set(ctx, "user:12345:profile", userData)
cache.Set(ctx, "product:sku-abc:details", productData)
// Avoid: Inconsistent or opaque keys
cache.Set(ctx, "u12345", userData)
cache.Set(ctx, "abc_product_data", productData)For similarity search, keys should have semantic meaning:
// Good: Similar queries have similar keys
cache.Set(ctx, "search:red running shoes", results)
cache.Set(ctx, "search:blue running shoes", results)
// These will match with similarity search
value, _, _, _ := cache.GetSimilar(ctx, "search:red runing shoes")The similarity threshold should match your use case:
// High threshold (0.9+): Strict matching, fewer false positives
cache := synapse.New[string, string](
synapse.WithThreshold(0.9),
)
// Good for: Typo correction, near-exact matching
// Medium threshold (0.7-0.8): Balanced
cache := synapse.New[string, string](
synapse.WithThreshold(0.8),
)
// Good for: General fuzzy matching
// Low threshold (0.5-0.6): Loose matching, more results
cache := synapse.New[string, string](
synapse.WithThreshold(0.6),
)
// Good for: Semantic similarity, broad matchingConsider memory usage when sizing:
// Each entry stores:
// - Key (size depends on type)
// - Value (size depends on type)
// - Metadata: timestamps, counters, namespace, metadata map
// - Overhead: map entries, slice elements
// For string keys/values averaging 100 bytes each:
// ~300-400 bytes per entry including overhead
// 10,000 entries ≈ 3-4 MB
cache := synapse.New[string, string](
synapse.WithMaxSize(10000),
)Exact lookups are O(1), similarity search is O(n):
func getCached(ctx context.Context, cache *synapse.Cache[string, string], key string) (string, bool) {
// Fast path: exact match
if value, found := cache.Get(ctx, key); found {
return value, true
}
// Slow path: similarity search
if value, _, score, found := cache.GetSimilar(ctx, key); found && score > 0.7 {
return value, true
}
return "", false
}Balance concurrency vs. overhead:
// Low concurrency (< 10 goroutines)
synapse.WithShards(4)
// Medium concurrency (10-100 goroutines)
synapse.WithShards(16) // Default
// High concurrency (100+ goroutines)
synapse.WithShards(64)
// Very high concurrency (1000+ goroutines)
synapse.WithShards(128)More shards reduce lock contention but increase memory and similarity search overhead.
For large caches, consider partitioning:
// Use namespaces to limit similarity search scope
userCtx := synapse.WithNamespace(ctx, "user-queries")
productCtx := synapse.WithNamespace(ctx, "product-queries")
// Searches only scan entries in the same namespace
cache.GetSimilar(userCtx, "search term")Match the algorithm to your data:
// For text with typos: Levenshtein or Damerau-Levenshtein
cache.WithSimilarity(algorithms.Levenshtein)
// For fixed-length identifiers: Hamming
cache.WithSimilarity(algorithms.Hamming)
// For numeric vectors: Euclidean or Manhattan
cache.WithSimilarity(func(a, b []float64) float64 {
return algorithms.Euclidean(a, b)
})Pass context for cancellation and tracing:
func handler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context() // Request context with deadline
value, found := cache.Get(ctx, key)
if !found {
// Context cancellation handled internally
value, _, _, found = cache.GetSimilar(ctx, key)
}
}Similarity search can be slow for large caches:
ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond)
defer cancel()
value, key, score, found := cache.GetSimilar(ctx, query)
if ctx.Err() == context.DeadlineExceeded {
// Search took too long, use fallback
}Isolate data between tenants:
func getTenantCache(ctx context.Context, tenantID string) context.Context {
return synapse.WithNamespace(ctx, fmt.Sprintf("tenant:%s", tenantID))
}
// Each tenant's operations are isolated
tenant1Ctx := getTenantCache(ctx, "tenant-1")
cache.Set(tenant1Ctx, "config", config)
// Similarity search only sees tenant-1's entries
cache.GetSimilar(tenant1Ctx, "confg")err := cache.Set(ctx, key, value)
if err != nil {
if errors.Is(err, context.Canceled) {
return fmt.Errorf("operation cancelled: %w", err)
}
if errors.Is(err, context.DeadlineExceeded) {
return fmt.Errorf("operation timeout: %w", err)
}
return fmt.Errorf("cache set failed: %w", err)
}Design for cache misses:
func getData(ctx context.Context, id string) (Data, error) {
// Try cache first
if cached, found := cache.Get(ctx, id); found {
return cached, nil
}
// Fall back to source
data, err := fetchFromSource(ctx, id)
if err != nil {
return Data{}, err
}
// Best-effort cache population
_ = cache.Set(ctx, id, data)
return data, nil
}// Short TTL for frequently changing data
cache := synapse.New[string, MarketData](
synapse.WithTTL(5 * time.Second),
)
// Longer TTL for stable data
cache := synapse.New[string, UserProfile](
synapse.WithTTL(15 * time.Minute),
)// LRU for general workloads with temporal locality
cache := synapse.New[string, string](
synapse.WithMaxSize(10000),
synapse.WithEviction(eviction.NewLRU(10000)),
)maxSize := 10000
cache := synapse.New[string, string](
synapse.WithMaxSize(maxSize),
synapse.WithEviction(eviction.NewLRU(maxSize)), // Same size
)go test -race ./...func TestCacheContextCancellation(t *testing.T) {
cache := synapse.New[string, string]()
cache.WithSimilarity(algorithms.Levenshtein)
// Populate cache
ctx := context.Background()
for i := 0; i < 10000; i++ {
cache.Set(ctx, fmt.Sprintf("key-%d", i), "value")
}
// Test cancellation
cancelCtx, cancel := context.WithCancel(ctx)
cancel() // Cancel immediately
_, _, _, found := cache.GetSimilar(cancelCtx, "search")
if found {
t.Error("expected no result after cancellation")
}
}func BenchmarkCacheGet(b *testing.B) {
cache := synapse.New[string, string](
synapse.WithMaxSize(10000),
)
ctx := context.Background()
cache.Set(ctx, "key", "value")
b.ResetTimer()
for i := 0; i < b.N; i++ {
cache.Get(ctx, "key")
}
}
func BenchmarkCacheGetSimilar(b *testing.B) {
cache := synapse.New[string, string](
synapse.WithMaxSize(1000),
synapse.WithThreshold(0.8),
)
cache.WithSimilarity(algorithms.Levenshtein)
ctx := context.Background()
for i := 0; i < 1000; i++ {
cache.Set(ctx, fmt.Sprintf("key-%d", i), "value")
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
cache.GetSimilar(ctx, "key-500")
}
}MaxSize based on memory constraintsShards based on expected concurrencyThreshold appropriate for your similarity requirementsTTL for time-sensitive dataEviction policy (LRU recommended for most cases)context.Context to operations-race flag// Good: Structured, predictable keys
cache.Set(ctx, "user:12345:profile", userData)
cache.Set(ctx, "product:sku-abc:details", productData)
// Avoid: Inconsistent or opaque keys
cache.Set(ctx, "u12345", userData)
cache.Set(ctx, "abc_product_data", productData)// Good: Similar queries have similar keys
cache.Set(ctx, "search:red running shoes", results)
cache.Set(ctx, "search:blue running shoes", results)
// These will match with similarity search
value, _, _, _ := cache.GetSimilar(ctx, "search:red runing shoes")// High threshold (0.9+): Strict matching, fewer false positives
cache := synapse.New[string, string](
synapse.WithThreshold(0.9),
)
// Good for: Typo correction, near-exact matching
// Medium threshold (0.7-0.8): Balanced
cache := synapse.New[string, string](
synapse.WithThreshold(0.8),
)
// Good for: General fuzzy matching
// Low threshold (0.5-0.6): Loose matching, more results
cache := synapse.New[string, string](
synapse.WithThreshold(0.6),
)
// Good for: Semantic similarity, broad matching// Each entry stores:
// - Key (size depends on type)
// - Value (size depends on type)
// - Metadata: timestamps, counters, namespace, metadata map
// - Overhead: map entries, slice elements
// For string keys/values averaging 100 bytes each:
// ~300-400 bytes per entry including overhead
// 10,000 entries ≈ 3-4 MB
cache := synapse.New[string, string](
synapse.WithMaxSize(10000),
)func getCached(ctx context.Context, cache *synapse.Cache[string, string], key string) (string, bool) {
// Fast path: exact match
if value, found := cache.Get(ctx, key); found {
return value, true
}
// Slow path: similarity search
if value, _, score, found := cache.GetSimilar(ctx, key); found && score > 0.7 {
return value, true
}
return "", false
}// Low concurrency (< 10 goroutines)
synapse.WithShards(4)
// Medium concurrency (10-100 goroutines)
synapse.WithShards(16) // Default
// High concurrency (100+ goroutines)
synapse.WithShards(64)
// Very high concurrency (1000+ goroutines)
synapse.WithShards(128)// Use namespaces to limit similarity search scope
userCtx := synapse.WithNamespace(ctx, "user-queries")
productCtx := synapse.WithNamespace(ctx, "product-queries")
// Searches only scan entries in the same namespace
cache.GetSimilar(userCtx, "search term")// For text with typos: Levenshtein or Damerau-Levenshtein
cache.WithSimilarity(algorithms.Levenshtein)
// For fixed-length identifiers: Hamming
cache.WithSimilarity(algorithms.Hamming)
// For numeric vectors: Euclidean or Manhattan
cache.WithSimilarity(func(a, b []float64) float64 {
return algorithms.Euclidean(a, b)
})func handler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context() // Request context with deadline
value, found := cache.Get(ctx, key)
if !found {
// Context cancellation handled internally
value, _, _, found = cache.GetSimilar(ctx, key)
}
}ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond)
defer cancel()
value, key, score, found := cache.GetSimilar(ctx, query)
if ctx.Err() == context.DeadlineExceeded {
// Search took too long, use fallback
}func getTenantCache(ctx context.Context, tenantID string) context.Context {
return synapse.WithNamespace(ctx, fmt.Sprintf("tenant:%s", tenantID))
}
// Each tenant's operations are isolated
tenant1Ctx := getTenantCache(ctx, "tenant-1")
cache.Set(tenant1Ctx, "config", config)
// Similarity search only sees tenant-1's entries
cache.GetSimilar(tenant1Ctx, "confg")err := cache.Set(ctx, key, value)
if err != nil {
if errors.Is(err, context.Canceled) {
return fmt.Errorf("operation cancelled: %w", err)
}
if errors.Is(err, context.DeadlineExceeded) {
return fmt.Errorf("operation timeout: %w", err)
}
return fmt.Errorf("cache set failed: %w", err)
}func getData(ctx context.Context, id string) (Data, error) {
// Try cache first
if cached, found := cache.Get(ctx, id); found {
return cached, nil
}
// Fall back to source
data, err := fetchFromSource(ctx, id)
if err != nil {
return Data{}, err
}
// Best-effort cache population
_ = cache.Set(ctx, id, data)
return data, nil
}// Short TTL for frequently changing data
cache := synapse.New[string, MarketData](
synapse.WithTTL(5 * time.Second),
)
// Longer TTL for stable data
cache := synapse.New[string, UserProfile](
synapse.WithTTL(15 * time.Minute),
)// LRU for general workloads with temporal locality
cache := synapse.New[string, string](
synapse.WithMaxSize(10000),
synapse.WithEviction(eviction.NewLRU(10000)),
)maxSize := 10000
cache := synapse.New[string, string](
synapse.WithMaxSize(maxSize),
synapse.WithEviction(eviction.NewLRU(maxSize)), // Same size
)go test -race ./...func TestCacheContextCancellation(t *testing.T) {
cache := synapse.New[string, string]()
cache.WithSimilarity(algorithms.Levenshtein)
// Populate cache
ctx := context.Background()
for i := 0; i < 10000; i++ {
cache.Set(ctx, fmt.Sprintf("key-%d", i), "value")
}
// Test cancellation
cancelCtx, cancel := context.WithCancel(ctx)
cancel() // Cancel immediately
_, _, _, found := cache.GetSimilar(cancelCtx, "search")
if found {
t.Error("expected no result after cancellation")
}
}func BenchmarkCacheGet(b *testing.B) {
cache := synapse.New[string, string](
synapse.WithMaxSize(10000),
)
ctx := context.Background()
cache.Set(ctx, "key", "value")
b.ResetTimer()
for i := 0; i < b.N; i++ {
cache.Get(ctx, "key")
}
}
func BenchmarkCacheGetSimilar(b *testing.B) {
cache := synapse.New[string, string](
synapse.WithMaxSize(1000),
synapse.WithThreshold(0.8),
)
cache.WithSimilarity(algorithms.Levenshtein)
ctx := context.Background()
for i := 0; i < 1000; i++ {
cache.Set(ctx, fmt.Sprintf("key-%d", i), "value")
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
cache.GetSimilar(ctx, "key-500")
}
}// Good: Structured, predictable keys
cache.Set(ctx, "user:12345:profile", userData)
cache.Set(ctx, "product:sku-abc:details", productData)
// Avoid: Inconsistent or opaque keys
cache.Set(ctx, "u12345", userData)
cache.Set(ctx, "abc_product_data", productData)// Good: Similar queries have similar keys
cache.Set(ctx, "search:red running shoes", results)
cache.Set(ctx, "search:blue running shoes", results)
// These will match with similarity search
value, _, _, _ := cache.GetSimilar(ctx, "search:red runing shoes")// High threshold (0.9+): Strict matching, fewer false positives
cache := synapse.New[string, string](
synapse.WithThreshold(0.9),
)
// Good for: Typo correction, near-exact matching
// Medium threshold (0.7-0.8): Balanced
cache := synapse.New[string, string](
synapse.WithThreshold(0.8),
)
// Good for: General fuzzy matching
// Low threshold (0.5-0.6): Loose matching, more results
cache := synapse.New[string, string](
synapse.WithThreshold(0.6),
)
// Good for: Semantic similarity, broad matching// Each entry stores:
// - Key (size depends on type)
// - Value (size depends on type)
// - Metadata: timestamps, counters, namespace, metadata map
// - Overhead: map entries, slice elements
// For string keys/values averaging 100 bytes each:
// ~300-400 bytes per entry including overhead
// 10,000 entries ≈ 3-4 MB
cache := synapse.New[string, string](
synapse.WithMaxSize(10000),
)func getCached(ctx context.Context, cache *synapse.Cache[string, string], key string) (string, bool) {
// Fast path: exact match
if value, found := cache.Get(ctx, key); found {
return value, true
}
// Slow path: similarity search
if value, _, score, found := cache.GetSimilar(ctx, key); found && score > 0.7 {
return value, true
}
return "", false
}// Low concurrency (< 10 goroutines)
synapse.WithShards(4)
// Medium concurrency (10-100 goroutines)
synapse.WithShards(16) // Default
// High concurrency (100+ goroutines)
synapse.WithShards(64)
// Very high concurrency (1000+ goroutines)
synapse.WithShards(128)// Use namespaces to limit similarity search scope
userCtx := synapse.WithNamespace(ctx, "user-queries")
productCtx := synapse.WithNamespace(ctx, "product-queries")
// Searches only scan entries in the same namespace
cache.GetSimilar(userCtx, "search term")// For text with typos: Levenshtein or Damerau-Levenshtein
cache.WithSimilarity(algorithms.Levenshtein)
// For fixed-length identifiers: Hamming
cache.WithSimilarity(algorithms.Hamming)
// For numeric vectors: Euclidean or Manhattan
cache.WithSimilarity(func(a, b []float64) float64 {
return algorithms.Euclidean(a, b)
})func handler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context() // Request context with deadline
value, found := cache.Get(ctx, key)
if !found {
// Context cancellation handled internally
value, _, _, found = cache.GetSimilar(ctx, key)
}
}ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond)
defer cancel()
value, key, score, found := cache.GetSimilar(ctx, query)
if ctx.Err() == context.DeadlineExceeded {
// Search took too long, use fallback
}func getTenantCache(ctx context.Context, tenantID string) context.Context {
return synapse.WithNamespace(ctx, fmt.Sprintf("tenant:%s", tenantID))
}
// Each tenant's operations are isolated
tenant1Ctx := getTenantCache(ctx, "tenant-1")
cache.Set(tenant1Ctx, "config", config)
// Similarity search only sees tenant-1's entries
cache.GetSimilar(tenant1Ctx, "confg")err := cache.Set(ctx, key, value)
if err != nil {
if errors.Is(err, context.Canceled) {
return fmt.Errorf("operation cancelled: %w", err)
}
if errors.Is(err, context.DeadlineExceeded) {
return fmt.Errorf("operation timeout: %w", err)
}
return fmt.Errorf("cache set failed: %w", err)
}func getData(ctx context.Context, id string) (Data, error) {
// Try cache first
if cached, found := cache.Get(ctx, id); found {
return cached, nil
}
// Fall back to source
data, err := fetchFromSource(ctx, id)
if err != nil {
return Data{}, err
}
// Best-effort cache population
_ = cache.Set(ctx, id, data)
return data, nil
}// Short TTL for frequently changing data
cache := synapse.New[string, MarketData](
synapse.WithTTL(5 * time.Second),
)
// Longer TTL for stable data
cache := synapse.New[string, UserProfile](
synapse.WithTTL(15 * time.Minute),
)// LRU for general workloads with temporal locality
cache := synapse.New[string, string](
synapse.WithMaxSize(10000),
synapse.WithEviction(eviction.NewLRU(10000)),
)maxSize := 10000
cache := synapse.New[string, string](
synapse.WithMaxSize(maxSize),
synapse.WithEviction(eviction.NewLRU(maxSize)), // Same size
)go test -race ./...func TestCacheContextCancellation(t *testing.T) {
cache := synapse.New[string, string]()
cache.WithSimilarity(algorithms.Levenshtein)
// Populate cache
ctx := context.Background()
for i := 0; i < 10000; i++ {
cache.Set(ctx, fmt.Sprintf("key-%d", i), "value")
}
// Test cancellation
cancelCtx, cancel := context.WithCancel(ctx)
cancel() // Cancel immediately
_, _, _, found := cache.GetSimilar(cancelCtx, "search")
if found {
t.Error("expected no result after cancellation")
}
}func BenchmarkCacheGet(b *testing.B) {
cache := synapse.New[string, string](
synapse.WithMaxSize(10000),
)
ctx := context.Background()
cache.Set(ctx, "key", "value")
b.ResetTimer()
for i := 0; i < b.N; i++ {
cache.Get(ctx, "key")
}
}
func BenchmarkCacheGetSimilar(b *testing.B) {
cache := synapse.New[string, string](
synapse.WithMaxSize(1000),
synapse.WithThreshold(0.8),
)
cache.WithSimilarity(algorithms.Levenshtein)
ctx := context.Background()
for i := 0; i < 1000; i++ {
cache.Set(ctx, fmt.Sprintf("key-%d", i), "value")
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
cache.GetSimilar(ctx, "key-500")
}
}