Loading documentation...
Loading documentation...
Loading documentation...
Note: This is a developer-maintained documentation page. The content here is not auto-generated and should be updated manually to explain the core concepts and architecture of the logs package.
Import Path: github.com/kolosys/helix/logs
Package logs provides a high-performance, context-aware structured logging library with zero-allocation hot paths, multiple output formats, and extensible hook system.
The logs package is designed for high-performance logging in production environments:
Logger.Log()
↓
Level Check (skip if below threshold)
↓
Sampler Check (skip if sampled out)
↓
Create Entry (from pool)
↓
Add Fields (default + call-site)
↓
Run Hooks (pre-processing)
↓
Format Entry (TextFormatter/JSONFormatter)
↓
Write Output (sync or async)
↓
Return Entry to Pool

Key building blocks: sync.Pool for zero allocations; context.Context for request-scoped fields.

Create a logger with default settings:
log := logs.New()
log.Info("server started", logs.Int("port", 8080))

Create a logger with options:
log := logs.New(
logs.WithLevel(logs.DebugLevel),
logs.WithFormatter(&logs.JSONFormatter{}),
logs.WithOutput(os.Stdout),
logs.WithCaller(),
logs.WithAsync(1024),
)

Log levels in order of severity:
- TraceLevel - Most verbose, for detailed debugging
- DebugLevel - Debug information
- InfoLevel - General information (default)
- WarnLevel - Warning messages
- ErrorLevel - Error conditions
- FatalLevel - Fatal errors (exits after logging)
- PanicLevel - Panic errors (panics after logging)

Fields provide type-safe, structured data:
logs.Info("user created",
logs.String("user_id", "123"),
logs.Int("age", 30),
logs.Bool("active", true),
logs.Duration("latency", time.Since(start)),
logs.Err(err),
)

The package provides type-safe field builders:
- String(), Strings() (slice)
- Int(), Int8(), Int16(), Int32(), Int64()
- Uint(), Uint8(), Uint16(), Uint32(), Uint64()
- Float32(), Float64()
- Bool()
- Time(), Duration()
- Err(), NamedErr()
- Any(), JSON(), Bytes()
- Stringer() for types implementing fmt.Stringer

Log with context to automatically include context fields:
log.InfoContext(ctx, "request processed",
logs.Duration("latency", time.Since(start)),
)
// Context fields are automatically extracted if set via logs.WithContext()

Create child loggers with additional fields:
reqLog := log.With(
logs.String("request_id", requestID),
logs.String("user_id", userID),
)
reqLog.Info("processing request") // Includes request_id and user_id
reqLog.Error("request failed", logs.Err(err)) // Also includes request_id and user_id

import "github.com/kolosys/helix/logs"
log := logs.New()
log.Info("server started", logs.Int("port", 8080))
log.Warn("deprecated API used", logs.String("endpoint", "/old"))
log.Error("request failed", logs.Err(err))

log := logs.New(
logs.WithLevel(logs.InfoLevel),
logs.WithFormatter(&logs.JSONFormatter{}),
logs.WithOutput(os.Stdout),
)
log.Info("user created",
logs.String("user_id", "123"),
logs.String("email", "user@example.com"),
)
// Output: {"level":"info","time":"2024-01-15T10:30:00Z","message":"user created","user_id":"123","email":"user@example.com"}

log := logs.New(
logs.WithLevel(logs.DebugLevel),
logs.WithFormatter(&logs.TextFormatter{
DisableColors: false,
FullTimestamp: true,
}),
logs.WithCaller(),
)
log.Debug("processing request",
logs.String("method", "GET"),
logs.String("path", "/users"),
)

// Set context fields (typically in middleware)
ctx := logs.WithContext(r.Context(),
logs.String("request_id", requestID),
logs.String("user_id", userID),
)
// Log with context (fields automatically included)
log.InfoContext(ctx, "request processed",
logs.Duration("latency", time.Since(start)),
)

Use async logging for high-throughput scenarios:
log := logs.New(
logs.WithAsync(1024), // Buffer size
logs.WithFormatter(&logs.JSONFormatter{}),
)
// Logs are written asynchronously
log.Info("high volume log", logs.Int("count", 1000))

Use sampling to reduce log volume:
sampler := logs.NewRateSampler(100, time.Second) // 100 logs per second
log := logs.New(
logs.WithSampler(sampler),
logs.WithFormatter(&logs.JSONFormatter{}),
)
// Only 100 logs per second will be written
for i := 0; i < 10000; i++ {
log.Info("high volume", logs.Int("i", i))
}

Add hooks for custom processing (metrics, alerting, etc.):
type metricsHook struct{}
func (h *metricsHook) Levels() []logs.Level {
return []logs.Level{logs.ErrorLevel, logs.FatalLevel}
}
func (h *metricsHook) Fire(entry *logs.Entry) error {
metrics.IncrementErrorCounter(entry.Level.String())
return nil
}
log := logs.New(
logs.WithHooks(&metricsHook{}),
)

The logs package uses several techniques to avoid allocations:
- sync.Pool

Fields are type-safe rather than using map[string]interface{} because:
Formatters are pluggable because:
Context-aware logging provides:
Problem: Async loggers must be closed to flush pending logs.
Solution: Always close async loggers:
log := logs.New(logs.WithAsync(1024))
defer log.Close() // Flushes pending logs
log.Info("message")
// Logger must be closed to ensure message is written

Problem: Logging after Close() may panic or lose logs.
Solution: Ensure logger lifecycle matches application:
log := logs.New(logs.WithAsync(1024))
// In shutdown handler
s.OnStop(func(ctx context.Context, s *helix.Server) {
log.Close() // Flush logs before shutdown
// Don't log after this point
})

Problem: Creating fields with Any() for simple types causes allocations.
Solution: Use type-specific field builders:
// ❌ Wrong - causes allocation
log.Info("message", logs.Any("count", 42))
// ✅ Correct - zero allocation
log.Info("message", logs.Int("count", 42))

Problem: Default level is InfoLevel, so debug logs are ignored.
Solution: Set appropriate level for environment:
// Development
log := logs.New(logs.WithLevel(logs.DebugLevel))
// Production
log := logs.New(logs.WithLevel(logs.InfoLevel))

Helix middleware automatically uses the logs package:
import "github.com/kolosys/helix/middleware"
s := helix.New()
s.Use(middleware.Logger(middleware.LogFormatJSON))

Add request-scoped fields in middleware:
func requestIDMiddleware(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
requestID := generateRequestID()
ctx := logs.WithContext(r.Context(),
logs.String("request_id", requestID),
)
next.ServeHTTP(w, r.WithContext(ctx))
})
}

Use hooks to send logs to metrics systems:
type prometheusHook struct {
counter *prometheus.CounterVec
}
func (h *prometheusHook) Levels() []logs.Level {
return []logs.Level{logs.ErrorLevel, logs.FatalLevel}
}
func (h *prometheusHook) Fire(entry *logs.Entry) error {
h.counter.WithLabelValues(entry.Level.String()).Inc()
return nil
}
log := logs.New(logs.WithHooks(&prometheusHook{counter: errorCounter}))

Send logs to external services via hooks:
type cloudWatchHook struct {
client *cloudwatchlogs.Client
}
func (h *cloudWatchHook) Levels() []logs.Level {
return []logs.Level{} // All levels
}
func (h *cloudWatchHook) Fire(entry *logs.Entry) error {
// Send to CloudWatch
return h.client.PutLogEvents(...)
}

This documentation should be updated by package maintainers to reflect the actual architecture and design patterns used.
Note: This is a developer-maintained documentation page. The content here is not auto-generated and should be updated manually to explain the core concepts and architecture of the logs package.
Import Path: github.com/kolosys/helix/logs
Package logs provides a high-performance, context-aware structured logging library with zero-allocation hot paths, multiple output formats, and extensible hook system.
The logs package is designed for high-performance logging in production environments:
Logger.Log()
↓
Level Check (skip if below threshold)
↓
Sampler Check (skip if sampled out)
↓
Create Entry (from pool)
↓
Add Fields (default + call-site)
↓
Run Hooks (pre-processing)
↓
Format Entry (TextFormatter/JSONFormatter)
↓
Write Output (sync or async)
↓
Return Entry to Poolsync.Pool for zero allocationscontext.ContextCreate a logger with default settings:
log := logs.New()
log.Info("server started", logs.Int("port", 8080))Create a logger with options:
log := logs.New(
logs.WithLevel(logs.DebugLevel),
logs.WithFormatter(&logs.JSONFormatter{}),
logs.WithOutput(os.Stdout),
logs.WithCaller(),
logs.WithAsync(1024),
)Log levels in order of severity:
TraceLevel - Most verbose, for detailed debuggingDebugLevel - Debug informationInfoLevel - General information (default)WarnLevel - Warning messagesErrorLevel - Error conditionsFatalLevel - Fatal errors (exits after logging)PanicLevel - Panic errors (panics after logging)Fields provide type-safe, structured data:
logs.Info("user created",
logs.String("user_id", "123"),
logs.Int("age", 30),
logs.Bool("active", true),
logs.Duration("latency", time.Since(start)),
logs.Err(err),
)The package provides type-safe field builders:
String(), Strings() (slice)Int(), Int8(), Int16(), Int32(), Int64()Uint(), Uint8(), Uint16(), Uint32(), Uint64()Float32(), Float64()Bool()Time(), Duration()Err(), NamedErr()Any(), JSON(), Bytes()Stringer() for types implementing fmt.StringerLog with context to automatically include context fields:
log.InfoContext(ctx, "request processed",
logs.Duration("latency", time.Since(start)),
)
// Context fields are automatically extracted if set via logs.WithContext()Create child loggers with additional fields:
reqLog := log.With(
logs.String("request_id", requestID),
logs.String("user_id", userID),
)
reqLog.Info("processing request") // Includes request_id and user_id
reqLog.Error("request failed", logs.Err(err)) // Also includes request_id and user_idimport "github.com/kolosys/helix/logs"
log := logs.New()
log.Info("server started", logs.Int("port", 8080))
log.Warn("deprecated API used", logs.String("endpoint", "/old"))
log.Error("request failed", logs.Err(err))log := logs.New(
logs.WithLevel(logs.InfoLevel),
logs.WithFormatter(&logs.JSONFormatter{}),
logs.WithOutput(os.Stdout),
)
log.Info("user created",
logs.String("user_id", "123"),
logs.String("email", "user@example.com"),
)
// Output: {"level":"info","time":"2024-01-15T10:30:00Z","message":"user created","user_id":"123","email":"user@example.com"}log := logs.New(
logs.WithLevel(logs.DebugLevel),
logs.WithFormatter(&logs.TextFormatter{
DisableColors: false,
FullTimestamp: true,
}),
logs.WithCaller(),
)
log.Debug("processing request",
logs.String("method", "GET"),
logs.String("path", "/users"),
)// Set context fields (typically in middleware)
ctx := logs.WithContext(r.Context(),
logs.String("request_id", requestID),
logs.String("user_id", userID),
)
// Log with context (fields automatically included)
log.InfoContext(ctx, "request processed",
logs.Duration("latency", time.Since(start)),
)Use async logging for high-throughput scenarios:
log := logs.New(
logs.WithAsync(1024), // Buffer size
logs.WithFormatter(&logs.JSONFormatter{}),
)
// Logs are written asynchronously
log.Info("high volume log", logs.Int("count", 1000))Use sampling to reduce log volume:
sampler := logs.NewRateSampler(100, time.Second) // 100 logs per second
log := logs.New(
logs.WithSampler(sampler),
logs.WithFormatter(&logs.JSONFormatter{}),
)
// Only 100 logs per second will be written
for i := 0; i < 10000; i++ {
log.Info("high volume", logs.Int("i", i))
}Add hooks for custom processing (metrics, alerting, etc.):
type metricsHook struct{}
func (h *metricsHook) Levels() []logs.Level {
return []logs.Level{logs.ErrorLevel, logs.FatalLevel}
}
func (h *metricsHook) Fire(entry *logs.Entry) error {
metrics.IncrementErrorCounter(entry.Level.String())
return nil
}
log := logs.New(
logs.WithHooks(&metricsHook{}),
)The logs package uses several techniques to avoid allocations:
sync.PoolFields are type-safe rather than using map[string]interface{} because:
Formatters are pluggable because:
Context-aware logging provides:
Problem: Async loggers must be closed to flush pending logs.
Solution: Always close async loggers:
log := logs.New(logs.WithAsync(1024))
defer log.Close() // Flushes pending logs
log.Info("message")
// Logger must be closed to ensure message is writtenProblem: Logging after Close() may panic or lose logs.
Solution: Ensure logger lifecycle matches application:
log := logs.New(logs.WithAsync(1024))
// In shutdown handler
s.OnStop(func(ctx context.Context, s *helix.Server) {
log.Close() // Flush logs before shutdown
// Don't log after this point
})Problem: Creating fields with Any() for simple types causes allocations.
Solution: Use type-specific field builders:
// ❌ Wrong - causes allocation
log.Info("message", logs.Any("count", 42))
// ✅ Correct - zero allocation
log.Info("message", logs.Int("count", 42))Problem: Default level is InfoLevel, so debug logs are ignored.
Solution: Set appropriate level for environment:
// Development
log := logs.New(logs.WithLevel(logs.DebugLevel))
// Production
log := logs.New(logs.WithLevel(logs.InfoLevel))Helix middleware automatically uses the logs package:
import "github.com/kolosys/helix/middleware"
s := helix.New()
s.Use(middleware.Logger(middleware.LogFormatJSON))Add request-scoped fields in middleware:
func requestIDMiddleware(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
requestID := generateRequestID()
ctx := logs.WithContext(r.Context(),
logs.String("request_id", requestID),
)
next.ServeHTTP(w, r.WithContext(ctx))
})
}Use hooks to send logs to metrics systems:
type prometheusHook struct {
counter *prometheus.CounterVec
}
func (h *prometheusHook) Levels() []logs.Level {
return []logs.Level{logs.ErrorLevel, logs.FatalLevel}
}
func (h *prometheusHook) Fire(entry *logs.Entry) error {
h.counter.WithLabelValues(entry.Level.String()).Inc()
return nil
}
log := logs.New(logs.WithHooks(&prometheusHook{counter: errorCounter}))Send logs to external services via hooks:
type cloudWatchHook struct {
client *cloudwatchlogs.Client
}
func (h *cloudWatchHook) Levels() []logs.Level {
return []logs.Level{} // All levels
}
func (h *cloudWatchHook) Fire(entry *logs.Entry) error {
// Send to CloudWatch
return h.client.PutLogEvents(...)
}This documentation should be updated by package maintainers to reflect the actual architecture and design patterns used.
Logger.Log()
↓
Level Check (skip if below threshold)
↓
Sampler Check (skip if sampled out)
↓
Create Entry (from pool)
↓
Add Fields (default + call-site)
↓
Run Hooks (pre-processing)
↓
Format Entry (TextFormatter/JSONFormatter)
↓
Write Output (sync or async)
↓
Return Entry to Poollog := logs.New()
log.Info("server started", logs.Int("port", 8080))log := logs.New(
logs.WithLevel(logs.DebugLevel),
logs.WithFormatter(&logs.JSONFormatter{}),
logs.WithOutput(os.Stdout),
logs.WithCaller(),
logs.WithAsync(1024),
)logs.Info("user created",
logs.String("user_id", "123"),
logs.Int("age", 30),
logs.Bool("active", true),
logs.Duration("latency", time.Since(start)),
logs.Err(err),
)log.InfoContext(ctx, "request processed",
logs.Duration("latency", time.Since(start)),
)
// Context fields are automatically extracted if set via logs.WithContext()reqLog := log.With(
logs.String("request_id", requestID),
logs.String("user_id", userID),
)
reqLog.Info("processing request") // Includes request_id and user_id
reqLog.Error("request failed", logs.Err(err)) // Also includes request_id and user_idimport "github.com/kolosys/helix/logs"
log := logs.New()
log.Info("server started", logs.Int("port", 8080))
log.Warn("deprecated API used", logs.String("endpoint", "/old"))
log.Error("request failed", logs.Err(err))log := logs.New(
logs.WithLevel(logs.InfoLevel),
logs.WithFormatter(&logs.JSONFormatter{}),
logs.WithOutput(os.Stdout),
)
log.Info("user created",
logs.String("user_id", "123"),
logs.String("email", "user@example.com"),
)
// Output: {"level":"info","time":"2024-01-15T10:30:00Z","message":"user created","user_id":"123","email":"user@example.com"}log := logs.New(
logs.WithLevel(logs.DebugLevel),
logs.WithFormatter(&logs.TextFormatter{
DisableColors: false,
FullTimestamp: true,
}),
logs.WithCaller(),
)
log.Debug("processing request",
logs.String("method", "GET"),
logs.String("path", "/users"),
)// Set context fields (typically in middleware)
ctx := logs.WithContext(r.Context(),
logs.String("request_id", requestID),
logs.String("user_id", userID),
)
// Log with context (fields automatically included)
log.InfoContext(ctx, "request processed",
logs.Duration("latency", time.Since(start)),
)log := logs.New(
logs.WithAsync(1024), // Buffer size
logs.WithFormatter(&logs.JSONFormatter{}),
)
// Logs are written asynchronously
log.Info("high volume log", logs.Int("count", 1000))sampler := logs.NewRateSampler(100, time.Second) // 100 logs per second
log := logs.New(
logs.WithSampler(sampler),
logs.WithFormatter(&logs.JSONFormatter{}),
)
// Only 100 logs per second will be written
for i := 0; i < 10000; i++ {
log.Info("high volume", logs.Int("i", i))
}type metricsHook struct{}
func (h *metricsHook) Levels() []logs.Level {
return []logs.Level{logs.ErrorLevel, logs.FatalLevel}
}
func (h *metricsHook) Fire(entry *logs.Entry) error {
metrics.IncrementErrorCounter(entry.Level.String())
return nil
}
log := logs.New(
logs.WithHooks(&metricsHook{}),
)log := logs.New(logs.WithAsync(1024))
defer log.Close() // Flushes pending logs
log.Info("message")
// Logger must be closed to ensure message is writtenlog := logs.New(logs.WithAsync(1024))
// In shutdown handler
s.OnStop(func(ctx context.Context, s *helix.Server) {
log.Close() // Flush logs before shutdown
// Don't log after this point
})// ❌ Wrong - causes allocation
log.Info("message", logs.Any("count", 42))
// ✅ Correct - zero allocation
log.Info("message", logs.Int("count", 42))// Development
log := logs.New(logs.WithLevel(logs.DebugLevel))
// Production
log := logs.New(logs.WithLevel(logs.InfoLevel))import "github.com/kolosys/helix/middleware"
s := helix.New()
s.Use(middleware.Logger(middleware.LogFormatJSON))func requestIDMiddleware(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
requestID := generateRequestID()
ctx := logs.WithContext(r.Context(),
logs.String("request_id", requestID),
)
next.ServeHTTP(w, r.WithContext(ctx))
})
}type prometheusHook struct {
counter *prometheus.CounterVec
}
func (h *prometheusHook) Levels() []logs.Level {
return []logs.Level{logs.ErrorLevel, logs.FatalLevel}
}
func (h *prometheusHook) Fire(entry *logs.Entry) error {
h.counter.WithLabelValues(entry.Level.String()).Inc()
return nil
}
log := logs.New(logs.WithHooks(&prometheusHook{counter: errorCounter}))type cloudWatchHook struct {
client *cloudwatchlogs.Client
}
func (h *cloudWatchHook) Levels() []logs.Level {
return []logs.Level{} // All levels
}
func (h *cloudWatchHook) Fire(entry *logs.Entry) error {
// Send to CloudWatch
return h.client.PutLogEvents(...)
}Logger.Log()
↓
Level Check (skip if below threshold)
↓
Sampler Check (skip if sampled out)
↓
Create Entry (from pool)
↓
Add Fields (default + call-site)
↓
Run Hooks (pre-processing)
↓
Format Entry (TextFormatter/JSONFormatter)
↓
Write Output (sync or async)
↓
Return Entry to Poollog := logs.New()
log.Info("server started", logs.Int("port", 8080))log := logs.New(
logs.WithLevel(logs.DebugLevel),
logs.WithFormatter(&logs.JSONFormatter{}),
logs.WithOutput(os.Stdout),
logs.WithCaller(),
logs.WithAsync(1024),
)logs.Info("user created",
logs.String("user_id", "123"),
logs.Int("age", 30),
logs.Bool("active", true),
logs.Duration("latency", time.Since(start)),
logs.Err(err),
)log.InfoContext(ctx, "request processed",
logs.Duration("latency", time.Since(start)),
)
// Context fields are automatically extracted if set via logs.WithContext()reqLog := log.With(
logs.String("request_id", requestID),
logs.String("user_id", userID),
)
reqLog.Info("processing request") // Includes request_id and user_id
reqLog.Error("request failed", logs.Err(err)) // Also includes request_id and user_idimport "github.com/kolosys/helix/logs"
log := logs.New()
log.Info("server started", logs.Int("port", 8080))
log.Warn("deprecated API used", logs.String("endpoint", "/old"))
log.Error("request failed", logs.Err(err))log := logs.New(
logs.WithLevel(logs.InfoLevel),
logs.WithFormatter(&logs.JSONFormatter{}),
logs.WithOutput(os.Stdout),
)
log.Info("user created",
logs.String("user_id", "123"),
logs.String("email", "user@example.com"),
)
// Output: {"level":"info","time":"2024-01-15T10:30:00Z","message":"user created","user_id":"123","email":"user@example.com"}log := logs.New(
logs.WithLevel(logs.DebugLevel),
logs.WithFormatter(&logs.TextFormatter{
DisableColors: false,
FullTimestamp: true,
}),
logs.WithCaller(),
)
log.Debug("processing request",
logs.String("method", "GET"),
logs.String("path", "/users"),
)// Set context fields (typically in middleware)
ctx := logs.WithContext(r.Context(),
logs.String("request_id", requestID),
logs.String("user_id", userID),
)
// Log with context (fields automatically included)
log.InfoContext(ctx, "request processed",
logs.Duration("latency", time.Since(start)),
)log := logs.New(
logs.WithAsync(1024), // Buffer size
logs.WithFormatter(&logs.JSONFormatter{}),
)
// Logs are written asynchronously
log.Info("high volume log", logs.Int("count", 1000))sampler := logs.NewRateSampler(100, time.Second) // 100 logs per second
log := logs.New(
logs.WithSampler(sampler),
logs.WithFormatter(&logs.JSONFormatter{}),
)
// Only 100 logs per second will be written
for i := 0; i < 10000; i++ {
log.Info("high volume", logs.Int("i", i))
}type metricsHook struct{}
func (h *metricsHook) Levels() []logs.Level {
return []logs.Level{logs.ErrorLevel, logs.FatalLevel}
}
func (h *metricsHook) Fire(entry *logs.Entry) error {
metrics.IncrementErrorCounter(entry.Level.String())
return nil
}
log := logs.New(
logs.WithHooks(&metricsHook{}),
)log := logs.New(logs.WithAsync(1024))
defer log.Close() // Flushes pending logs
log.Info("message")
// Logger must be closed to ensure message is writtenlog := logs.New(logs.WithAsync(1024))
// In shutdown handler
s.OnStop(func(ctx context.Context, s *helix.Server) {
log.Close() // Flush logs before shutdown
// Don't log after this point
})// ❌ Wrong - causes allocation
log.Info("message", logs.Any("count", 42))
// ✅ Correct - zero allocation
log.Info("message", logs.Int("count", 42))// Development
log := logs.New(logs.WithLevel(logs.DebugLevel))
// Production
log := logs.New(logs.WithLevel(logs.InfoLevel))import "github.com/kolosys/helix/middleware"
s := helix.New()
s.Use(middleware.Logger(middleware.LogFormatJSON))func requestIDMiddleware(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
requestID := generateRequestID()
ctx := logs.WithContext(r.Context(),
logs.String("request_id", requestID),
)
next.ServeHTTP(w, r.WithContext(ctx))
})
}type prometheusHook struct {
counter *prometheus.CounterVec
}
func (h *prometheusHook) Levels() []logs.Level {
return []logs.Level{logs.ErrorLevel, logs.FatalLevel}
}
func (h *prometheusHook) Fire(entry *logs.Entry) error {
h.counter.WithLabelValues(entry.Level.String()).Inc()
return nil
}
log := logs.New(logs.WithHooks(&prometheusHook{counter: errorCounter}))type cloudWatchHook struct {
client *cloudwatchlogs.Client
}
func (h *cloudWatchHook) Levels() []logs.Level {
return []logs.Level{} // All levels
}
func (h *cloudWatchHook) Fire(entry *logs.Entry) error {
// Send to CloudWatch
return h.client.PutLogEvents(...)
}