Observability
zyn emits capitan signals for full visibility into LLM interactions; register hooks on them to observe requests, responses, and provider calls.
Available Signals
Request Lifecycle
| Signal | When | Key Fields |
|---|---|---|
| RequestStarted | Before pipeline | request.id, synapse.type, input |
| RequestCompleted | After success | request.id, output, response |
| RequestFailed | After pipeline failure | request.id, error |
| ResponseParseFailed | After parse/validation error | request.id, response, error.type |
Provider Lifecycle
| Signal | When | Key Fields |
|---|---|---|
| ProviderCallStarted | Before HTTP call | provider, model |
| ProviderCallCompleted | After HTTP success | provider, tokens, duration.ms |
| ProviderCallFailed | After HTTP failure | provider, http.status.code, error |
Basic Usage
import (
"github.com/zoobz-io/capitan"
"github.com/zoobz-io/zyn"
)
// Hook into specific signals
capitan.Hook(zyn.RequestCompleted, func(ctx context.Context, e *capitan.Event) {
requestID, _ := zyn.RequestIDKey.From(e)
synapseType, _ := zyn.SynapseTypeKey.From(e)
log.Printf("Request %s (%s) completed", requestID, synapseType)
})
Field Access
Use typed keys for type-safe field access:
capitan.Hook(zyn.ProviderCallCompleted, func(ctx context.Context, e *capitan.Event) {
// Typed accessors
model, _ := zyn.ModelKey.From(e)
tokens, _ := zyn.TotalTokensKey.From(e)
duration, _ := zyn.DurationMsKey.From(e)
provider, _ := zyn.ProviderKey.From(e)
log.Printf("[%s] %s: %d tokens in %dms", provider, model, tokens, duration)
})
Available Keys
Request Fields
zyn.RequestIDKey // string - Unique request identifier
zyn.SynapseTypeKey // string - "binary", "classification", etc.
zyn.PromptTaskKey // string - Task description
zyn.TemperatureKey // float64 - Temperature setting used
zyn.InputKey // string - Input text
zyn.OutputKey // string - Parsed result (JSON)
zyn.ResponseKey // string - Raw LLM response
zyn.ErrorKey // string - Error message
zyn.ErrorTypeKey // string - "parse_error", "validation_error"
Provider Fields
zyn.ProviderKey // string - "openai", "anthropic", etc.
zyn.ModelKey // string - Model used
zyn.PromptTokensKey // int - Prompt token count
zyn.CompletionTokensKey // int - Completion token count
zyn.TotalTokensKey // int - Total token count
zyn.DurationMsKey // int - Call duration in ms
zyn.HTTPStatusCodeKey // int - HTTP status code
zyn.ResponseIDKey // string - Provider's response ID
zyn.ResponseFinishReasonKey // string - "stop", "length", etc.
zyn.ResponseCreatedKey // int - Response creation timestamp
zyn.APIErrorTypeKey // string - API error type
zyn.APIErrorCodeKey // string - API error code
Common Patterns
Token Usage Tracking
var totalTokens int64
capitan.Hook(zyn.ProviderCallCompleted, func(ctx context.Context, e *capitan.Event) {
tokens, _ := zyn.TotalTokensKey.From(e)
atomic.AddInt64(&totalTokens, int64(tokens))
})
Error Monitoring
capitan.Hook(zyn.RequestFailed, func(ctx context.Context, e *capitan.Event) {
requestID, _ := zyn.RequestIDKey.From(e)
err, _ := zyn.ErrorKey.From(e)
synapseType, _ := zyn.SynapseTypeKey.From(e)
log.Printf("FAILED [%s] %s: %s", synapseType, requestID, err)
metrics.Increment("synapse_failures", "type", synapseType)
})
capitan.Hook(zyn.ResponseParseFailed, func(ctx context.Context, e *capitan.Event) {
errorType, _ := zyn.ErrorTypeKey.From(e)
response, _ := zyn.ResponseKey.From(e)
if errorType == "parse_error" {
        // %.100s truncates safely; response[:100] would panic on short responses
        log.Printf("Parse error on response: %.100s", response)
}
})
Latency Tracking
capitan.Hook(zyn.ProviderCallCompleted, func(ctx context.Context, e *capitan.Event) {
duration, _ := zyn.DurationMsKey.From(e)
model, _ := zyn.ModelKey.From(e)
metrics.Histogram("llm_latency_ms", float64(duration), "model", model)
})
Request Correlation
All signals include request.id for tracing:
capitan.Hook(zyn.RequestStarted, func(ctx context.Context, e *capitan.Event) {
requestID, _ := zyn.RequestIDKey.From(e)
log.Printf("Starting request %s", requestID)
})
capitan.Hook(zyn.ProviderCallCompleted, func(ctx context.Context, e *capitan.Event) {
// Same request ID available
requestID, _ := zyn.RequestIDKey.From(e)
log.Printf("Provider call for request %s completed", requestID)
})
capitan.Hook(zyn.RequestCompleted, func(ctx context.Context, e *capitan.Event) {
requestID, _ := zyn.RequestIDKey.From(e)
log.Printf("Request %s completed", requestID)
})
Global Observer
Observe all events for debugging:
observer := capitan.Observe(func(ctx context.Context, e *capitan.Event) {
log.Printf("Event: %s", e.Signal())
})
defer observer.Close()
Integration Examples
Prometheus Metrics
var (
tokenCounter = prometheus.NewCounterVec(
prometheus.CounterOpts{Name: "llm_tokens_total"},
[]string{"model", "type"},
)
latencyHistogram = prometheus.NewHistogramVec(
prometheus.HistogramOpts{Name: "llm_latency_seconds"},
[]string{"model"},
)
)
capitan.Hook(zyn.ProviderCallCompleted, func(ctx context.Context, e *capitan.Event) {
model, _ := zyn.ModelKey.From(e)
prompt, _ := zyn.PromptTokensKey.From(e)
completion, _ := zyn.CompletionTokensKey.From(e)
duration, _ := zyn.DurationMsKey.From(e)
tokenCounter.WithLabelValues(model, "prompt").Add(float64(prompt))
tokenCounter.WithLabelValues(model, "completion").Add(float64(completion))
latencyHistogram.WithLabelValues(model).Observe(float64(duration) / 1000)
})
Structured Logging
capitan.Hook(zyn.RequestCompleted, func(ctx context.Context, e *capitan.Event) {
requestID, _ := zyn.RequestIDKey.From(e)
synapseType, _ := zyn.SynapseTypeKey.From(e)
input, _ := zyn.InputKey.From(e)
output, _ := zyn.OutputKey.From(e)
slog.Info("synapse_completed",
"request_id", requestID,
"type", synapseType,
"input_len", len(input),
"output", output,
)
})
Next Steps
- Testing Guide - Test with hooks
- capitan Documentation - Full hooks reference