The mockLogger's errors slice was being accessed concurrently from multiple goroutines (the error bus consumer and the test goroutine), causing race conditions when running tests with the race detector. Added sync.Mutex to protect the errors slice and proper locking when accessing it in test assertions.
213 lines
4.3 KiB
Go
package errorbus
|
|
|
|
import (
|
|
"context"
|
|
"errors"
|
|
"sync"
|
|
"testing"
|
|
"time"
|
|
|
|
"git.dcentral.systems/toolz/goplt/pkg/logger"
|
|
)
|
|
|
|
func TestNewChannelBus(t *testing.T) {
|
|
t.Parallel()
|
|
|
|
mockLogger := &mockLogger{}
|
|
bus := NewChannelBus(mockLogger, 100)
|
|
|
|
if bus == nil {
|
|
t.Fatal("Expected bus, got nil")
|
|
}
|
|
|
|
if bus.errors == nil {
|
|
t.Error("Expected errors channel, got nil")
|
|
}
|
|
|
|
if bus.logger == nil {
|
|
t.Error("Expected logger, got nil")
|
|
}
|
|
|
|
// Clean up
|
|
_ = bus.Close()
|
|
}
|
|
|
|
func TestNewChannelBus_DefaultBufferSize(t *testing.T) {
|
|
t.Parallel()
|
|
|
|
mockLogger := &mockLogger{}
|
|
bus := NewChannelBus(mockLogger, 0)
|
|
|
|
if bus == nil {
|
|
t.Fatal("Expected bus, got nil")
|
|
}
|
|
|
|
// Clean up
|
|
_ = bus.Close()
|
|
}
|
|
|
|
func TestChannelBus_Publish(t *testing.T) {
|
|
t.Parallel()
|
|
|
|
mockLogger := &mockLogger{}
|
|
bus := NewChannelBus(mockLogger, 10)
|
|
|
|
testErr := errors.New("test error")
|
|
ctx := context.Background()
|
|
|
|
// Publish error
|
|
bus.Publish(ctx, testErr)
|
|
|
|
// Wait a bit for the error to be processed
|
|
time.Sleep(100 * time.Millisecond)
|
|
|
|
// Verify error was logged
|
|
mockLogger.mu.Lock()
|
|
errorCount := len(mockLogger.errors)
|
|
mockLogger.mu.Unlock()
|
|
if errorCount == 0 {
|
|
t.Error("Expected error to be logged")
|
|
}
|
|
|
|
// Clean up
|
|
_ = bus.Close()
|
|
}
|
|
|
|
func TestChannelBus_Publish_NilError(t *testing.T) {
|
|
t.Parallel()
|
|
|
|
mockLogger := &mockLogger{}
|
|
bus := NewChannelBus(mockLogger, 10)
|
|
|
|
ctx := context.Background()
|
|
|
|
// Publish nil error (should be ignored)
|
|
bus.Publish(ctx, nil)
|
|
|
|
// Wait a bit
|
|
time.Sleep(50 * time.Millisecond)
|
|
|
|
// Verify nil error was not logged
|
|
mockLogger.mu.Lock()
|
|
errorCount := len(mockLogger.errors)
|
|
mockLogger.mu.Unlock()
|
|
if errorCount > 0 {
|
|
t.Error("Expected nil error to be ignored")
|
|
}
|
|
|
|
// Clean up
|
|
_ = bus.Close()
|
|
}
|
|
|
|
func TestChannelBus_Publish_WithContext(t *testing.T) {
|
|
t.Parallel()
|
|
|
|
mockLogger := &mockLogger{}
|
|
bus := NewChannelBus(mockLogger, 10)
|
|
|
|
testErr := errors.New("test error")
|
|
ctx := context.WithValue(context.Background(), "request_id", "test-request-id")
|
|
|
|
bus.Publish(ctx, testErr)
|
|
|
|
// Wait for processing
|
|
time.Sleep(100 * time.Millisecond)
|
|
|
|
// Verify error was logged with context
|
|
mockLogger.mu.Lock()
|
|
errorCount := len(mockLogger.errors)
|
|
mockLogger.mu.Unlock()
|
|
if errorCount == 0 {
|
|
t.Error("Expected error to be logged")
|
|
}
|
|
|
|
// Clean up
|
|
_ = bus.Close()
|
|
}
|
|
|
|
func TestChannelBus_Close(t *testing.T) {
|
|
t.Parallel()
|
|
|
|
mockLogger := &mockLogger{}
|
|
bus := NewChannelBus(mockLogger, 10)
|
|
|
|
// Publish some errors
|
|
for i := 0; i < 5; i++ {
|
|
bus.Publish(context.Background(), errors.New("test error"))
|
|
}
|
|
|
|
// Close and wait
|
|
if err := bus.Close(); err != nil {
|
|
t.Errorf("Close failed: %v", err)
|
|
}
|
|
|
|
// Verify channel is closed
|
|
select {
|
|
case <-bus.errors:
|
|
// Channel is closed, this is expected
|
|
default:
|
|
t.Error("Expected errors channel to be closed")
|
|
}
|
|
}
|
|
|
|
func TestChannelBus_Close_MultipleTimes(t *testing.T) {
|
|
t.Parallel()
|
|
|
|
mockLogger := &mockLogger{}
|
|
bus := NewChannelBus(mockLogger, 10)
|
|
|
|
// Close first time
|
|
if err := bus.Close(); err != nil {
|
|
t.Errorf("First Close failed: %v", err)
|
|
}
|
|
|
|
// Close second time should be safe (uses sync.Once)
|
|
// The channel is already closed, but Close() should handle this gracefully
|
|
if err := bus.Close(); err != nil {
|
|
t.Errorf("Second Close failed: %v", err)
|
|
}
|
|
}
|
|
|
|
func TestChannelBus_ChannelFull(t *testing.T) {
|
|
t.Parallel()
|
|
|
|
mockLogger := &mockLogger{}
|
|
// Use small buffer to test channel full scenario
|
|
bus := NewChannelBus(mockLogger, 1)
|
|
|
|
// Fill the channel
|
|
bus.Publish(context.Background(), errors.New("error1"))
|
|
|
|
// This should not block (channel is full, should log directly)
|
|
bus.Publish(context.Background(), errors.New("error2"))
|
|
|
|
// Wait a bit
|
|
time.Sleep(100 * time.Millisecond)
|
|
|
|
// Clean up
|
|
_ = bus.Close()
|
|
}
|
|
|
|
// mockLogger is a minimal logger.Logger test double that records every
// Error message. The mutex guards errors because the bus's consumer
// goroutine and the test goroutine access the slice concurrently.
type mockLogger struct {
	mu     sync.Mutex
	errors []string
}
|
|
|
|
func (m *mockLogger) Debug(msg string, fields ...logger.Field) {}
|
|
func (m *mockLogger) Info(msg string, fields ...logger.Field) {}
|
|
func (m *mockLogger) Warn(msg string, fields ...logger.Field) {}
|
|
func (m *mockLogger) Error(msg string, fields ...logger.Field) {
|
|
m.mu.Lock()
|
|
defer m.mu.Unlock()
|
|
m.errors = append(m.errors, msg)
|
|
}
|
|
|
|
func (m *mockLogger) With(fields ...logger.Field) logger.Logger {
|
|
return m
|
|
}
|
|
|
|
func (m *mockLogger) WithContext(ctx context.Context) logger.Logger {
|
|
return m
|
|
}
|