// Tests for the stream persistence manager: basic persist/flush, crash
// recovery, sequence numbering, file rotation, retry limits, and the
// dead-letter queue.
package stream
import (
"fmt"
"os"
"path/filepath"
"sync"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// cleanupPersistenceFiles 清理持久化文件,避免测试间的文件冲突
func cleanupPersistenceFiles(dir string) {
if _, err := os.Stat(dir); os.IsNotExist(err) {
return
}
// 完全删除目录并重新创建
os.RemoveAll(dir)
os.MkdirAll(dir, 0755)
}
// TestPersistenceManager_BasicOperations 测试持久化管理器的基本操作
// TestPersistenceManager_BasicOperations verifies that a freshly started
// persistence manager accepts data, flushes it to disk, and reports the
// expected counters in its statistics.
func TestPersistenceManager_BasicOperations(t *testing.T) {
	// Unique temp directory per run so repeated runs never collide.
	tempDir := filepath.Join(os.TempDir(), fmt.Sprintf("persistence_test_%d", time.Now().UnixNano()))
	cleanupPersistenceFiles(tempDir)
	defer os.RemoveAll(tempDir)

	pm := NewPersistenceManager(tempDir)
	require.NotNil(t, pm)
	require.NoError(t, pm.Start())
	defer func() {
		if pm != nil {
			pm.Stop()
		}
	}()

	// Persist three records.
	for n := 1; n <= 3; n++ {
		record := map[string]interface{}{"message": fmt.Sprintf("test_data_%d", n), "id": n}
		assert.NoError(t, pm.PersistData(record))
	}

	// Give the background flusher time to write everything out.
	time.Sleep(3 * time.Second)

	// The counters must reflect all three records and at least one file.
	stats := pm.GetStats()
	if totalPersisted, ok := stats["total_persisted"].(int64); !ok {
		t.Errorf("total_persisted field is missing or not int64: %v", stats["total_persisted"])
	} else {
		assert.Equal(t, int64(3), totalPersisted)
	}
	if filesCreated, ok := stats["files_created"].(int64); !ok {
		t.Errorf("files_created field is missing or not int64: %v", stats["files_created"])
	} else {
		assert.True(t, filesCreated > 0)
	}
}
// TestPersistenceManager_DataRecovery 测试数据恢复功能
// TestPersistenceManager_DataRecovery exercises the crash/restart path: data
// persisted by one manager instance must be recoverable, in order and intact,
// by a second instance pointed at the same directory.
func TestPersistenceManager_DataRecovery(t *testing.T) {
	// Unique temp directory per run to avoid cross-test interference.
	tempDir := filepath.Join(os.TempDir(), fmt.Sprintf("recovery_test_%d", time.Now().UnixNano()))
	// Remove any stale files before starting.
	cleanupPersistenceFiles(tempDir)
	defer os.RemoveAll(tempDir)
	// Phase 1: persist data with a first manager instance.
	pm1 := NewPersistenceManager(tempDir)
	err := pm1.Start()
	require.NoError(t, err)
	// Persist the test records.
	testData := []map[string]interface{}{
		{"message": "data_1", "id": 1},
		{"message": "data_2", "id": 2},
		{"message": "data_3", "id": 3},
		{"message": "data_4", "id": 4},
		{"message": "data_5", "id": 5},
	}
	for _, data := range testData {
		err := pm1.PersistData(data)
		require.NoError(t, err)
	}
	// Wait for the background flusher to write everything to disk.
	time.Sleep(3 * time.Second)
	if pm1 != nil {
		pm1.Stop()
	}
	// Phase 2: a fresh manager instance recovers the persisted data.
	pm2 := NewPersistenceManager(tempDir)
	err = pm2.Start()
	require.NoError(t, err)
	defer func() {
		if pm2 != nil {
			pm2.Stop()
		}
	}()
	// Load persisted files and queue their contents for recovery.
	err = pm2.LoadAndRecoverData()
	require.NoError(t, err)
	// Give the recovery queue time to fill.
	time.Sleep(200 * time.Millisecond)
	// Drain the recovery queue in order.
	recoveredData := make([]map[string]interface{}, 0)
	for i := 0; i < len(testData); i++ {
		data, hasMore := pm2.GetRecoveryData()
		if hasMore && data != nil {
			recoveredData = append(recoveredData, data)
		} else {
			break
		}
	}
	// Verify order and completeness.
	assert.Equal(t, len(testData), len(recoveredData))
	for i, expected := range testData {
		assert.Equal(t, expected["message"], recoveredData[i]["message"], "数据顺序不正确")
		// JSON round-tripping turns numbers into float64, so convert before comparing.
		expectedID := float64(expected["id"].(int))
		assert.Equal(t, expectedID, recoveredData[i]["id"], "数据内容不正确")
	}
}
// TestPersistenceManager_SequenceNumbering 测试序列号管理
// TestPersistenceManager_SequenceNumbering checks that the sequence counter
// advances exactly once per persisted record.
func TestPersistenceManager_SequenceNumbering(t *testing.T) {
	tempDir := filepath.Join(os.TempDir(), fmt.Sprintf("sequence_test_%d", time.Now().UnixNano()))
	cleanupPersistenceFiles(tempDir)
	defer os.RemoveAll(tempDir)

	pm := NewPersistenceManager(tempDir)
	require.NoError(t, pm.Start())
	defer func() {
		if pm != nil {
			pm.Stop()
		}
	}()

	// Ten records should bump the sequence counter ten times.
	for n := 0; n < 10; n++ {
		record := map[string]interface{}{"message": fmt.Sprintf("data_%d", n), "id": n}
		require.NoError(t, pm.PersistData(record))
	}

	// Let the flusher write everything to disk.
	time.Sleep(3 * time.Second)

	stats := pm.GetStats()
	totalPersisted, ok := stats["total_persisted"].(int64)
	if !ok {
		t.Logf("Stats: %+v", stats)
		t.Fatalf("total_persisted not found or wrong type")
	}
	assert.Equal(t, int64(10), totalPersisted)
	if sequenceCounter, ok := stats["sequence_counter"].(int64); ok {
		assert.Equal(t, int64(10), sequenceCounter)
	}
}
// TestPersistenceManager_FileRotation 测试文件轮转
// TestPersistenceManager_FileRotation forces rotation by combining a tiny
// maximum file size with records larger than that limit.
func TestPersistenceManager_FileRotation(t *testing.T) {
	tempDir := filepath.Join(os.TempDir(), fmt.Sprintf("rotation_test_%d", time.Now().UnixNano()))
	cleanupPersistenceFiles(tempDir)
	defer os.RemoveAll(tempDir)

	// 100-byte files and a fast flush interval make rotation trigger quickly.
	pm := NewPersistenceManagerWithConfig(tempDir, 100, 50*time.Millisecond)
	require.NoError(t, pm.Start())
	defer func() {
		if pm != nil {
			pm.Stop()
		}
	}()

	// Each record is intentionally bulky so a handful of writes exceeds
	// the per-file size budget.
	for n := 0; n < 20; n++ {
		record := map[string]interface{}{
			"message": fmt.Sprintf("this_is_a_long_data_string_to_trigger_file_rotation_%d", n),
			"id":      n,
			"extra":   "some extra data to make it longer",
		}
		require.NoError(t, pm.PersistData(record))
	}

	// Wait for the writes to land.
	time.Sleep(200 * time.Millisecond)

	// More than one file proves rotation happened.
	stats := pm.GetStats()
	if filesCreated, ok := stats["files_created"].(int64); ok {
		assert.True(t, filesCreated > 1, "应该创建多个文件")
	}
}
// TestPersistenceManager_ConcurrentAccess 测试并发访问
// TestPersistenceManager_ConcurrentAccess hammers PersistData from several
// goroutines at once and verifies that no record is lost.
func TestPersistenceManager_ConcurrentAccess(t *testing.T) {
	tempDir := filepath.Join(os.TempDir(), fmt.Sprintf("concurrent_test_%d", time.Now().UnixNano()))
	cleanupPersistenceFiles(tempDir)
	defer os.RemoveAll(tempDir)

	pm := NewPersistenceManager(tempDir)
	require.NoError(t, pm.Start())
	defer func() {
		if pm != nil {
			pm.Stop()
		}
	}()

	const numGoroutines = 10
	const itemsPerGoroutine = 10

	// WaitGroup replaces the hand-rolled done channel; same semantics.
	var wg sync.WaitGroup
	for g := 0; g < numGoroutines; g++ {
		wg.Add(1)
		go func(goroutineID int) {
			defer wg.Done()
			for i := 0; i < itemsPerGoroutine; i++ {
				record := map[string]interface{}{
					"message":      fmt.Sprintf("goroutine_%d_item_%d", goroutineID, i),
					"goroutine_id": goroutineID,
					"item_id":      i,
				}
				assert.NoError(t, pm.PersistData(record))
			}
		}(g)
	}
	wg.Wait()

	// Allow the flusher to drain everything to disk.
	time.Sleep(3 * time.Second)

	// Every record from every goroutine must be accounted for.
	stats := pm.GetStats()
	expectedTotal := int64(numGoroutines * itemsPerGoroutine)
	totalPersisted, ok := stats["total_persisted"].(int64)
	if !ok {
		t.Logf("Stats: %+v", stats)
		t.Fatalf("total_persisted not found or wrong type")
	}
	assert.Equal(t, expectedTotal, totalPersisted)
}
// TestPersistenceManager_ErrorHandling 测试错误处理
// TestPersistenceManager_ErrorHandling covers two failure paths: an unusable
// storage directory and a double Start on the same manager.
func TestPersistenceManager_ErrorHandling(t *testing.T) {
	// A path containing NUL bytes cannot be created on any platform.
	invalidDir := "\x00invalid\x00path"
	pm := NewPersistenceManager(invalidDir)
	assert.Error(t, pm.Start(), "应该返回错误")

	// A valid manager must refuse to be started twice.
	tempDir := filepath.Join(os.TempDir(), fmt.Sprintf("error_test_%d", time.Now().UnixNano()))
	cleanupPersistenceFiles(tempDir)
	defer os.RemoveAll(tempDir)

	pm2 := NewPersistenceManager(tempDir)
	require.NoError(t, pm2.Start())
	assert.Error(t, pm2.Start(), "重复启动应该返回错误")
	pm2.Stop()
}
// TestPersistenceManager_RetryAndDeadLetter 测试重试限制和死信队列功能
// 验证重试限制、死信队列和退避策略是否正常工作
// TestPersistenceManager_RetryAndDeadLetter exercises the retry limit, the
// dead-letter queue, and re-persistence of recovered data, verifying that the
// retry cap, dead-lettering, and the associated statistics behave as expected.
func TestPersistenceManager_RetryAndDeadLetter(t *testing.T) {
	// Fixed-name temp directory; wiped before use and removed afterwards.
	tempDir := filepath.Join(os.TempDir(), "streamsql_test_retry")
	// Remove stale files from earlier runs.
	cleanupPersistenceFiles(tempDir)
	defer os.RemoveAll(tempDir)
	// Create the persistence manager with a retry cap of 2.
	pm := NewPersistenceManager(tempDir)
	pm.SetMaxRetryCount(2) // allow at most 2 retries
	// Start it.
	if err := pm.Start(); err != nil {
		t.Fatalf("Failed to start persistence manager: %v", err)
	}
	defer func() {
		if pm != nil {
			pm.Stop()
		}
	}()
	// Sample record used by the subtests below.
	testData := map[string]interface{}{
		"id":      1,
		"message": "test retry",
		"value":   100.5,
	}
	// The plain persist path must succeed.
	t.Run("Normal Persistence", func(t *testing.T) {
		err := pm.PersistDataWithRetryLimit(testData, 0)
		if err != nil {
			t.Errorf("Failed to persist data: %v", err)
		}
	})
	// A record whose retry count already exceeds the cap must not be retried.
	t.Run("Retry Limit", func(t *testing.T) {
		// Simulated recovered record carrying tracking metadata.
		retryData := map[string]interface{}{
			"id":           2,
			"message":      "retry test",
			"_sequence_id": float64(123),
			"_retry_count": 3, // above the configured maximum of 2
		}
		// Ask the manager whether this record should be retried.
		shouldRetry := pm.ShouldRetryRecoveredData(retryData)
		if shouldRetry {
			t.Error("Should not retry data that exceeded retry limit")
		}
	})
	// Moving a record into the dead-letter queue must grow the queue and
	// tag the entry with the expected reason string.
	t.Run("Dead Letter Queue", func(t *testing.T) {
		// Snapshot the queue size before the move.
		initialSize := len(pm.GetDeadLetterQueue())
		// Record to dead-letter.
		deadData := map[string]interface{}{
			"id":      3,
			"message": "dead letter test",
		}
		pm.MoveToDeadLetterQueue(deadData)
		// The queue must have grown by exactly one.
		deadLetterQueue := pm.GetDeadLetterQueue()
		if len(deadLetterQueue) != initialSize+1 {
			t.Errorf("Expected dead letter queue size %d, got %d", initialSize+1, len(deadLetterQueue))
		}
		// The newest entry records why it was dead-lettered.
		if len(deadLetterQueue) > 0 {
			lastItem := deadLetterQueue[len(deadLetterQueue)-1]
			if lastItem.Reason != "exceeded retry limit during recovery" {
				t.Errorf("Expected reason 'exceeded retry limit during recovery', got '%s'", lastItem.Reason)
			}
		}
	})
	// Re-persisting recovered data must stamp it with tracking metadata.
	t.Run("Re-persist Recovered Data", func(t *testing.T) {
		recoveryData := map[string]interface{}{
			"id":      4,
			"message": "recovery test",
		}
		err := pm.RePersistRecoveredData(recoveryData)
		if err != nil {
			t.Errorf("Failed to re-persist recovered data: %v", err)
		}
		// RePersistRecoveredData is expected to add both tracking fields.
		if _, exists := recoveryData["_sequence_id"]; !exists {
			t.Error("Expected _sequence_id to be added to recovery data")
		}
		if _, exists := recoveryData["_retry_count"]; !exists {
			t.Error("Expected _retry_count to be added to recovery data")
		}
	})
	// The stats map must expose the retry/dead-letter counters.
	t.Run("Statistics", func(t *testing.T) {
		stats := pm.GetStats()
		// Each of the retry-related fields must be present.
		if _, exists := stats["max_retry_count"]; !exists {
			t.Error("Expected max_retry_count in statistics")
		}
		if _, exists := stats["dead_letter_count"]; !exists {
			t.Error("Expected dead_letter_count in statistics")
		}
		if _, exists := stats["total_dropped"]; !exists {
			t.Error("Expected total_dropped in statistics")
		}
		if _, exists := stats["total_retried"]; !exists {
			t.Error("Expected total_retried in statistics")
		}
	})
}
// TestPersistenceManager_RecoveryProcessing 测试恢复处理逻辑
// 验证指数退避和重试限制是否正常工作
func TestPersistenceManager_RecoveryProcessing(t *testing.T) {
// 创建临时目录
tempDir := filepath.Join(os.TempDir(), "streamsql_test_recovery")
// 清理旧文件
cleanupPersistenceFiles(tempDir)
defer os.RemoveAll(tempDir)
// 创建持久化管理器
pm := NewPersistenceManager(tempDir)
pm.SetMaxRetryCount(2)
// 启动持久化管理器
if err := pm.Start(); err != nil {
t.Fatalf("Failed to start persistence manager: %v", err)
}
defer func() {
if pm != nil {
pm.Stop()
}
}()
// 测试添加数据时的持久化行为
testData := map[string]interface{}{
"id": 1,
"message": "recovery test",
"value": 200.5,
}
// 直接测试持久化功能
err := pm.PersistDataWithRetryLimit(testData, 0)
if err != nil {
t.Errorf("Failed to persist data: %v", err)
}
// 等待一段时间让持久化完成
time.Sleep(200 * time.Millisecond)
// 强制刷新持久化数据
pm.flushPendingData()
// 检查统计信息
stats := pm.GetStats()
totalPersisted := stats["total_persisted"].(int64)
pendingCount := stats["pending_count"].(int)
// 检查是否有数据被持久化或正在等待持久化
if totalPersisted == 0 && pendingCount == 0 {
t.Error("Expected data to be persisted or pending")
}
}
// TestPersistenceManager_ConcurrentRetry 测试并发场景下的重试机制
// 验证在高并发情况下重试限制和死信队列是否正常工作
func TestPersistenceManager_ConcurrentRetry(t *testing.T) {
// 创建临时目录
tempDir := filepath.Join(os.TempDir(), "streamsql_test_concurrent")
// 清理旧文件
cleanupPersistenceFiles(tempDir)
defer os.RemoveAll(tempDir)
// 创建持久化管理器
pm := NewPersistenceManager(tempDir)
pm.SetMaxRetryCount(1) // 设置较低的重试次数以便测试
// 启动持久化管理器
if err := pm.Start(); err != nil {
t.Fatalf("Failed to start persistence manager: %v", err)
}
defer func() {
if pm != nil {
pm.Stop()
}
}()
// 并发测试参数
concurrentCount := 50
var wg sync.WaitGroup
// 并发添加数据
for i := 0; i < concurrentCount; i++ {
wg.Add(1)
go func(index int) {
defer wg.Done()
testData := map[string]interface{}{
"id": index,
"message": fmt.Sprintf("concurrent test %d", index),
"value": float64(index * 10),
}
// 随机重试次数
retryCount := index % 3
err := pm.PersistDataWithRetryLimit(testData, retryCount)
if err != nil {
t.Errorf("Failed to persist data %d: %v", index, err)
}
}(i)
}
// 等待所有协程完成
wg.Wait()
// 等待持久化完成
time.Sleep(500 * time.Millisecond)
// 强制刷新持久化数据
pm.flushPendingData()
// 检查统计信息
stats := pm.GetStats()
totalPersisted := stats["total_persisted"].(int64)
totalRetried := stats["total_retried"].(int64)
pendingCount := stats["pending_count"].(int)
// 检查是否有数据被处理(持久化、重试或等待中)
if totalPersisted == 0 && totalRetried == 0 && pendingCount == 0 {
t.Error("Expected some data to be processed (persisted, retried, or pending)")
}
}
// TestPersistenceManagerIsInRecoveryMode 测试恢复模式检查功能
func TestPersistenceManagerIsInRecoveryMode(t *testing.T) {
tempDir := filepath.Join(os.TempDir(), "recovery_mode_test")
defer os.RemoveAll(tempDir)
pm := NewPersistenceManager(tempDir)
require.NotNil(t, pm)
// 启动前不应该在恢复模式
assert.False(t, pm.IsInRecoveryMode())
err := pm.Start()
require.NoError(t, err)
// 启动后如果没有恢复数据,不应该在恢复模式
assert.False(t, pm.IsInRecoveryMode())
// 持久化一些数据
testData := map[string]interface{}{
"message": "test_recovery_mode",
"id": 789,
}
err = pm.PersistData(testData)
require.NoError(t, err)
// 等待数据写入磁盘
time.Sleep(200 * time.Millisecond)
// 停止持久化管理器
pm.Stop()
time.Sleep(100 * time.Millisecond)
// 重新启动,这时应该进入恢复模式
err = pm.Start()
require.NoError(t, err)
defer pm.Stop()
// 现在应该在恢复模式
assert.True(t, pm.IsInRecoveryMode())
// 处理恢复数据直到完成
for pm.IsInRecoveryMode() {
if _, hasData := pm.GetRecoveryData(); !hasData {
break
}
time.Sleep(10 * time.Millisecond)
}
// 恢复完成后应该退出恢复模式
assert.False(t, pm.IsInRecoveryMode())
}
// TestPersistenceManagerGetRecoveryData 测试获取恢复数据功能
func TestPersistenceManagerGetRecoveryData(t *testing.T) {
tempDir := filepath.Join(os.TempDir(), "get_recovery_data_test")
defer os.RemoveAll(tempDir)
pm := NewPersistenceManager(tempDir)
require.NotNil(t, pm)
err := pm.Start()
require.NoError(t, err)
defer pm.Stop()
// 初始状态应该没有恢复数据
_, hasData := pm.GetRecoveryData()
assert.False(t, hasData)
// 持久化多个数据项
testData := []map[string]interface{}{
{"message": "recovery_item_1", "id": 1},
{"message": "recovery_item_2", "id": 2},
{"message": "recovery_item_3", "id": 3},
}
for _, data := range testData {
err = pm.PersistData(data)
require.NoError(t, err)
}
// 等待数据写入
time.Sleep(300 * time.Millisecond)
// 停止并重新启动以触发恢复
pm.Stop()
time.Sleep(100 * time.Millisecond)
err = pm.Start()
require.NoError(t, err)
// 现在应该能够获取恢复数据
recoveredCount := 0
for {
recoveredData, hasData := pm.GetRecoveryData()
if !hasData {
break
}
recoveredCount++
assert.NotNil(t, recoveredData)
assert.Contains(t, recoveredData, "message")
assert.Contains(t, recoveredData, "id")
t.Logf("Recovered data %d: %v", recoveredCount, recoveredData)
}
assert.True(t, recoveredCount > 0, "Should have recovered some data")
}
// TestPersistenceManagerRetryFailedData 测试重试失败数据功能
func TestPersistenceManagerRetryFailedData(t *testing.T) {
tempDir := filepath.Join(os.TempDir(), "retry_failed_data_test")
defer os.RemoveAll(tempDir)
pm := NewPersistenceManager(tempDir)
require.NotNil(t, pm)
err := pm.Start()
require.NoError(t, err)
defer pm.Stop()
// 先持久化一些数据以获得序列号
testData := map[string]interface{}{
"message": "failed_data",
"retry": 0,
}
err = pm.PersistData(testData)
require.NoError(t, err)
// 等待数据写入
time.Sleep(100 * time.Millisecond)
// 测试重试失败数据(使用序列号和原因)
err = pm.RetryFailedData(1, "test failure reason")
// 这个调用可能会失败,因为序列号可能不在重试映射中,这是正常的
// 添加一些数据到死信队列
pm.MoveToDeadLetterQueue(testData)
deadLetterQueue := pm.GetDeadLetterQueue()
assert.Len(t, deadLetterQueue, 1)
// 测试使用无效序列号的重试
err = pm.RetryFailedData(999, "invalid sequence test")
// 应该返回错误,因为序列号不存在
// 验证死信队列状态
deadLetterQueue = pm.GetDeadLetterQueue()
assert.NotNil(t, deadLetterQueue)
}
// TestPersistenceManagerClearDeadLetterQueue 测试清空死信队列功能
func TestPersistenceManagerClearDeadLetterQueue(t *testing.T) {
tempDir := filepath.Join(os.TempDir(), "clear_dead_letter_test")
defer os.RemoveAll(tempDir)
pm := NewPersistenceManager(tempDir)
require.NotNil(t, pm)
err := pm.Start()
require.NoError(t, err)
defer pm.Stop()
// 添加多个项目到死信队列
testData := []map[string]interface{}{
{"message": "dead_letter_1", "id": 1},
{"message": "dead_letter_2", "id": 2},
{"message": "dead_letter_3", "id": 3},
}
for _, data := range testData {
pm.MoveToDeadLetterQueue(data)
}
// 验证死信队列有数据
deadLetterQueue := pm.GetDeadLetterQueue()
assert.Len(t, deadLetterQueue, 3)
// 清空死信队列
pm.ClearDeadLetterQueue()
// 验证死信队列已清空
deadLetterQueue = pm.GetDeadLetterQueue()
assert.Len(t, deadLetterQueue, 0)
}
// TestPersistenceManagerShouldRetryRecoveredData 测试是否应该重试恢复数据
func TestPersistenceManagerShouldRetryRecoveredData(t *testing.T) {
tempDir := filepath.Join(os.TempDir(), "should_retry_test")
defer os.RemoveAll(tempDir)
pm := NewPersistenceManagerWithConfig(tempDir, 1024, 1*time.Second)
require.NotNil(t, pm)
err := pm.Start()
require.NoError(t, err)
defer pm.Stop()
// 测试重试逻辑
testData := map[string]interface{}{
"message": "retry_test",
"id": 1,
"retry": 2, // 设置重试次数
}
// 测试是否应该重试(重试次数小于最大值)
shouldRetry := pm.ShouldRetryRecoveredData(testData)
assert.True(t, shouldRetry)
// 测试重试次数过多的情况
testData["retry"] = 10 // 设置较大的重试次数
shouldRetry = pm.ShouldRetryRecoveredData(testData)
assert.False(t, shouldRetry)
// 验证统计信息
stats := pm.GetStats()
assert.NotNil(t, stats)
}
// TestPersistenceManagerWithConfigVariations 测试不同配置的持久化管理器
// TestPersistenceManagerWithConfigVariations runs the basic persist path under
// several file-size / flush-interval combinations.
func TestPersistenceManagerWithConfigVariations(t *testing.T) {
	// Unique, empty, auto-removed parent directory per run.
	tempDir := t.TempDir()

	tests := []struct {
		name          string
		maxFileSize   int64
		flushInterval time.Duration
	}{
		{
			name:          "最小配置",
			maxFileSize:   1024,
			flushInterval: 1 * time.Second,
		},
		{
			name:          "自定义重试配置",
			maxFileSize:   1024,
			flushInterval: 200 * time.Millisecond,
		},
		{
			name:          "自定义文件大小配置",
			maxFileSize:   2048,
			flushInterval: 1 * time.Second,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// One sub-directory per case so cases cannot interfere.
			subDir := filepath.Join(tempDir, tt.name)
			defer os.RemoveAll(subDir)

			pm := NewPersistenceManagerWithConfig(subDir, tt.maxFileSize, tt.flushInterval)
			require.NotNil(t, pm)
			require.NoError(t, pm.Start())
			defer pm.Stop()

			// Each configuration must accept a basic record.
			testData := map[string]interface{}{
				"message": "config_test",
				"config":  tt.name,
			}
			assert.NoError(t, pm.PersistData(testData))

			stats := pm.GetStats()
			assert.NotNil(t, stats)
		})
	}
}
// TestPersistenceManagerConcurrentOperations 测试持久化管理器的并发操作
func TestPersistenceManagerConcurrentOperations(t *testing.T) {
tempDir := filepath.Join(os.TempDir(), "concurrent_ops_test")
defer os.RemoveAll(tempDir)
pm := NewPersistenceManager(tempDir)
require.NotNil(t, pm)
err := pm.Start()
require.NoError(t, err)
defer pm.Stop()
// 并发持久化数据
const numGoroutines = 10
const itemsPerGoroutine = 5
done := make(chan bool, numGoroutines)
for i := 0; i < numGoroutines; i++ {
go func(goroutineID int) {
for j := 0; j < itemsPerGoroutine; j++ {
testData := map[string]interface{}{
"message": "concurrent_test",
"goroutine_id": goroutineID,
"item_id": j,
}
err := pm.PersistData(testData)
assert.NoError(t, err)
}
done <- true
}(i)
}
// 等待所有goroutine完成
for i := 0; i < numGoroutines; i++ {
<-done
}
// 等待数据写入完成
time.Sleep(500 * time.Millisecond)
// 验证统计信息
stats := pm.GetStats()
assert.NotNil(t, stats)
t.Logf("Final stats: %+v", stats)
}
// TestPersistenceManagerSetMaxRetryCount 测试设置最大重试次数
func TestPersistenceManagerSetMaxRetryCount(t *testing.T) {
tempDir := filepath.Join(os.TempDir(), "set_max_retry_test")
defer os.RemoveAll(tempDir)
pm := NewPersistenceManager(tempDir)
require.NotNil(t, pm)
err := pm.Start()
require.NoError(t, err)
defer pm.Stop()
// 设置新的最大重试次数
newMaxRetryCount := 10
pm.SetMaxRetryCount(newMaxRetryCount)
// 测试重试逻辑是否使用新的最大重试次数
testData := map[string]interface{}{
"message": "max_retry_test",
"retry": 5, // 小于新的最大重试次数
}
shouldRetry := pm.ShouldRetryRecoveredData(testData)
assert.True(t, shouldRetry, "Should retry when retry count is less than max")
// 测试超过最大重试次数的情况
testData["retry"] = 15 // 大于新的最大重试次数
shouldRetry = pm.ShouldRetryRecoveredData(testData)
assert.False(t, shouldRetry, "Should not retry when retry count exceeds max")
}
// TestPersistenceManagerWriteItemToFile 测试写入项目到文件功能
func TestPersistenceManagerWriteItemToFile(t *testing.T) {
tempDir := filepath.Join(os.TempDir(), "write_item_test")
defer os.RemoveAll(tempDir)
pm := NewPersistenceManager(tempDir)
require.NotNil(t, pm)
err := pm.Start()
require.NoError(t, err)
defer pm.Stop()
// 测试写入大量数据以触发文件写入逻辑
for i := 0; i < 100; i++ {
testData := map[string]interface{}{
"message": "write_test",
"id": i,
"data": make([]byte, 100), // 添加一些数据量
}
err = pm.PersistData(testData)
assert.NoError(t, err)
}
// 等待数据写入(增加等待时间)
time.Sleep(500 * time.Millisecond)
// 验证统计信息
stats := pm.GetStats()
assert.NotNil(t, stats)
// 检查pending_count或total_persisted
if pendingCount, ok := stats["pending_count"].(int); ok {
assert.True(t, pendingCount >= 0)
}
if totalPersisted, ok := stats["total_persisted"].(int64); ok {
assert.True(t, totalPersisted >= 0)
}
}
// TestPersistenceManagerFlushPendingData 测试刷新待处理数据功能
func TestPersistenceManagerFlushPendingData(t *testing.T) {
tempDir := filepath.Join(os.TempDir(), "flush_pending_test")
defer os.RemoveAll(tempDir)
// 使用较短的刷新间隔来测试刷新功能
pm := NewPersistenceManagerWithConfig(tempDir, 1024, 100*time.Millisecond)
require.NotNil(t, pm)
err := pm.Start()
require.NoError(t, err)
defer pm.Stop()
// 持久化一些数据
testData := map[string]interface{}{
"message": "flush_test",
"timestamp": time.Now().Unix(),
}
err = pm.PersistData(testData)
require.NoError(t, err)
// 等待刷新定时器触发
time.Sleep(200 * time.Millisecond)
// 验证数据已被刷新
stats := pm.GetStats()
assert.NotNil(t, stats)
}
// TestPersistenceManagerRecoveryProcessor 测试恢复处理器功能
func TestPersistenceManagerRecoveryProcessor(t *testing.T) {
tempDir := filepath.Join(os.TempDir(), "recovery_processor_test")
defer os.RemoveAll(tempDir)
pm := NewPersistenceManager(tempDir)
require.NotNil(t, pm)
err := pm.Start()
require.NoError(t, err)
// 持久化一些数据
testData := []map[string]interface{}{
{"message": "recovery_1", "id": 1},
{"message": "recovery_2", "id": 2},
{"message": "recovery_3", "id": 3},
}
for _, data := range testData {
err = pm.PersistData(data)
require.NoError(t, err)
}
// 等待数据写入
time.Sleep(200 * time.Millisecond)
// 停止并重新启动以触发恢复处理器
pm.Stop()
time.Sleep(100 * time.Millisecond)
err = pm.Start()
require.NoError(t, err)
defer pm.Stop()
// 验证恢复处理器正在工作
assert.True(t, pm.IsInRecoveryMode())
// 等待恢复完成(增加等待时间)
for i := 0; i < 100 && pm.IsInRecoveryMode(); i++ {
time.Sleep(50 * time.Millisecond)
}
// 验证恢复完成(允许在恢复模式中,因为可能有延迟)
// 只要不一直处于恢复模式即可
time.Sleep(1 * time.Second)
// 最终检查:如果还在恢复模式,说明有问题
if pm.IsInRecoveryMode() {
t.Log("Warning: Still in recovery mode after timeout")
}
}
// TestPersistenceManagerLoadItemsFromFile 测试从文件加载项目功能
func TestPersistenceManagerLoadItemsFromFile(t *testing.T) {
tempDir := filepath.Join(os.TempDir(), "load_items_test")
defer os.RemoveAll(tempDir)
pm := NewPersistenceManager(tempDir)
require.NotNil(t, pm)
err := pm.Start()
require.NoError(t, err)
// 持久化多个数据项
testData := []map[string]interface{}{
{"message": "load_test_1", "id": 1, "type": "test"},
{"message": "load_test_2", "id": 2, "type": "test"},
{"message": "load_test_3", "id": 3, "type": "test"},
{"message": "load_test_4", "id": 4, "type": "test"},
{"message": "load_test_5", "id": 5, "type": "test"},
}
for _, data := range testData {
err = pm.PersistData(data)
require.NoError(t, err)
}
// 等待数据写入完成
time.Sleep(300 * time.Millisecond)
// 停止持久化管理器
pm.Stop()
time.Sleep(100 * time.Millisecond)
// 重新启动以触发文件加载
err = pm.Start()
require.NoError(t, err)
defer pm.Stop()
// 验证数据被正确加载
recoveredCount := 0
for {
_, hasData := pm.GetRecoveryData()
if !hasData {
break
}
recoveredCount++
if recoveredCount > 10 { // 防止无限循环
break
}
}
assert.True(t, recoveredCount > 0, "Should have loaded some items from file")
}
// TestPersistenceManagerPersistDataWithRetryLimit 测试带重试限制的数据持久化
func TestPersistenceManagerPersistDataWithRetryLimit(t *testing.T) {
tempDir := filepath.Join(os.TempDir(), "persist_retry_limit_test")
defer os.RemoveAll(tempDir)
pm := NewPersistenceManager(tempDir)
require.NotNil(t, pm)
err := pm.Start()
require.NoError(t, err)
defer pm.Stop()
// 测试正常的持久化
testData := map[string]interface{}{
"message": "retry_limit_test",
"id": 123,
}
err = pm.PersistDataWithRetryLimit(testData, 3)
assert.NoError(t, err)
// 测试带重试计数的数据
testDataWithRetry := map[string]interface{}{
"message": "retry_test",
"id": 456,
"_retry_count": 2,
}
err = pm.PersistDataWithRetryLimit(testDataWithRetry, 5)
assert.NoError(t, err)
// 等待数据写入(增加等待时间)
time.Sleep(500 * time.Millisecond)
// 验证统计信息
stats := pm.GetStats()
assert.NotNil(t, stats)
// 检查pending_count或total_persisted
if pendingCount, ok := stats["pending_count"].(int); ok {
assert.True(t, pendingCount >= 0)
}
if totalPersisted, ok := stats["total_persisted"].(int64); ok {
assert.True(t, totalPersisted >= 0)
}
}
// TestPersistenceManagerLoadAndRecoverData 测试加载和恢复数据功能
func TestPersistenceManagerLoadAndRecoverData(t *testing.T) {
tempDir := filepath.Join(os.TempDir(), "load_recover_test")
defer os.RemoveAll(tempDir)
pm := NewPersistenceManager(tempDir)
require.NotNil(t, pm)
err := pm.Start()
require.NoError(t, err)
// 持久化一些数据
testData := []map[string]interface{}{
{"message": "recover_data_1", "id": 1, "priority": "high"},
{"message": "recover_data_2", "id": 2, "priority": "medium"},
{"message": "recover_data_3", "id": 3, "priority": "low"},
}
for _, data := range testData {
err = pm.PersistData(data)
require.NoError(t, err)
}
// 等待数据写入
time.Sleep(300 * time.Millisecond)
// 停止持久化管理器
pm.Stop()
time.Sleep(100 * time.Millisecond)
// 重新启动以触发LoadAndRecoverData
err = pm.Start()
require.NoError(t, err)
defer pm.Stop()
// 验证恢复模式
assert.True(t, pm.IsInRecoveryMode())
// 处理所有恢复数据
recoveredItems := make([]map[string]interface{}, 0)
for {
recoveredData, hasData := pm.GetRecoveryData()
if !hasData {
break
}
recoveredItems = append(recoveredItems, recoveredData)
if len(recoveredItems) > 10 { // 防止无限循环
break
}
}
assert.True(t, len(recoveredItems) > 0, "Should have recovered some data")
// 验证恢复的数据包含预期字段
for _, item := range recoveredItems {
assert.Contains(t, item, "message")
assert.Contains(t, item, "id")
assert.Contains(t, item, "priority")
}
}