Skip to content

⚡ 性能优化指南

Bubble 组件库的性能优化涵盖渲染效率、内存管理、并发处理等多个方面,本文档提供全面的性能优化策略和实践。

🎯 性能优化目标

核心指标

  • 渲染延迟 < 16ms (60 FPS)
  • 内存占用 控制在合理范围
  • CPU 使用率 避免不必要的计算
  • 并发效率 最大化吞吐量
  • 响应时间 用户交互响应 < 100ms

📊 渲染性能优化

高效的视图渲染

go
// pkg/bubble/performance/efficient_view.go
package performance

import (
    "fmt"
    "strings"
    "sync"
    "time"

    "github.com/charmbracelet/lipgloss"
)

// 视图缓存管理
type ViewCache struct {
    cache      map[string]CachedView
    mu         sync.RWMutex
    maxEntries int
    ttl        time.Duration
}

type CachedView struct {
    Content   string
    CreatedAt time.Time
    Hash      uint64
}

func NewViewCache(maxEntries int, ttl time.Duration) *ViewCache {
    return &ViewCache{
        cache:      make(map[string]CachedView),
        maxEntries: maxEntries,
        ttl:        ttl,
    }
}

// 获取缓存的视图
func (c *ViewCache) Get(key string) (string, bool) {
    c.mu.RLock()
    defer c.mu.RUnlock()
    
    cached, exists := c.cache[key]
    if !exists {
        return "", false
    }
    
    // 检查是否过期
    if time.Since(cached.CreatedAt) > c.ttl {
        return "", false
    }
    
    return cached.Content, true
}

// 设置缓存
func (c *ViewCache) Set(key, content string, hash uint64) {
    c.mu.Lock()
    defer c.mu.Unlock()
    
    // 如果缓存满了,删除最旧的条目
    if len(c.cache) >= c.maxEntries {
        c.evictOldest()
    }
    
    c.cache[key] = CachedView{
        Content:   content,
        CreatedAt: time.Now(),
        Hash:      hash,
    }
}

// 清理过期缓存
func (c *ViewCache) evictOldest() {
    var oldestKey string
    var oldestTime time.Time
    
    for key, cached := range c.cache {
        if oldestKey == "" || cached.CreatedAt.Before(oldestTime) {
            oldestKey = key
            oldestTime = cached.CreatedAt
        }
    }
    
    if oldestKey != "" {
        delete(c.cache, oldestKey)
    }
}

// OptimizedProgressRenderer renders progress bars, caching output so that an
// unchanged progress value costs a cache lookup instead of a full re-render.
// It is safe for concurrent use.
type OptimizedProgressRenderer struct {
    cache        *ViewCache        // rendered output keyed by id/progress/width
    lastProgress map[string]int    // last rendered "current" value per bar id
    lastHash     map[string]uint64 // hash of the last rendered output per bar id
    mu           sync.RWMutex      // guards lastProgress and lastHash
}

// NewOptimizedProgressRenderer returns a renderer with a 100-entry,
// 5-second view cache.
func NewOptimizedProgressRenderer() *OptimizedProgressRenderer {
    return &OptimizedProgressRenderer{
        cache:        NewViewCache(100, 5*time.Second),
        lastProgress: make(map[string]int),
        lastHash:     make(map[string]uint64),
    }
}

// RenderProgress renders a width-cell progress bar for current/total, styled
// with style. Results are cached per (id, current, total, width); when the
// progress value is unchanged since the last call, the cached render is
// returned without rebuilding the string.
func (r *OptimizedProgressRenderer) RenderProgress(id string, current, total int, width int, style lipgloss.Style) string {
    // Build the cache key once; the old code computed it twice.
    cacheKey := fmt.Sprintf("%s-%d-%d-%d", id, current, total, width)

    r.mu.RLock()
    lastProg, exists := r.lastProgress[id]
    r.mu.RUnlock()

    if exists && lastProg == current {
        if cached, found := r.cache.Get(cacheKey); found {
            return cached
        }
    }

    // Guard against total <= 0 (avoids division by zero) and clamp the
    // filled width into [0, width] for out-of-range current values.
    var percentage float64
    if total > 0 {
        percentage = float64(current) / float64(total)
    }
    filledWidth := int(float64(width) * percentage)
    if filledWidth < 0 {
        filledWidth = 0
    } else if filledWidth > width {
        filledWidth = width
    }
    emptyWidth := width - filledWidth

    // Pre-size the builder to avoid incremental growth.
    var builder strings.Builder
    builder.Grow(width + 50)

    filled := strings.Repeat("█", filledWidth)
    empty := strings.Repeat("░", emptyWidth)
    builder.WriteString(style.Render(filled + empty))

    percentText := fmt.Sprintf(" %.1f%% (%d/%d)", percentage*100, current, total)
    builder.WriteString(percentText)

    result := builder.String()

    // Record the rendered state, then publish to the shared cache.
    r.mu.Lock()
    r.lastProgress[id] = current
    hash := hashString(result)
    r.lastHash[id] = hash
    r.mu.Unlock()

    r.cache.Set(cacheKey, result, hash)

    return result
}

// hashString returns the 64-bit FNV-1a hash of s.
func hashString(s string) uint64 {
    const (
        offsetBasis uint64 = 14695981039346656037
        prime       uint64 = 1099511628211
    )
    h := offsetBasis
    // Index bytes directly; avoids the []byte(s) conversion of the original
    // while hashing the exact same byte sequence.
    for i := 0; i < len(s); i++ {
        h ^= uint64(s[i])
        h *= prime
    }
    return h
}

// BatchRenderer renders a batch of items concurrently with a bounded number
// of workers, delivering results on a channel.
type BatchRenderer struct {
    items      []RenderItem
    resultChan chan RenderResult
    workerPool chan struct{} // counting semaphore bounding concurrent renders
    maxWorkers int
}

// RenderItem is one unit of work: Template is applied to Data.
type RenderItem struct {
    ID       string
    Data     interface{}
    Template func(interface{}) string
}

// RenderResult is the outcome of rendering one RenderItem.
type RenderResult struct {
    ID      string
    Content string
    Error   error // non-nil when the template panicked
}

// NewBatchRenderer returns a BatchRenderer that runs at most maxWorkers
// renders concurrently.
func NewBatchRenderer(maxWorkers int) *BatchRenderer {
    return &BatchRenderer{
        items:      make([]RenderItem, 0, 100),
        resultChan: make(chan RenderResult, 100),
        workerPool: make(chan struct{}, maxWorkers),
        maxWorkers: maxWorkers,
    }
}

// AddItem queues an item for the next RenderAll call.
// Not safe to call concurrently with RenderAll.
func (b *BatchRenderer) AddItem(item RenderItem) {
    b.items = append(b.items, item)
}

// RenderAll renders every queued item concurrently and returns the result
// channel. The channel is closed once all items are done, so callers can
// simply range over it. (Previously the channel was never closed, so any
// ranging consumer blocked forever after the last result.)
func (b *BatchRenderer) RenderAll() <-chan RenderResult {
    var wg sync.WaitGroup
    wg.Add(len(b.items))

    for _, item := range b.items {
        item := item // per-iteration copy (pre-Go 1.22 loop-var semantics)
        go func() {
            defer wg.Done()
            b.renderItem(item)
        }()
    }

    // Close after the last send: each renderItem's result send happens
    // before the corresponding wg.Done.
    go func() {
        wg.Wait()
        close(b.resultChan)
    }()

    return b.resultChan
}

// renderItem renders one item under the worker-pool semaphore, converting a
// template panic into RenderResult.Error instead of crashing the batch.
func (b *BatchRenderer) renderItem(item RenderItem) {
    b.workerPool <- struct{}{} // acquire a worker slot
    defer func() { <-b.workerPool }()

    result := RenderResult{ID: item.ID}

    defer func() {
        if r := recover(); r != nil {
            result.Error = fmt.Errorf("render panic: %v", r)
        }
        b.resultChan <- result
    }()

    result.Content = item.Template(item.Data)
}

差分渲染优化

go
// pkg/bubble/performance/diff_render.go
package performance

import (
    "fmt"
    "strings"
    "unicode/utf8" // NOTE(review): currently unused in this file — confirm before removing
)

// DiffRenderer re-renders only the lines that changed between successive
// frames, emitting ANSI cursor-positioning sequences for changed lines.
type DiffRenderer struct {
    lastContent string   // previous full frame
    lastLines   []string // previous frame split into lines
}

// NewDiffRenderer returns a DiffRenderer with no previous frame.
func NewDiffRenderer() *DiffRenderer {
    return &DiffRenderer{}
}

// RenderDiff compares newContent with the previously rendered frame and
// returns a minimal sequence of ANSI update commands. It returns the full
// content on the first call, and "" when nothing changed.
func (d *DiffRenderer) RenderDiff(newContent string) string {
    if d.lastContent == newContent {
        // Identical frame: nothing to repaint.
        return ""
    }

    newLines := strings.Split(newContent, "\n")

    // First frame: remember it and paint everything.
    if len(d.lastLines) == 0 {
        d.lastContent = newContent
        d.lastLines = newLines
        return newContent
    }

    changes := d.computeLineDiff(d.lastLines, newLines)
    if len(changes) == 0 {
        return ""
    }

    // Translate each change into "move cursor to row 1-based; write new
    // text; clear to end of line". Inserts and modifies emit the same
    // sequence, so they share a case.
    var cmds strings.Builder
    for _, ch := range changes {
        row := ch.LineNumber + 1
        switch ch.Type {
        case ChangeTypeInsert, ChangeTypeModify:
            cmds.WriteString(fmt.Sprintf("\033[%d;1H%s\033[K", row, ch.Content))
        case ChangeTypeDelete:
            cmds.WriteString(fmt.Sprintf("\033[%d;1H\033[K", row))
        }
    }

    d.lastContent = newContent
    d.lastLines = newLines

    return cmds.String()
}

// ChangeType classifies a line-level difference.
type ChangeType int

const (
    ChangeTypeInsert ChangeType = iota // line exists only in the new frame
    ChangeTypeDelete                   // line exists only in the old frame
    ChangeTypeModify                   // line content differs between frames
)

// LineChange describes one changed line.
type LineChange struct {
    Type       ChangeType
    LineNumber int    // zero-based line index
    Content    string // new content ("" for deletes)
}

// computeLineDiff compares frames line by line. This is a positional diff,
// not a true LCS: shared positions that differ become modifies, trailing
// extra new lines become inserts, trailing extra old lines become deletes.
func (d *DiffRenderer) computeLineDiff(oldLines, newLines []string) []LineChange {
    var changes []LineChange

    shared := len(oldLines)
    if len(newLines) < shared {
        shared = len(newLines)
    }

    // Positions present in both frames.
    for i := 0; i < shared; i++ {
        if oldLines[i] == newLines[i] {
            continue
        }
        changes = append(changes, LineChange{
            Type:       ChangeTypeModify,
            LineNumber: i,
            Content:    newLines[i],
        })
    }

    // Frame grew: trailing new lines are inserts.
    for i := shared; i < len(newLines); i++ {
        changes = append(changes, LineChange{
            Type:       ChangeTypeInsert,
            LineNumber: i,
            Content:    newLines[i],
        })
    }

    // Frame shrank: trailing old lines are deletes.
    for i := shared; i < len(oldLines); i++ {
        changes = append(changes, LineChange{
            Type:       ChangeTypeDelete,
            LineNumber: i,
            Content:    "",
        })
    }

    return changes
}

🧠 内存管理优化

对象池模式

go
// pkg/bubble/performance/object_pool.go
package performance

import (
    "runtime"
    "strings"
    "sync"
)

// stringBuilderPool recycles strings.Builder values across renders.
//
// NOTE(review): strings.Builder.Reset drops its backing buffer entirely
// (unlike bytes.Buffer.Reset, which keeps capacity), so this pool only
// avoids the tiny Builder-header allocation, not the buffer growth cost.
// A *bytes.Buffer pool would actually retain capacity — confirm intent.
var stringBuilderPool = sync.Pool{
    New: func() interface{} {
        return &strings.Builder{}
    },
}

// GetStringBuilder returns an empty builder from the pool.
func GetStringBuilder() *strings.Builder {
    sb := stringBuilderPool.Get().(*strings.Builder)
    sb.Reset() // clear any content left by the previous user
    return sb
}

// PutStringBuilder returns sb to the pool for reuse.
func PutStringBuilder(sb *strings.Builder) {
    if sb.Cap() > 64*1024 { // drop oversized builders so the pool cannot pin large buffers
        return
    }
    stringBuilderPool.Put(sb)
}

// ProgressBuffer is a reusable scratch buffer of rendered lines.
type ProgressBuffer struct {
    lines []string
    cap   int // initial capacity hint only; see PutProgressBuffer
}

// progressBufferPool recycles ProgressBuffer values between renders.
var progressBufferPool = sync.Pool{
    New: func() interface{} {
        return &ProgressBuffer{
            lines: make([]string, 0, 50),
            cap:   50,
        }
    },
}

// GetProgressBuffer returns an empty buffer from the pool, keeping any
// previously grown backing capacity.
func GetProgressBuffer() *ProgressBuffer {
    pb := progressBufferPool.Get().(*ProgressBuffer)
    pb.lines = pb.lines[:0] // truncate but keep the backing array
    return pb
}

// PutProgressBuffer returns pb to the pool unless its backing array has
// grown too large, which would pin excessive memory inside the pool.
func PutProgressBuffer(pb *ProgressBuffer) {
    // Check the slice's real capacity. The cap field is set once at
    // construction and never updated, so the old check (pb.cap > 1000)
    // could never reject a buffer that had grown past the limit.
    if cap(pb.lines) > 1000 {
        return
    }
    progressBufferPool.Put(pb)
}

// RenderWithPool renders one line per task and joins them with newlines,
// using pooled scratch objects to avoid per-call allocations.
//
// NOTE(review): Task and renderTaskLine are defined elsewhere in the
// package; this assumes renderTaskLine is a pure per-task formatter.
func (r *OptimizedProgressRenderer) RenderWithPool(tasks []Task) string {
    sb := GetStringBuilder()
    defer PutStringBuilder(sb)
    
    pb := GetProgressBuffer()
    defer PutProgressBuffer(pb)
    
    // Render each task into the pooled line buffer.
    for _, task := range tasks {
        line := r.renderTaskLine(task)
        pb.lines = append(pb.lines, line)
    }
    
    // Join with newlines between lines (no trailing newline).
    for i, line := range pb.lines {
        if i > 0 {
            sb.WriteString("\n")
        }
        sb.WriteString(line)
    }
    
    // Returning sb.String() before the deferred PutStringBuilder is safe:
    // the pool resets the builder on reuse, which drops (not overwrites)
    // the backing array, so the returned string is never mutated.
    return sb.String()
}

// MemoryMonitor keeps a point-in-time snapshot of Go runtime memory
// statistics. It is safe for concurrent use.
type MemoryMonitor struct {
    mu           sync.RWMutex
    allocBytes   uint64 // live heap bytes (MemStats.Alloc)
    allocObjects uint64 // live object count (Mallocs - Frees)
    gcCycles     uint64 // completed GC cycles (NumGC)
}

// NewMemoryMonitor returns a monitor with zeroed statistics.
func NewMemoryMonitor() *MemoryMonitor {
    return &MemoryMonitor{}
}

// UpdateStats refreshes the snapshot from runtime.ReadMemStats.
func (m *MemoryMonitor) UpdateStats() {
    // Read the stats before taking the lock to keep the critical section
    // as short as possible.
    var stats runtime.MemStats
    runtime.ReadMemStats(&stats)

    m.mu.Lock()
    m.allocBytes = stats.Alloc
    m.allocObjects = stats.Mallocs - stats.Frees
    m.gcCycles = uint64(stats.NumGC)
    m.mu.Unlock()
}

// GetStats returns the last snapshot as (allocBytes, allocObjects, gcCycles).
func (m *MemoryMonitor) GetStats() (uint64, uint64, uint64) {
    m.mu.RLock()
    defer m.mu.RUnlock()
    return m.allocBytes, m.allocObjects, m.gcCycles
}

内存预分配策略

go
// pkg/bubble/performance/prealloc.go
package performance

// PreallocStrategy controls slice growth: capacity is multiplied by
// growthFactor, preferring to stay at or under maxCapacity, but never
// returning less than what the caller needs.
type PreallocStrategy struct {
    initialCapacity int
    growthFactor    float64
    maxCapacity     int // soft ceiling for speculative growth
}

// NewPreallocStrategy returns a strategy starting at initial capacity,
// growing by growth each time, and capping speculative growth at max.
func NewPreallocStrategy(initial int, growth float64, max int) *PreallocStrategy {
    return &PreallocStrategy{
        initialCapacity: initial,
        growthFactor:    growth,
        maxCapacity:     max,
    }
}

// NextCapacity returns the capacity to grow to so that at least needed
// elements fit. The result is always >= needed.
//
// Fix: the old ordering clamped to maxCapacity last, so when needed
// exceeded maxCapacity the function returned less than needed — callers
// such as OptimizedTaskList.Add then call make(len, cap) with cap < len,
// which panics. maxCapacity now caps only the speculative growth.
func (s *PreallocStrategy) NextCapacity(current, needed int) int {
    if needed <= current {
        return current
    }

    newCap := int(float64(current) * s.growthFactor)
    if newCap > s.maxCapacity {
        newCap = s.maxCapacity
    }
    if newCap < needed {
        newCap = needed
    }

    return newCap
}

// OptimizedTaskList is a task slice whose growth is driven by a
// PreallocStrategy instead of append's default doubling.
type OptimizedTaskList struct {
    tasks    []Task
    strategy *PreallocStrategy
}

// NewOptimizedTaskList returns an empty list using the default strategy
// (start at 10, grow 1.5x, soft cap 10000).
func NewOptimizedTaskList() *OptimizedTaskList {
    s := NewPreallocStrategy(10, 1.5, 10000)
    return &OptimizedTaskList{
        tasks:    make([]Task, 0, s.initialCapacity),
        strategy: s,
    }
}

// Add appends task, growing the backing array per the strategy when full.
func (l *OptimizedTaskList) Add(task Task) {
    if len(l.tasks) == cap(l.tasks) {
        // Grow manually so the strategy — not append — picks the capacity.
        grown := make([]Task, len(l.tasks), l.strategy.NextCapacity(cap(l.tasks), len(l.tasks)+1))
        copy(grown, l.tasks)
        l.tasks = grown
    }
    l.tasks = append(l.tasks, task)
}

// Get returns the task at index, or nil when index is out of range.
func (l *OptimizedTaskList) Get(index int) Task {
    if index >= 0 && index < len(l.tasks) {
        return l.tasks[index]
    }
    return nil
}

// Len reports the number of stored tasks.
func (l *OptimizedTaskList) Len() int {
    return len(l.tasks)
}

⚡ 并发性能优化

无锁数据结构

go
// pkg/bubble/performance/lockfree.go
package performance

import (
    "sync/atomic"
    "unsafe"
)

// LockFreeQueue is an unbounded multi-producer/multi-consumer FIFO queue in
// the style of the Michael–Scott algorithm: head and tail are CAS-updated
// pointers, and head always points at a dummy node whose next is the front.
type LockFreeQueue struct {
    head unsafe.Pointer // *queueNode: dummy node; head.next is the real front
    tail unsafe.Pointer // *queueNode: last (or second-to-last, when lagging) node
}

// queueNode is one singly linked queue node.
type queueNode struct {
    data interface{}
    next unsafe.Pointer // *queueNode
}

// NewLockFreeQueue returns an empty queue whose head and tail share a
// single dummy node.
func NewLockFreeQueue() *LockFreeQueue {
    node := &queueNode{}
    queue := &LockFreeQueue{
        head: unsafe.Pointer(node),
        tail: unsafe.Pointer(node),
    }
    return queue
}

// Enqueue appends data, retrying its CAS loop until the node is linked.
func (q *LockFreeQueue) Enqueue(data interface{}) {
    newNode := &queueNode{data: data}
    newNodePtr := unsafe.Pointer(newNode)
    
    for {
        tail := atomic.LoadPointer(&q.tail)
        tailNode := (*queueNode)(tail)
        next := atomic.LoadPointer(&tailNode.next)
        
        // Only proceed if tail did not move between the two loads.
        if tail == atomic.LoadPointer(&q.tail) {
            if next == nil {
                // tail is the true last node: try to link the new node.
                if atomic.CompareAndSwapPointer(&tailNode.next, next, newNodePtr) {
                    // Swing tail forward; a failed CAS here is fine —
                    // another thread will advance it ("helping" pattern).
                    atomic.CompareAndSwapPointer(&q.tail, tail, newNodePtr)
                    break
                }
            } else {
                // tail is lagging behind the real last node; help advance it.
                atomic.CompareAndSwapPointer(&q.tail, tail, next)
            }
        }
    }
}

// Dequeue removes and returns the front element, or nil when the queue is
// empty. Note: a nil return is ambiguous if callers enqueue nil values.
func (q *LockFreeQueue) Dequeue() interface{} {
    for {
        head := atomic.LoadPointer(&q.head)
        tail := atomic.LoadPointer(&q.tail)
        headNode := (*queueNode)(head)
        next := atomic.LoadPointer(&headNode.next)
        
        if head == atomic.LoadPointer(&q.head) {
            if head == tail {
                if next == nil {
                    return nil // queue is empty
                }
                // tail lags behind a node that was just linked; help it along.
                atomic.CompareAndSwapPointer(&q.tail, tail, next)
            } else {
                // Read data before the CAS: after head moves, another thread
                // may dequeue past this node.
                nextNode := (*queueNode)(next)
                data := nextNode.data
                if atomic.CompareAndSwapPointer(&q.head, head, next) {
                    // NOTE(review): nextNode.data is not cleared, so the
                    // dequeued value stays reachable via the new dummy node
                    // until it is unlinked — minor retention, confirm OK.
                    return data
                }
            }
        }
    }
}

// AtomicCounter is a lock-free int64 counter built on sync/atomic.
// The zero value is ready to use.
type AtomicCounter struct {
    value int64
}

// add applies delta atomically and returns the resulting value.
func (c *AtomicCounter) add(delta int64) int64 {
    return atomic.AddInt64(&c.value, delta)
}

// Increment adds one and returns the new value.
func (c *AtomicCounter) Increment() int64 { return c.add(1) }

// Decrement subtracts one and returns the new value.
func (c *AtomicCounter) Decrement() int64 { return c.add(-1) }

// Get returns the current value.
func (c *AtomicCounter) Get() int64 {
    return atomic.LoadInt64(&c.value)
}

// Set stores value unconditionally.
func (c *AtomicCounter) Set(value int64) {
    atomic.StoreInt64(&c.value, value)
}

// CompareAndSwap atomically replaces oldVal with newVal, reporting whether
// the swap happened.
func (c *AtomicCounter) CompareAndSwap(oldVal, newVal int64) bool {
    return atomic.CompareAndSwapInt64(&c.value, oldVal, newVal)
}

高性能工作池

go
// pkg/bubble/performance/fast_pool.go
package performance

import (
    "context"
    "runtime"
    "sync"
    "sync/atomic"
    "time"
)

// FastWorkerPool is a fixed-size worker pool with one shared task queue and
// a per-worker local queue used as an overflow fast path by Submit.
type FastWorkerPool struct {
    workers     []*FastWorker
    taskQueue   chan Task           // shared queue all workers consume from
    ctx         context.Context     // cancelled by Stop to unblock idle workers
    cancel      context.CancelFunc
    wg          sync.WaitGroup      // tracks worker goroutines for Stop
    
    // Counters; always accessed via sync/atomic.
    submittedTasks int64
    completedTasks int64
    failedTasks    int64
}

// FastWorker is one pool worker.
//
// NOTE(review): localQueue is appended to by tryDirectSubmit from submitter
// goroutines and read/resliced by run() with no synchronization — that is a
// data race (run `go test -race`). It needs a mutex or per-worker channel.
type FastWorker struct {
    id          int
    pool        *FastWorkerPool
    taskQueue   <-chan Task
    localQueue  []Task
    
    // Diagnostic/load-balancing state.
    lastActive  time.Time
    processedTasks int64 // read atomically by tryDirectSubmit to pick the least-loaded worker
}

// NewFastWorkerPool starts numWorkers workers (defaulting to the CPU count
// when numWorkers <= 0) consuming from a shared queue of capacity queueSize.
// Call Stop to shut the pool down.
func NewFastWorkerPool(numWorkers int, queueSize int) *FastWorkerPool {
    if numWorkers <= 0 {
        numWorkers = runtime.NumCPU()
    }

    ctx, cancel := context.WithCancel(context.Background())

    p := &FastWorkerPool{
        workers:   make([]*FastWorker, numWorkers),
        taskQueue: make(chan Task, queueSize),
        ctx:       ctx,
        cancel:    cancel,
    }

    for i := range p.workers {
        w := &FastWorker{
            id:         i,
            pool:       p,
            taskQueue:  p.taskQueue,
            localQueue: make([]Task, 0, 16), // small per-worker overflow queue
            lastActive: time.Now(),
        }
        p.workers[i] = w

        p.wg.Add(1)
        go w.run()
    }

    return p
}

// Submit offers task to the pool, returning true when it was accepted. The
// shared queue is tried first; when full, the task falls back to a worker's
// local queue via tryDirectSubmit.
//
// Fix: submittedTasks is now incremented only for accepted tasks. It was
// previously counted before the attempt, so rejected submissions inflated
// the counter and Stats could never reconcile submitted with
// completed+failed.
func (p *FastWorkerPool) Submit(task Task) bool {
    select {
    case p.taskQueue <- task:
        atomic.AddInt64(&p.submittedTasks, 1)
        return true
    default:
        // Shared queue is full; try a worker's local queue.
        if p.tryDirectSubmit(task) {
            atomic.AddInt64(&p.submittedTasks, 1)
            return true
        }
        return false
    }
}

// tryDirectSubmit pushes task onto the local queue of the worker that has
// processed the fewest tasks, returning false when that queue is full.
//
// NOTE(review): the len/cap check and append on targetWorker.localQueue are
// unsynchronized with the worker's own reslicing in run() — this is a data
// race and can lose or duplicate tasks. Guard with a mutex or replace the
// local queue with a per-worker channel before relying on this path.
func (p *FastWorkerPool) tryDirectSubmit(task Task) bool {
    // Pick the least-loaded worker by processed-task count.
    var targetWorker *FastWorker
    minTasks := int64(1000000)
    
    for _, worker := range p.workers {
        processed := atomic.LoadInt64(&worker.processedTasks)
        if processed < minTasks {
            minTasks = processed
            targetWorker = worker
        }
    }
    
    if targetWorker != nil {
        // Only append while spare capacity exists so the slice header is
        // not reallocated out from under the worker.
        if len(targetWorker.localQueue) < cap(targetWorker.localQueue) {
            targetWorker.localQueue = append(targetWorker.localQueue, task)
            return true
        }
    }
    
    return false
}

// Stop shuts the pool down: it closes the shared queue so draining workers
// exit, cancels the context to unblock idle workers, and waits for all
// worker goroutines to finish.
//
// NOTE(review): a Submit racing with Stop may send on the closed channel
// and panic — callers must guarantee no submissions once Stop begins.
func (p *FastWorkerPool) Stop() {
    close(p.taskQueue)
    p.cancel()
    p.wg.Wait()
}

// Stats returns the pool counters as (submitted, completed, failed),
// each read atomically.
func (p *FastWorkerPool) Stats() (submitted, completed, failed int64) {
    return atomic.LoadInt64(&p.submittedTasks),
           atomic.LoadInt64(&p.completedTasks),
           atomic.LoadInt64(&p.failedTasks)
}

// run is the worker loop: it drains the worker's local queue first, then
// the shared queue, batching up to batchSize tasks per iteration to
// amortize channel operations.
func (w *FastWorker) run() {
    defer w.pool.wg.Done()

    const batchSize = 10
    batch := make([]Task, 0, batchSize)

    for {
        batch = batch[:0]

        // Prefer the local overflow queue.
        // NOTE(review): localQueue is also appended to by tryDirectSubmit
        // without synchronization — data race; see FastWorker.
        if len(w.localQueue) > 0 {
            n := len(w.localQueue)
            if n > batchSize {
                n = batchSize
            }
            batch = append(batch, w.localQueue[:n]...)
            w.localQueue = w.localQueue[n:]
        }

        // Fall back to the shared queue when the local queue was empty.
        if len(batch) == 0 {
            select {
            case task, ok := <-w.taskQueue:
                if !ok {
                    return // pool closed the queue
                }
                batch = append(batch, task)

                // Opportunistically top up the batch without blocking.
                for len(batch) < batchSize {
                    select {
                    case task, ok := <-w.taskQueue:
                        if !ok {
                            // Fix: the old code received with `task := <-ch`
                            // here, so a queue closed mid-drain appended
                            // zero-value Tasks and caused nil-task panics
                            // on shutdown. Process what we have; the next
                            // iteration observes the close and returns.
                            goto processBatch
                        }
                        batch = append(batch, task)
                    default:
                        goto processBatch
                    }
                }

            case <-w.pool.ctx.Done():
                return
            }
        }

    processBatch:
        for _, task := range batch {
            w.processTask(task)
        }

        w.lastActive = time.Now()
    }
}

// processTask executes one task, recovering panics so a misbehaving task
// cannot kill the worker, and updates the pool's outcome counters.
func (w *FastWorker) processTask(task Task) {
    // Fix: count the task as processed even when Execute panics. The
    // increment previously sat after Execute and was skipped during panic
    // unwinding, understating the per-worker load that tryDirectSubmit
    // uses for balancing. (Registered first, so it runs last.)
    defer atomic.AddInt64(&w.processedTasks, 1)

    defer func() {
        if r := recover(); r != nil {
            atomic.AddInt64(&w.pool.failedTasks, 1)
        }
    }()

    if err := task.Execute(w.pool.ctx); err != nil {
        atomic.AddInt64(&w.pool.failedTasks, 1)
    } else {
        atomic.AddInt64(&w.pool.completedTasks, 1)
    }
}

📊 性能监控和分析

性能分析器

go
// pkg/bubble/performance/profiler.go
package performance

import (
    "runtime"
    "sync"
    "time"
)

// Profiler captures periodic snapshots of runtime memory, GC, and
// goroutine statistics. It is safe for concurrent use.
type Profiler struct {
    mu        sync.RWMutex
    startTime time.Time
    samples   []ProfileSample // bounded history; oldest entries trimmed past 1000
    enabled   bool
}

// ProfileSample is one point-in-time snapshot of runtime statistics.
type ProfileSample struct {
    Timestamp   time.Time
    MemAlloc    uint64        // live heap bytes (MemStats.Alloc)
    MemTotal    uint64        // cumulative allocated bytes (MemStats.TotalAlloc)
    NumGC       uint32        // completed GC cycles
    GCPause     time.Duration // pause of the most recent GC cycle (0 before first GC)
    NumRoutines int           // goroutine count at sample time
    CPUPercent  float64       // NOTE(review): never populated by Sample — confirm intent
}

// NewProfiler returns an enabled profiler with room for 1000 samples.
func NewProfiler() *Profiler {
    return &Profiler{
        startTime: time.Now(),
        samples:   make([]ProfileSample, 0, 1000),
        enabled:   true,
    }
}

// Enable turns sampling on.
func (p *Profiler) Enable() {
    p.mu.Lock()
    defer p.mu.Unlock()
    p.enabled = true
}

// Disable turns sampling off; Sample becomes a no-op.
func (p *Profiler) Disable() {
    p.mu.Lock()
    defer p.mu.Unlock()
    p.enabled = false
}

// Sample records one snapshot and trims the history to bound memory.
func (p *Profiler) Sample() {
    p.mu.Lock()
    defer p.mu.Unlock()

    if !p.enabled {
        return
    }

    var ms runtime.MemStats
    runtime.ReadMemStats(&ms)

    sample := ProfileSample{
        Timestamp:   time.Now(),
        MemAlloc:    ms.Alloc,
        MemTotal:    ms.TotalAlloc,
        NumGC:       ms.NumGC,
        NumRoutines: runtime.NumGoroutine(),
    }

    // The most recent GC pause lives at index (NumGC+255)%256 of the
    // circular PauseNs buffer. Guard on NumGC > 0: the previous check,
    // len(ms.PauseNs) > 0, was always true because PauseNs is a fixed
    // [256]uint64 array, so before the first GC this read a zeroed slot
    // and reported a meaningless pause of 0 as if it were real data.
    if ms.NumGC > 0 {
        sample.GCPause = time.Duration(ms.PauseNs[(ms.NumGC+255)%256])
    }

    p.samples = append(p.samples, sample)

    // Keep the history bounded by dropping the oldest 100 samples.
    if len(p.samples) > 1000 {
        p.samples = p.samples[100:]
    }
}

// GetSamples returns a copy of the sample history.
func (p *Profiler) GetSamples() []ProfileSample {
    p.mu.RLock()
    defer p.mu.RUnlock()

    result := make([]ProfileSample, len(p.samples))
    copy(result, p.samples)
    return result
}

// GetMemoryUsage returns the latest live-heap reading and the peak reading
// across the retained sample history. Both are 0 when no samples exist.
func (p *Profiler) GetMemoryUsage() (current, peak uint64) {
    p.mu.RLock()
    defer p.mu.RUnlock()

    if len(p.samples) == 0 {
        return 0, 0
    }

    current = p.samples[len(p.samples)-1].MemAlloc
    for _, sample := range p.samples {
        if sample.MemAlloc > peak {
            peak = sample.MemAlloc
        }
    }

    return current, peak
}

// StartAutoSampling samples at the given interval in a background goroutine.
//
// NOTE(review): the goroutine and its ticker run forever — there is no way
// to stop them short of process exit (Disable only makes Sample a no-op).
// Consider accepting a context so the caller can cancel it.
func (p *Profiler) StartAutoSampling(interval time.Duration) {
    go func() {
        ticker := time.NewTicker(interval)
        defer ticker.Stop()

        for range ticker.C {
            p.Sample()
        }
    }()
}

📚 性能最佳实践

1. 渲染优化清单

go
// PerformanceChecklist is an ordered set of runtime health checks.
type PerformanceChecklist struct {
    checks []PerformanceCheck
}

// PerformanceCheck is one named check: Check returns pass/fail plus a
// human-readable message; Critical marks failures that need attention.
type PerformanceCheck struct {
    Name        string
    Description string
    Check       func() (bool, string)
    Critical    bool
}

// NewPerformanceChecklist returns the default checklist: view caching,
// heap usage (fails above 100 MB), and goroutine count (fails above 1000).
func NewPerformanceChecklist() *PerformanceChecklist {
    return &PerformanceChecklist{
        checks: []PerformanceCheck{
            {
                Name:        "View Caching",
                Description: "检查视图是否使用了缓存",
                Check: func() (bool, string) {
                    // NOTE(review): placeholder — always passes; wire up a
                    // real cache hit-rate check.
                    return true, "缓存正常工作"
                },
                Critical: false,
            },
            {
                Name:        "Memory Usage",
                Description: "检查内存使用是否在合理范围",
                Check: func() (bool, string) {
                    var ms runtime.MemStats
                    runtime.ReadMemStats(&ms)
                    
                    if ms.Alloc > 100*1024*1024 { // 100MB
                        return false, fmt.Sprintf("内存使用过高: %d MB", ms.Alloc/(1024*1024))
                    }
                    return true, "内存使用正常"
                },
                Critical: true,
            },
            {
                Name:        "Goroutine Count",
                Description: "检查协程数量是否正常",
                Check: func() (bool, string) {
                    count := runtime.NumGoroutine()
                    if count > 1000 {
                        return false, fmt.Sprintf("协程数量过多: %d", count)
                    }
                    return true, "协程数量正常"
                },
                Critical: true,
            },
        },
    }
}

// RunChecks executes every registered check and returns a map of check
// name to pass/fail. Failures are printed to stdout; critical failures get
// a warning prefix.
func (c *PerformanceChecklist) RunChecks() map[string]bool {
    results := make(map[string]bool, len(c.checks))

    for _, chk := range c.checks {
        ok, msg := chk.Check()
        results[chk.Name] = ok
        if ok {
            continue
        }
        if chk.Critical {
            fmt.Printf("⚠️  关键性能问题: %s - %s\n", chk.Name, msg)
        } else {
            fmt.Printf("ℹ️  性能提醒: %s - %s\n", chk.Name, msg)
        }
    }

    return results
}

2. 基准测试模板

go
// BenchmarkProgressRendering measures RenderWithPool over a fixed batch of
// 100 tasks, reporting allocations per op.
func BenchmarkProgressRendering(b *testing.B) {
    renderer := NewOptimizedProgressRenderer()
    tasks := generateTestTasks(100) // test helper defined elsewhere in the package
    
    b.ResetTimer()
    b.ReportAllocs()
    
    for i := 0; i < b.N; i++ {
        _ = renderer.RenderWithPool(tasks)
    }
}

// BenchmarkConcurrentProcessing measures raw Submit throughput against a
// 4-worker pool.
//
// NOTE(review): Submit can return false when the queues are full, so this
// measures attempted — not processed — submissions; consider asserting the
// return value or sizing the queue to b.N.
func BenchmarkConcurrentProcessing(b *testing.B) {
    pool := NewFastWorkerPool(4, 1000)
    defer pool.Stop()
    
    tasks := generateBenchmarkTasks(b.N) // test helper defined elsewhere
    
    b.ResetTimer()
    
    for i := 0; i < b.N; i++ {
        pool.Submit(tasks[i])
    }
}

// TestPerformanceRegression guards the render path against wall-clock and
// allocation regressions.
func TestPerformanceRegression(t *testing.T) {
    // Performance budgets.
    maxRenderTime := 10 * time.Millisecond
    maxMemoryUsage := 50 * 1024 * 1024 // 50MB

    renderer := NewOptimizedProgressRenderer()
    tasks := generateTestTasks(100) // test helper defined elsewhere in the package

    var before, after runtime.MemStats
    runtime.ReadMemStats(&before)
    // Start the clock after the (slow) ReadMemStats so only the render is timed.
    start := time.Now()
    _ = renderer.RenderWithPool(tasks)
    renderTime := time.Since(start)
    runtime.ReadMemStats(&after)

    // Fix: use the monotonic TotalAlloc counter. Alloc reports *live* bytes
    // and can shrink if GC runs mid-render, making ms2.Alloc-ms1.Alloc
    // underflow to a huge uint64 and fail the test spuriously.
    memoryUsed := after.TotalAlloc - before.TotalAlloc

    if renderTime > maxRenderTime {
        t.Errorf("渲染时间超标: %v > %v", renderTime, maxRenderTime)
    }

    if memoryUsed > uint64(maxMemoryUsage) {
        t.Errorf("内存使用超标: %d > %d", memoryUsed, maxMemoryUsage)
    }
}

📚 相关资源

项目文档

外部参考


💡 性能建议: 性能优化应该基于实际测量数据,避免过早优化。使用工具进行分析,关注关键路径,平衡复杂度和性能收益。

基于 MIT 许可证发布