소스 검색

修复协程问题

liuq 1 개월 전
부모
커밋
c460697efd
3개의 파일이 변경됨: 52개의 추가 그리고 5개의 삭제
  1. 12 2
      backend/services/ai_service.go
  2. 22 2
      backend/services/alarm_service.go
  3. 18 1
      backend/services/collector.go

+ 12 - 2
backend/services/ai_service.go

@@ -148,6 +148,16 @@ func NormalizeChatUrl(inputUrl string) string {
 	return strings.TrimRight(inputUrl, "/") + "/chat/completions"
 }
 
+// 全局 HTTP Client 复用,优化连接池
+var aiHttpClient = &http.Client{
+	Timeout: 60 * time.Second,
+	Transport: &http.Transport{
+		MaxIdleConns:        10,
+		IdleConnTimeout:     30 * time.Second,
+		DisableKeepAlives:   false,
+	},
+}
+
 // FetchAIResponse 实际调用 (Public for controller usage)
 func FetchAIResponse(apiUrl, apiKey, model, prompt string) (string, error) {
 	requestBody, _ := json.Marshal(map[string]interface{}{
@@ -165,8 +175,8 @@ func FetchAIResponse(apiUrl, apiKey, model, prompt string) (string, error) {
 	req.Header.Set("Content-Type", "application/json")
 	req.Header.Set("Authorization", "Bearer "+apiKey)
 
-	client := &http.Client{Timeout: 60 * time.Second}
-	resp, err := client.Do(req)
+	// 使用全局 Client 复用连接
+	resp, err := aiHttpClient.Do(req)
 	if err != nil {
 		return "", err
 	}

+ 22 - 2
backend/services/alarm_service.go

@@ -28,12 +28,17 @@ type AlarmService struct {
 	mu       sync.RWMutex
 	states   map[string]*RuleState // Key: ruleID_deviceID
 	taskChan chan AlarmTask        // Buffered channel for alarm tasks
+	
+	// Rate limiting for auto-check
+	lastAutoCheckTime time.Time
+	checkMu           sync.Mutex
 }
 
 func NewAlarmService() *AlarmService {
 	GlobalAlarmService = &AlarmService{
 		states:   make(map[string]*RuleState),
 		taskChan: make(chan AlarmTask, 5000), // Buffer size 5000
+		lastAutoCheckTime: time.Now().Add(-60 * time.Second), // Allow immediate first check
 	}
 
 	// Start worker pool (e.g., 5 workers)
@@ -205,7 +210,22 @@ func (s *AlarmService) triggerAlarm(rule models.AlarmRule, deviceID string, valu
 	} else {
 		log.Printf("!!! ALARM TRIGGERED !!! Rule: %s, Device: %s, Value: %.2f", rule.Name, deviceID, value)
 		
-		// 触发自动处理检查
-		go CheckAndAutoHandleAlarms()
+		// 触发自动处理检查 (带频率限制)
+		s.triggerAutoCheck()
 	}
 }
+
+// triggerAutoCheck 触发自动处理检查,增加频率限制
+func (s *AlarmService) triggerAutoCheck() {
+	s.checkMu.Lock()
+	defer s.checkMu.Unlock()
+
+	// 如果距离上次检查不足 60 秒,则跳过
+	if time.Since(s.lastAutoCheckTime) < 60*time.Second {
+		return
+	}
+
+	s.lastAutoCheckTime = time.Now()
+	// 异步执行检查
+	go CheckAndAutoHandleAlarms()
+}

+ 18 - 1
backend/services/collector.go

@@ -21,6 +21,7 @@ type CollectorService struct {
 	DebugMode       bool
 	mu              sync.RWMutex
 	IntervalSeconds float64
+	runningSources  sync.Map
 }
 
 func NewCollectorService() *CollectorService {
@@ -151,11 +152,22 @@ func (s *CollectorService) collectJob() {
 
 	// 3. Process each group
 	for sourceID, devs := range deviceGroups {
+		// 并发控制:如果该源的上一次采集还在运行,则跳过本次
+		if _, loaded := s.runningSources.LoadOrStore(sourceID, true); loaded {
+			log.Printf("Warning: Collection for source %s is still running, skipping this cycle.", sourceID)
+			continue
+		}
 		go s.processSourceGroup(sourceID, devs)
 	}
 }
 
 func (s *CollectorService) processSourceGroup(sourceID string, devices []models.Device) {
+	// 任务结束时移除标记
+	defer s.runningSources.Delete(sourceID)
+
+	// 记录开始时间
+	start := time.Now()
+
 	// Fetch Source Config
 	var source models.IntegrationSource
 	if err := models.DB.First(&source, "id = ?", sourceID).Error; err != nil {
@@ -494,5 +506,10 @@ func (s *CollectorService) processSourceGroup(sourceID string, devices []models.
 		}
 	}
 	
-	log.Printf("Source %s: Collected %d data points for %d devices", source.Name, count, len(devices))
+	duration := time.Since(start)
+	log.Printf("Source %s: Collected %d data points for %d devices in %v", source.Name, count, len(devices), duration)
+	
+	if duration.Seconds() > s.IntervalSeconds {
+		log.Printf("PERFORMANCE WARNING: Collection for source %s took %v, which is longer than interval %.1fs", source.Name, duration, s.IntervalSeconds)
+	}
 }