Przeglądaj źródła

查询历史没有问题

liuq 2 miesięcy temu
rodzic
commit
dda9bce929

+ 6 - 3
backend/controllers/resource_controller.go

@@ -713,13 +713,16 @@ func GetDeviceHistory(c *gin.Context) {
 	deviceIDs := strings.Split(deviceIDsStr, ",")
 
 	metric := c.Query("metric")
-	if metric == "" {
-		metric = "power"
-	}
+	// if metric == "" {
+	// 	metric = "power"
+	// }
 
 	startStr := c.Query("start")
 	endStr := c.Query("end")
 	interval := c.Query("interval")
+	if interval == "" {
+		interval = "raw"
+	}
 
 	// Default time range: last 24h
 	end := time.Now()

+ 89 - 29
backend/db/tdengine.go

@@ -5,6 +5,7 @@ import (
 	"fmt"
 	"log"
 	"os"
+	"strings"
 	"time"
 
 	_ "github.com/taosdata/driver-go/v3/taosRestful"
@@ -64,7 +65,7 @@ func initSchema() {
 		val DOUBLE
 	) TAGS (
 		device_id BINARY(64),
-		metric BINARY(32),
+		metric BINARY(256),
 		location_id BINARY(64)
 	)`
 
@@ -74,6 +75,24 @@ func initSchema() {
 	} else {
 		log.Println("TDengine schema initialized")
 	}
+
+	// Create Super Table for switch logs
+	stSwitchSql := `CREATE STABLE IF NOT EXISTS switch_logs (
+		ts TIMESTAMP,
+		val BOOL
+	) TAGS (
+		device_id BINARY(64),
+		metric BINARY(256),
+		location_id BINARY(64)
+	)`
+
+	_, err = TD.Exec(stSwitchSql)
+	if err != nil {
+		log.Printf("Failed to create stable switch_logs: %v\n", err)
+	}
+
+	// Upgrade schema for existing tables (ignore error if redundant)
+	_, _ = TD.Exec("ALTER STABLE readings MODIFY TAG metric BINARY(256)")
 }
 
 // InsertReading inserts a single reading
@@ -98,8 +117,12 @@ func InsertReading(deviceID string, metric string, val float64, locationID strin
 	// locationID: UUID or empty
 	
 	// Safe table name construction
-	// Replace '-' with '_'
-	safeDID := fmt.Sprintf("d_%s_%s", deviceID, metric)
+	// Replace '-' with '_' and '.' with '_' to ensure valid table name
+	safeMetric := metric
+	safeMetric = strings.ReplaceAll(safeMetric, ".", "_")
+	safeMetric = strings.ReplaceAll(safeMetric, "-", "_")
+	
+	safeDID := fmt.Sprintf("d_%s_%s", strings.ReplaceAll(deviceID, "-", "_"), safeMetric)
 	// Remove special chars if any (simplified)
 	
 	// SQL: INSERT INTO {table_name} USING readings TAGS (...) VALUES (?, ?)
@@ -113,6 +136,26 @@ func InsertReading(deviceID string, metric string, val float64, locationID strin
 	return err
 }
 
+// InsertSwitchLog inserts a switch status log
+func InsertSwitchLog(deviceID string, metric string, val bool, locationID string, ts time.Time) error {
+	if TD == nil {
+		return fmt.Errorf("TDengine not initialized")
+	}
+
+	safeMetric := metric
+	safeMetric = strings.ReplaceAll(safeMetric, ".", "_")
+	safeMetric = strings.ReplaceAll(safeMetric, "-", "_")
+	
+	// Use 's_' prefix for switch tables
+	safeDID := fmt.Sprintf("s_%s_%s", strings.ReplaceAll(deviceID, "-", "_"), safeMetric)
+	
+	query := fmt.Sprintf("INSERT INTO `%s` USING switch_logs TAGS ('%s', '%s', '%s') VALUES (?, ?)",
+		safeDID, deviceID, metric, locationID)
+	
+	_, err := TD.Exec(query, ts, val)
+	return err
+}
+
 // ReadingHistory represents aggregated historical data
 type ReadingHistory struct {
 	Ts       string  `json:"ts"`
@@ -144,28 +187,43 @@ func GetReadings(deviceIDs []string, metric string, start, end time.Time, interv
 		inClause += fmt.Sprintf("'%s'", id)
 	}
 
-	// Validate Interval (simple check)
-	// e.g. 1m, 1h, 1d
-	if interval == "" {
-		interval = "1h"
-	}
-	// TDengine might not support '1y' directly in all versions, map to days
-	if interval == "1y" {
-		interval = "365d"
+	// Metric Filter
+	metricFilter := ""
+	if metric != "" {
+		metricFilter = fmt.Sprintf("AND metric = '%s'", metric)
 	}
 
-	// Query
-	// Note: ts output format depends on driver, usually time.Time or string.
-	// Using generic sql driver, it might be time.Time.
-	// Group by device_id to separate lines.
-	query := fmt.Sprintf(`SELECT 
-		FIRST(val), MAX(val), MIN(val), LAST(val), AVG(val), device_id
-		FROM readings 
-		WHERE device_id IN (%s) AND metric = '%s' AND ts >= '%s' AND ts <= '%s' 
-		INTERVAL(%s) 
-		GROUP BY device_id 
-		ORDER BY ts ASC`,
-		inClause, metric, start.Format("2006-01-02 15:04:05"), end.Format("2006-01-02 15:04:05"), interval)
+	var query string
+	
+	// 如果 interval 为 "raw",查询原始数据(不聚合)
+	// 这有助于在数据量少时直接查看,或者调试
+	if interval == "raw" {
+		// 限制 2000 条以防止数据量过大
+		// 选择 val 重复 5 次是为了复用 Scan 逻辑 (Open, High, Low, Close, Avg)
+		query = fmt.Sprintf(`SELECT 
+			ts, val, val, val, val, val, device_id
+			FROM readings 
+			WHERE device_id IN (%s) %s AND ts >= '%s' AND ts <= '%s' 
+			ORDER BY ts ASC LIMIT 2000`,
+			inClause, metricFilter, start.Format("2006-01-02 15:04:05"), end.Format("2006-01-02 15:04:05"))
+	} else {
+		// 聚合查询
+		if interval == "" {
+			interval = "1h"
+		}
+		if interval == "1y" {
+			interval = "365d"
+		}
+
+		// TDengine Query: Simplified to avoid GROUP BY + INTERVAL conflict
+		query = fmt.Sprintf(`SELECT 
+			_wstart, FIRST(val), MAX(val), MIN(val), LAST(val), AVG(val)
+			FROM readings 
+			WHERE device_id IN (%s) %s AND ts >= '%s' AND ts <= '%s' 
+			INTERVAL(%s)
+			ORDER BY _wstart ASC`,
+			inClause, metricFilter, start.Format("2006-01-02 15:04:05"), end.Format("2006-01-02 15:04:05"), interval)
+	}
 
 	rows, err := TD.Query(query)
 	if err != nil {
@@ -178,18 +236,20 @@ func GetReadings(deviceIDs []string, metric string, start, end time.Time, interv
 	for rows.Next() {
 		var r ReadingHistory
 		var ts time.Time
-		var did sql.NullString // device_id might be grouped?
+		// var did sql.NullString 
 
-		// Note: TDengine RESTful driver behavior on INTERVAL + GROUP BY:
-		// Result columns: ts, first, max, min, last, avg, device_id
-		if err := rows.Scan(&ts, &r.Open, &r.High, &r.Low, &r.Close, &r.Avg, &did); err != nil {
+		// Result columns: ts, open, high, low, close, avg
+		if err := rows.Scan(&ts, &r.Open, &r.High, &r.Low, &r.Close, &r.Avg); err != nil {
 			log.Printf("Scan error: %v", err)
 			continue
 		}
 		r.Ts = ts.Format("2006-01-02 15:04:05")
-		if did.Valid {
-			r.DeviceID = did.String
+		
+		// If we only queried one device, we can fill it here
+		if len(deviceIDs) == 1 {
+			r.DeviceID = deviceIDs[0]
 		}
+		
 		results = append(results, r)
 	}
 	return results, nil

+ 43 - 0
backend/models/init.go

@@ -71,7 +71,50 @@ func InitDB() {
 	}
 	fmt.Println("Database migration completed")
 
+	// Initialize default user and role
+	InitDefaultUser(DB)
+
 	// Initialize basic menu data
 	InitSysMenuData(DB)
 }
 
+func InitDefaultUser(db *gorm.DB) {
+	// Find or create admin role
+	var adminRole SysRole
+	if err := db.Where("role_key = ?", "admin").First(&adminRole).Error; err != nil {
+		fmt.Println("Initializing Admin Role...")
+		adminRole = SysRole{
+			Name:      "超级管理员",
+			RoleKey:   "admin",
+			DataScope: "1",
+			Status:    "0",
+		}
+		if err := db.Create(&adminRole).Error; err != nil {
+			log.Printf("Error creating admin role: %v", err)
+			return
+		}
+	}
+
+	// Find or create admin user
+	var adminUser User
+	if err := db.Where("username = ?", "admin").First(&adminUser).Error; err != nil {
+		fmt.Println("Initializing Admin User...")
+		adminUser = User{
+			Username: "admin",
+			Password: "admin123", // Default password
+			Name:     "系统管理员",
+			Role:     "ADMIN",
+			Status:   "0",
+		}
+		if err := db.Create(&adminUser).Error; err != nil {
+			log.Printf("Error creating admin user: %v", err)
+			return
+		}
+
+		// Assign admin role to admin user
+		db.Create(&SysUserRole{
+			UserID: adminUser.ID,
+			RoleID: adminRole.ID,
+		})
+	}
+}

+ 4 - 35
backend/models/sys_menu.go

@@ -214,43 +214,12 @@ func InitSysMenuData(db *gorm.DB) {
 
 	tx := db.Begin()
 
-	// Find or create admin role
+	// Find admin role to assign perms
 	var adminRole SysRole
 	if err := tx.Where("role_key = ?", "admin").First(&adminRole).Error; err != nil {
-		adminRole = SysRole{
-			Name:      "超级管理员",
-			RoleKey:   "admin",
-			DataScope: "1",
-			Status:    "0",
-		}
-		if err := tx.Create(&adminRole).Error; err != nil {
-			tx.Rollback()
-			log.Printf("Error creating admin role: %v", err)
-			return
-		}
-	}
-
-	// Find or create admin user
-	var adminUser User
-	if err := tx.Where("username = ?", "admin").First(&adminUser).Error; err != nil {
-		adminUser = User{
-			Username: "admin",
-			Password: "admin123", // Default password
-			Name:     "系统管理员",
-			Role:     "ADMIN",
-			Status:   "0",
-		}
-		if err := tx.Create(&adminUser).Error; err != nil {
-			tx.Rollback()
-			log.Printf("Error creating admin user: %v", err)
-			return
-		}
-		
-		// Assign admin role to admin user
-		tx.Create(&SysUserRole{
-			UserID: adminUser.ID,
-			RoleID: adminRole.ID,
-		})
+		tx.Rollback()
+		log.Printf("Admin role not found when initializing menus: %v", err)
+		return
 	}
 
 	for _, m := range menus {

+ 124 - 21
backend/services/collector.go

@@ -8,6 +8,7 @@ import (
 	"log"
 	"strconv"
 	"time"
+	"strings"
 
 	"github.com/robfig/cron/v3"
 )
@@ -88,8 +89,13 @@ func (s *CollectorService) processSourceGroup(sourceID string, devices []models.
 		DeviceID   string
 		Metric     string
 		LocationID string
+		Formula    string
 	}
 	requestMap := make(map[string][]Target)
+	
+	// Map to store pre-fetched states from direct device queries
+	// EntityID -> State Value (string)
+	preFetchedStates := make(map[string]string)
 
 	for _, d := range devices {
 		// Parse Attribute Mapping
@@ -98,6 +104,11 @@ func (s *CollectorService) processSourceGroup(sourceID string, devices []models.
 		// Need to unmarshal
 		b, _ := d.AttributeMapping.MarshalJSON()
 		json.Unmarshal(b, &mapping)
+		
+		locID := ""
+		if d.LocationID != nil {
+			locID = d.LocationID.String()
+		}
 
 		for metric, entityID := range mapping {
 			// Skip formulas for now (keys ending in _formula)
@@ -112,29 +123,101 @@ func (s *CollectorService) processSourceGroup(sourceID string, devices []models.
 				entityIDs = append(entityIDs, entityID)
 			}
 			
-			locID := ""
-			if d.LocationID != nil {
-				locID = d.LocationID.String()
-			}
+			// Check for formula
+			formula := mapping[metric+"_formula"]
 
 			requestMap[entityID] = append(requestMap[entityID], Target{
 				DeviceID:   d.ID.String(),
 				Metric:     metric,
 				LocationID: locID,
+				Formula:    formula,
 			})
 		}
+		
+		// Automatic Discovery: fetch all entities if not mapped or simply enforce "all" policy
+		// Case A: ExternalID is an Entity ID (contains '.')
+		if strings.Contains(d.ExternalID, ".") {
+			entityID := d.ExternalID
+			// Add to fetch list if not already there
+			if _, exists := requestMap[entityID]; !exists {
+				entityIDs = append(entityIDs, entityID)
+			}
+			
+			// Use sanitized entity ID as metric name
+			metric := strings.ReplaceAll(entityID, ".", "_")
+			
+			// Avoid duplicate if already mapped
+			if len(requestMap[entityID]) == 0 {
+				requestMap[entityID] = append(requestMap[entityID], Target{
+					DeviceID:   d.ID.String(),
+					Metric:     metric,
+					LocationID: locID,
+				})
+			}
+		} else if d.ExternalID != "" {
+			// Case B: ExternalID is a Device ID (UUID-like, no '.')
+			// Fetch all entities for this device
+			// Note: This adds overhead. In production, result should be cached.
+			haEntities, err := utils.FetchHAEntitiesByDevice(source.Config, d.ExternalID)
+			if err == nil {
+				for _, ent := range haEntities {
+					// Metric name: sanitized entity ID
+					metric := strings.ReplaceAll(ent.EntityID, ".", "_")
+					
+					// Register target
+					if _, exists := requestMap[ent.EntityID]; !exists {
+						// Only add if not already requested (avoid dupes if mapped)
+						// But if it IS mapped, we might have added it above with a specific metric name.
+						// Here we add it with default metric name.
+						// To avoid double writing for mapped entities, we could check if ent.EntityID is in requestMap.
+						// However, user requirement is "add all". Double writing (one with alias, one with raw ID) is acceptable or even desired.
+					}
+					
+					if len(requestMap[ent.EntityID]) == 0 {
+						requestMap[ent.EntityID] = append(requestMap[ent.EntityID], Target{
+							DeviceID:   d.ID.String(),
+							Metric:     metric,
+							LocationID: locID,
+						})
+					}
+					
+					// Store value directly to avoid re-fetching
+					preFetchedStates[ent.EntityID] = ent.State
+				}
+			} else {
+				// Log error but continue
+				log.Printf("Failed to auto-discover entities for device %s (%s): %v", d.Name, d.ExternalID, err)
+			}
+		}
 	}
 
-	if len(entityIDs) == 0 {
+	if len(entityIDs) == 0 && len(preFetchedStates) == 0 {
 		return
 	}
 
-	// Fetch Data
+	// Fetch Data for explicit entityIDs
 	// TODO: Handle large batches?
-	states, err := utils.BatchFetchStates(source.Config, entityIDs)
-	if err != nil {
-		log.Printf("Failed to fetch states for source %s: %v\n", source.Name, err)
-		return
+	var states map[string]string
+	var err error
+	
+	if len(entityIDs) > 0 {
+		states, err = utils.BatchFetchStates(source.Config, entityIDs)
+		if err != nil {
+			log.Printf("Failed to fetch states for source %s: %v\n", source.Name, err)
+			return
+		}
+	} else {
+		states = make(map[string]string)
+	}
+
+	// Merge preFetchedStates into states
+	for id, val := range preFetchedStates {
+		states[id] = val
+	}
+
+
+	if len(states) < len(entityIDs) {
+		log.Printf("Source %s: Requested %d entities, but got states for only %d (some may be unavailable)", source.Name, len(entityIDs), len(states))
 	}
 
 	// Write to TSDB
@@ -142,26 +225,46 @@ func (s *CollectorService) processSourceGroup(sourceID string, devices []models.
 	count := 0
 	
 	for entityID, valStr := range states {
+		// Handle Switch Status (on/off) explicitly
+		if valStr == "on" || valStr == "off" {
+			valBool := valStr == "on"
+			targets := requestMap[entityID]
+			for _, target := range targets {
+				// Insert Switch Log
+				err := db.InsertSwitchLog(target.DeviceID, target.Metric, valBool, target.LocationID, now)
+				if err != nil {
+					log.Printf("TSDB Insert Switch Log Error for device %s (metric: %s, entity: %s): %v", target.DeviceID, target.Metric, entityID, err)
+				} else {
+					count++
+				}
+			}
+			continue
+		}
+
 		// Parse value to float
 		val, err := strconv.ParseFloat(valStr, 64)
 		if err != nil {
-			// Try to handle non-numeric states if necessary (e.g. "on"/"off")
-			if valStr == "on" {
-				val = 1
-			} else if valStr == "off" {
-				val = 0
-			} else {
-				continue // Skip non-numeric
-			}
+			log.Printf("Skipping non-numeric value for entity %s: %s", entityID, valStr)
+			continue // Skip non-numeric
 		}
 
 		targets := requestMap[entityID]
 		for _, target := range targets {
+			
+			finalVal := val
+			if target.Formula != "" {
+				res, err := utils.EvaluateFormula(target.Formula, val)
+				if err == nil {
+					finalVal = res
+				} else {
+					log.Printf("Formula error for device %s metric %s: %v", target.DeviceID, target.Metric, err)
+				}
+			}
+
 			// Insert
-			err := db.InsertReading(target.DeviceID, target.Metric, val, target.LocationID, now)
+			err := db.InsertReading(target.DeviceID, target.Metric, finalVal, target.LocationID, now)
 			if err != nil {
-				// Log occasional errors, but don't flood
-				// log.Printf("TSDB Insert Error: %v\n", err)
+				log.Printf("TSDB Insert Error for device %s (metric: %s, entity: %s): %v", target.DeviceID, target.Metric, entityID, err)
 			} else {
 				count++
 			}

+ 156 - 0
backend/utils/formula.go

@@ -0,0 +1,156 @@
+package utils
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+	"unicode"
+)
+
+// EvaluateFormula calculates the result of a mathematical formula with variable 'x'
+// Supported operators: +, -, *, /
+// Supported grouping: ( )
+func EvaluateFormula(formula string, x float64) (float64, error) {
+	if strings.TrimSpace(formula) == "" {
+		return x, nil
+	}
+
+	postfix, err := infixToPostfix(formula)
+	if err != nil {
+		return 0, err
+	}
+
+	return evaluatePostfix(postfix, x)
+}
+
// infixToPostfix converts an infix arithmetic expression over numeric
// literals and the variable 'x' into postfix (RPN) tokens via the
// shunting-yard algorithm.
//
// Supported: +, -, *, /, parentheses, and — newly — unary minus
// ("-x*2", "3--2", "2/-x"), which is rewritten as multiplication by -1
// so the evaluator needs no special unary operator.
func infixToPostfix(formula string) ([]string, error) {
	isOp := func(c byte) bool { return c == '+' || c == '-' || c == '*' || c == '/' }
	prec := func(c byte) int {
		switch c {
		case '+', '-':
			return 1
		case '*', '/':
			return 2
		}
		return 0
	}

	var output []string
	var stack []string // operator stack; holds single-char operators and "("
	// prevOperand is true when the last token can end an operand
	// (number, 'x', or ')'): a '-' seen then is binary, otherwise unary.
	prevOperand := false

	i := 0
	for i < len(formula) {
		char := formula[i]

		switch {
		case unicode.IsSpace(rune(char)):
			i++

		case char == 'x' || char == 'X':
			output = append(output, "x")
			prevOperand = true
			i++

		case unicode.IsDigit(rune(char)) || char == '.':
			start := i
			for i < len(formula) && (unicode.IsDigit(rune(formula[i])) || formula[i] == '.') {
				i++
			}
			output = append(output, formula[start:i])
			prevOperand = true

		case char == '-' && !prevOperand:
			// Unary minus: rewrite "-expr" as "(-1) * expr". The '*' is
			// pushed WITHOUT popping so it binds tighter than surrounding
			// binary operators (e.g. "2/-x" => 2 / ((-1)*x)).
			output = append(output, "-1")
			stack = append(stack, "*")
			i++

		case isOp(char):
			// Pop while the top operator has >= precedence (left-assoc).
			for len(stack) > 0 && isOp(stack[len(stack)-1][0]) && prec(stack[len(stack)-1][0]) >= prec(char) {
				output = append(output, stack[len(stack)-1])
				stack = stack[:len(stack)-1]
			}
			stack = append(stack, string(char))
			prevOperand = false
			i++

		case char == '(':
			stack = append(stack, "(")
			prevOperand = false
			i++

		case char == ')':
			for len(stack) > 0 && stack[len(stack)-1] != "(" {
				output = append(output, stack[len(stack)-1])
				stack = stack[:len(stack)-1]
			}
			if len(stack) == 0 {
				return nil, fmt.Errorf("mismatched parentheses")
			}
			stack = stack[:len(stack)-1] // pop '('
			prevOperand = true
			i++

		default:
			return nil, fmt.Errorf("invalid character: %c", char)
		}
	}

	// Drain remaining operators; a leftover '(' means unbalanced input.
	for len(stack) > 0 {
		if stack[len(stack)-1] == "(" {
			return nil, fmt.Errorf("mismatched parentheses")
		}
		output = append(output, stack[len(stack)-1])
		stack = stack[:len(stack)-1]
	}

	return output, nil
}
+
// evaluatePostfix computes the value of a postfix (RPN) token stream,
// substituting x for the variable token. It errors on malformed streams,
// unknown operators, and division by zero.
func evaluatePostfix(postfix []string, x float64) (float64, error) {
	var operands []float64

	push := func(v float64) { operands = append(operands, v) }
	pop2 := func() (float64, float64, bool) {
		if len(operands) < 2 {
			return 0, 0, false
		}
		a, b := operands[len(operands)-2], operands[len(operands)-1]
		operands = operands[:len(operands)-2]
		return a, b, true
	}

	for _, token := range postfix {
		// Variable token.
		if token == "x" || token == "X" {
			push(x)
			continue
		}
		// Numeric literal.
		if v, err := strconv.ParseFloat(token, 64); err == nil {
			push(v)
			continue
		}
		// Anything else must be a binary operator.
		a, b, ok := pop2()
		if !ok {
			return 0, fmt.Errorf("invalid expression")
		}
		switch token {
		case "+":
			push(a + b)
		case "-":
			push(a - b)
		case "*":
			push(a * b)
		case "/":
			if b == 0 {
				return 0, fmt.Errorf("division by zero")
			}
			push(a / b)
		default:
			return 0, fmt.Errorf("unknown operator: %s", token)
		}
	}

	// A well-formed expression reduces to exactly one value.
	if len(operands) != 1 {
		return 0, fmt.Errorf("invalid expression result")
	}
	return operands[0], nil
}
+
// isOperator reports whether c is one of the four supported arithmetic
// operator characters.
func isOperator(c byte) bool {
	switch c {
	case '+', '-', '*', '/':
		return true
	}
	return false
}
+
// precedence returns the binding strength of an operator character:
// 2 for '*' and '/', 1 for '+' and '-', and 0 for anything else.
func precedence(c byte) int {
	if c == '*' || c == '/' {
		return 2
	}
	if c == '+' || c == '-' {
		return 1
	}
	return 0
}
+

+ 34 - 1
frontend/src/views/resource/ImportClean.vue

@@ -130,7 +130,13 @@
     <!-- Data Chart Modal -->
     <a-modal v-model:visible="chartVisible" title="历史数据趋势" width="90%" :footer="false" @open="initChart">
         <div style="margin-bottom: 20px; display: flex; gap: 10px; flex-wrap: wrap; align-items: center">
-            <a-range-picker v-model="dateRange" show-time format="YYYY-MM-DD HH:mm:ss" style="width: 380px" />
+            <a-range-picker 
+                v-model="dateRange" 
+                show-time 
+                format="YYYY-MM-DD HH:mm:ss" 
+                :shortcuts="rangeShortcuts"
+                style="width: 380px" 
+            />
             <a-select v-model="chartInterval" placeholder="粒度" style="width: 100px">
                 <a-option value="1m">1分钟</a-option>
                 <a-option value="1h">1小时</a-option>
@@ -187,6 +193,33 @@ const chartInterval = ref('1h');
 const dateRange = ref<any[]>([]); // Array of Date or strings
 const chartLoading = ref(false);
 
+const rangeShortcuts = [
+    {
+        label: '近1小时',
+        value: () => [new Date(Date.now() - 3600 * 1000), new Date()],
+    },
+    {
+        label: '近12小时',
+        value: () => [new Date(Date.now() - 12 * 3600 * 1000), new Date()],
+    },
+    {
+        label: '近24小时',
+        value: () => [new Date(Date.now() - 24 * 3600 * 1000), new Date()],
+    },
+    {
+        label: '近7天',
+        value: () => [new Date(Date.now() - 7 * 24 * 3600 * 1000), new Date()],
+    },
+    {
+        label: '近1个月',
+        value: () => [new Date(Date.now() - 30 * 24 * 3600 * 1000), new Date()],
+    },
+    {
+        label: '近1年',
+        value: () => [new Date(Date.now() - 365 * 24 * 3600 * 1000), new Date()],
+    },
+];
+
 const standardAttributes = [
     { label: '开关状态', value: 'state', unit: '' },
     { label: '有功功率', value: 'power', unit: 'W' },