mirror of https://github.com/grafana/grafana.git
synced 2024-11-21 16:38:03 -06:00

mysql: decouple sqleng (#86057)

parent 44adfea049
commit b02317d583
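
The change is mostly mechanical: the shared pkg/tsdb/sqleng engine is copied under the mysql datasource, and every mysql file that imported the shared package now imports the local copy. The hunks below all apply the same one-line import swap:

// before: mysql depended on the shared SQL engine
import "github.com/grafana/grafana/pkg/tsdb/sqleng"

// after: mysql owns its decoupled copy
import "github.com/grafana/grafana/pkg/tsdb/mysql/sqleng"
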
@@ -60,6 +60,8 @@ files = [
     "**/pkg/tsdb/azuremonitor/**/*",
     "**/pkg/tsdb/cloud-monitoring/*",
     "**/pkg/tsdb/cloud-monitoring/**/*",
+    "**/pkg/tsdb/mysql/*",
+    "**/pkg/tsdb/mysql/**/*",
     "**/pkg/tsdb/parca/*",
     "**/pkg/tsdb/parca/**/*",
     "**/pkg/tsdb/tempo/*",
@@ -8,7 +8,7 @@ import (
     "github.com/grafana/grafana-plugin-sdk-go/backend"
     "github.com/grafana/grafana-plugin-sdk-go/backend/gtime"
     "github.com/grafana/grafana-plugin-sdk-go/backend/log"
-    "github.com/grafana/grafana/pkg/tsdb/sqleng"
+    "github.com/grafana/grafana/pkg/tsdb/mysql/sqleng"
 )

 const rsIdentifier = `([_a-zA-Z0-9]+)`
@@ -22,7 +22,7 @@ import (
     "github.com/grafana/grafana-plugin-sdk-go/data/sqlutil"

     "github.com/grafana/grafana-plugin-sdk-go/backend/log"
-    "github.com/grafana/grafana/pkg/tsdb/sqleng"
+    "github.com/grafana/grafana/pkg/tsdb/mysql/sqleng"
 )

 const (
@@ -16,7 +16,7 @@ import (
     "github.com/grafana/grafana-plugin-sdk-go/experimental"
     "github.com/stretchr/testify/require"

-    "github.com/grafana/grafana/pkg/tsdb/sqleng"
+    "github.com/grafana/grafana/pkg/tsdb/mysql/sqleng"

     _ "github.com/go-sql-driver/mysql"
 )
@@ -13,7 +13,7 @@ import (
     "github.com/grafana/grafana-plugin-sdk-go/data"
     "github.com/stretchr/testify/require"

-    "github.com/grafana/grafana/pkg/tsdb/sqleng"
+    "github.com/grafana/grafana/pkg/tsdb/mysql/sqleng"
 )

 // To run this test, set runMySqlTests=true
pkg/tsdb/mysql/sqleng/sql_engine.go (new file, 642 lines)
@@ -0,0 +1,642 @@
package sqleng

import (
    "context"
    "database/sql"
    "encoding/json"
    "errors"
    "fmt"
    "net"
    "regexp"
    "runtime/debug"
    "strconv"
    "strings"
    "sync"
    "time"

    "github.com/grafana/grafana-plugin-sdk-go/backend"
    "github.com/grafana/grafana-plugin-sdk-go/data"
    "github.com/grafana/grafana-plugin-sdk-go/data/sqlutil"

    "github.com/grafana/grafana-plugin-sdk-go/backend/gtime"
    "github.com/grafana/grafana-plugin-sdk-go/backend/log"
)

// MetaKeyExecutedQueryString is the key where the executed query should get stored
const MetaKeyExecutedQueryString = "executedQueryString"

// SQLMacroEngine interpolates macros into sql. It takes in the Query to have access to query context and
// timeRange to be able to generate queries that use from and to.
type SQLMacroEngine interface {
    Interpolate(query *backend.DataQuery, timeRange backend.TimeRange, sql string) (string, error)
}

// SqlQueryResultTransformer transforms a query result row to RowValues with proper types.
type SqlQueryResultTransformer interface {
    // TransformQueryError transforms a query error.
    TransformQueryError(logger log.Logger, err error) error
    GetConverterList() []sqlutil.StringConverter
}

type JsonData struct {
    MaxOpenConns            int    `json:"maxOpenConns"`
    MaxIdleConns            int    `json:"maxIdleConns"`
    ConnMaxLifetime         int    `json:"connMaxLifetime"`
    ConnectionTimeout       int    `json:"connectionTimeout"`
    Timescaledb             bool   `json:"timescaledb"`
    Mode                    string `json:"sslmode"`
    ConfigurationMethod     string `json:"tlsConfigurationMethod"`
    TlsSkipVerify           bool   `json:"tlsSkipVerify"`
    RootCertFile            string `json:"sslRootCertFile"`
    CertFile                string `json:"sslCertFile"`
    CertKeyFile             string `json:"sslKeyFile"`
    Timezone                string `json:"timezone"`
    Encrypt                 string `json:"encrypt"`
    Servername              string `json:"servername"`
    TimeInterval            string `json:"timeInterval"`
    Database                string `json:"database"`
    SecureDSProxy           bool   `json:"enableSecureSocksProxy"`
    SecureDSProxyUsername   string `json:"secureSocksProxyUsername"`
    AllowCleartextPasswords bool   `json:"allowCleartextPasswords"`
    AuthenticationType      string `json:"authenticationType"`
}

type DataSourceInfo struct {
    JsonData                JsonData
    URL                     string
    User                    string
    Database                string
    ID                      int64
    Updated                 time.Time
    UID                     string
    DecryptedSecureJSONData map[string]string
}

type DataPluginConfiguration struct {
    DSInfo            DataSourceInfo
    TimeColumnNames   []string
    MetricColumnTypes []string
    RowLimit          int64
}

type DataSourceHandler struct {
    macroEngine            SQLMacroEngine
    queryResultTransformer SqlQueryResultTransformer
    db                     *sql.DB
    timeColumnNames        []string
    metricColumnTypes      []string
    log                    log.Logger
    dsInfo                 DataSourceInfo
    rowLimit               int64
    userError              string
}

type QueryJson struct {
    RawSql       string  `json:"rawSql"`
    Fill         bool    `json:"fill"`
    FillInterval float64 `json:"fillInterval"`
    FillMode     string  `json:"fillMode"`
    FillValue    float64 `json:"fillValue"`
    Format       string  `json:"format"`
}

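As an aside (not part of the diff): JsonData is what a datasource's JSON settings decode into, with keys given by the struct tags above. A minimal sketch, with invented values:

// hypothetical settings blob; keys follow the JsonData struct tags
settings := []byte(`{"maxOpenConns": 10, "maxIdleConns": 2, "connMaxLifetime": 14400, "timeInterval": "1m"}`)
var jd JsonData
if err := json.Unmarshal(settings, &jd); err != nil {
    // malformed datasource settings
}
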
func (e *DataSourceHandler) TransformQueryError(logger log.Logger, err error) error {
    // OpError is the error type usually returned by functions in the net
    // package. It describes the operation, network type, and address of
    // an error. We log this error rather than return it to the client
    // for security purposes.
    var opErr *net.OpError
    if errors.As(err, &opErr) {
        logger.Error("Query error", "err", err)
        return fmt.Errorf("failed to connect to server - %s", e.userError)
    }

    return e.queryResultTransformer.TransformQueryError(logger, err)
}

func NewQueryDataHandler(userFacingDefaultError string, db *sql.DB, config DataPluginConfiguration, queryResultTransformer SqlQueryResultTransformer,
    macroEngine SQLMacroEngine, log log.Logger) (*DataSourceHandler, error) {
    queryDataHandler := DataSourceHandler{
        queryResultTransformer: queryResultTransformer,
        macroEngine:            macroEngine,
        timeColumnNames:        []string{"time"},
        log:                    log,
        dsInfo:                 config.DSInfo,
        rowLimit:               config.RowLimit,
        userError:              userFacingDefaultError,
    }

    if len(config.TimeColumnNames) > 0 {
        queryDataHandler.timeColumnNames = config.TimeColumnNames
    }

    if len(config.MetricColumnTypes) > 0 {
        queryDataHandler.metricColumnTypes = config.MetricColumnTypes
    }

    queryDataHandler.db = db
    return &queryDataHandler, nil
}

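For orientation (not part of this commit), a sketch of how the mysql plugin side might wire the handler up; db, dsInfo, transformer and macroEngine are assumed to exist, and the values are illustrative:

handler, err := sqleng.NewQueryDataHandler(
    "failed to connect to server", // user-facing fallback error
    db,                            // *sql.DB opened by the plugin
    sqleng.DataPluginConfiguration{
        DSInfo:            dsInfo, // sqleng.DataSourceInfo built from plugin settings
        MetricColumnTypes: []string{"CHAR", "VARCHAR", "TINYTEXT", "TEXT"},
        RowLimit:          1000000,
    },
    transformer, // implements sqleng.SqlQueryResultTransformer
    macroEngine, // implements sqleng.SQLMacroEngine
    log.DefaultLogger,
)
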
type DBDataResponse struct {
    dataResponse backend.DataResponse
    refID        string
}

func (e *DataSourceHandler) Dispose() {
    e.log.Debug("Disposing DB...")
    if e.db != nil {
        if err := e.db.Close(); err != nil {
            e.log.Error("Failed to dispose db", "error", err)
        }
    }
    e.log.Debug("DB disposed")
}

func (e *DataSourceHandler) Ping() error {
    return e.db.Ping()
}

func (e *DataSourceHandler) QueryData(ctx context.Context, req *backend.QueryDataRequest) (*backend.QueryDataResponse, error) {
    result := backend.NewQueryDataResponse()
    ch := make(chan DBDataResponse, len(req.Queries))
    var wg sync.WaitGroup
    // Execute each query in a goroutine and wait for them to finish afterwards
    for _, query := range req.Queries {
        queryjson := QueryJson{
            Fill:   false,
            Format: "time_series",
        }
        err := json.Unmarshal(query.JSON, &queryjson)
        if err != nil {
            return nil, fmt.Errorf("error unmarshal query json: %w", err)
        }

        // the fill-params are only stored inside this function, during query-interpolation. we do not support
        // sending them in "from the outside"
        if queryjson.Fill || queryjson.FillInterval != 0.0 || queryjson.FillMode != "" || queryjson.FillValue != 0.0 {
            return nil, fmt.Errorf("query fill-parameters not supported")
        }

        if queryjson.RawSql == "" {
            continue
        }

        wg.Add(1)
        go e.executeQuery(query, &wg, ctx, ch, queryjson)
    }

    wg.Wait()

    // Read results from channels
    close(ch)
    result.Responses = make(map[string]backend.DataResponse)
    for queryResult := range ch {
        result.Responses[queryResult.refID] = queryResult.dataResponse
    }

    return result, nil
}

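For reference (again outside the diff), the per-query JSON that QueryData unmarshals into QueryJson might be built like this; the table and column names are invented. Note the fill fields must be absent: QueryData rejects them, and they are only injected internally via SetupFillmode further down.

query := backend.DataQuery{
    RefID:         "A",
    Interval:      time.Minute,
    MaxDataPoints: 1500,
    JSON:          []byte(`{"rawSql": "SELECT time, value FROM metrics", "format": "time_series"}`),
}
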
func (e *DataSourceHandler) executeQuery(query backend.DataQuery, wg *sync.WaitGroup, queryContext context.Context,
    ch chan DBDataResponse, queryJson QueryJson) {
    defer wg.Done()
    queryResult := DBDataResponse{
        dataResponse: backend.DataResponse{},
        refID:        query.RefID,
    }

    logger := e.log.FromContext(queryContext)

    defer func() {
        if r := recover(); r != nil {
            logger.Error("ExecuteQuery panic", "error", r, "stack", string(debug.Stack()))
            if theErr, ok := r.(error); ok {
                queryResult.dataResponse.Error = theErr
            } else if theErrString, ok := r.(string); ok {
                queryResult.dataResponse.Error = fmt.Errorf(theErrString)
            } else {
                queryResult.dataResponse.Error = fmt.Errorf("unexpected error - %s", e.userError)
            }
            ch <- queryResult
        }
    }()

    if queryJson.RawSql == "" {
        panic("Query model property rawSql should not be empty at this point")
    }

    timeRange := query.TimeRange

    errAppendDebug := func(frameErr string, err error, query string) {
        var emptyFrame data.Frame
        emptyFrame.SetMeta(&data.FrameMeta{
            ExecutedQueryString: query,
        })
        queryResult.dataResponse.Error = fmt.Errorf("%s: %w", frameErr, err)
        queryResult.dataResponse.Frames = data.Frames{&emptyFrame}
        ch <- queryResult
    }

    // global substitutions
    interpolatedQuery := Interpolate(query, timeRange, e.dsInfo.JsonData.TimeInterval, queryJson.RawSql)

    // data source specific substitutions
    interpolatedQuery, err := e.macroEngine.Interpolate(&query, timeRange, interpolatedQuery)
    if err != nil {
        errAppendDebug("interpolation failed", e.TransformQueryError(logger, err), interpolatedQuery)
        return
    }

    rows, err := e.db.QueryContext(queryContext, interpolatedQuery)
    if err != nil {
        errAppendDebug("db query error", e.TransformQueryError(logger, err), interpolatedQuery)
        return
    }
    defer func() {
        if err := rows.Close(); err != nil {
            logger.Warn("Failed to close rows", "err", err)
        }
    }()

    qm, err := e.newProcessCfg(query, queryContext, rows, interpolatedQuery)
    if err != nil {
        errAppendDebug("failed to get configurations", err, interpolatedQuery)
        return
    }

    // Convert row.Rows to dataframe
    stringConverters := e.queryResultTransformer.GetConverterList()
    frame, err := sqlutil.FrameFromRows(rows, e.rowLimit, sqlutil.ToConverters(stringConverters...)...)
    if err != nil {
        errAppendDebug("convert frame from rows error", err, interpolatedQuery)
        return
    }

    if frame.Meta == nil {
        frame.Meta = &data.FrameMeta{}
    }

    frame.Meta.ExecutedQueryString = interpolatedQuery

    // If no rows were returned, clear any previously set `Fields` with a single empty `data.Field` slice.
    // Then assign `queryResult.dataResponse.Frames` the current single frame with that single empty Field.
    // This assures 1) our visualization doesn't display unwanted empty fields, and also that 2)
    // additionally-needed frame data stays intact and is correctly passed to our visualization.
    if frame.Rows() == 0 {
        frame.Fields = []*data.Field{}
        queryResult.dataResponse.Frames = data.Frames{frame}
        ch <- queryResult
        return
    }

    if err := convertSQLTimeColumnsToEpochMS(frame, qm); err != nil {
        errAppendDebug("converting time columns failed", err, interpolatedQuery)
        return
    }

    if qm.Format == dataQueryFormatSeries {
        // time series has to have time column
        if qm.timeIndex == -1 {
            errAppendDebug("db has no time column", errors.New("no time column found"), interpolatedQuery)
            return
        }

        // Make sure to name the time field 'Time' to be backward compatible with Grafana pre-v8.
        frame.Fields[qm.timeIndex].Name = data.TimeSeriesTimeFieldName

        for i := range qm.columnNames {
            if i == qm.timeIndex || i == qm.metricIndex {
                continue
            }

            if t := frame.Fields[i].Type(); t == data.FieldTypeString || t == data.FieldTypeNullableString {
                continue
            }

            var err error
            if frame, err = convertSQLValueColumnToFloat(frame, i); err != nil {
                errAppendDebug("convert value to float failed", err, interpolatedQuery)
                return
            }
        }

        tsSchema := frame.TimeSeriesSchema()
        if tsSchema.Type == data.TimeSeriesTypeLong {
            var err error
            originalData := frame
            frame, err = data.LongToWide(frame, qm.FillMissing)
            if err != nil {
                errAppendDebug("failed to convert long to wide series when converting from dataframe", err, interpolatedQuery)
                return
            }

            // Before 8x, a special metric column was used to name time series. LongToWide transforms that
            // into a metric label on the value field. But that makes the series name carry both the value
            // column name AND the metric name. So we remove the metric label here and move it to the
            // field name, to get the same series naming as pre v8.
            if len(originalData.Fields) == 3 {
                for _, field := range frame.Fields {
                    if len(field.Labels) == 1 { // 7x only supported one label
                        name, ok := field.Labels["metric"]
                        if ok {
                            field.Name = name
                            field.Labels = nil
                        }
                    }
                }
            }
        }
        if qm.FillMissing != nil {
            // we align the start-time
            startUnixTime := qm.TimeRange.From.Unix() / int64(qm.Interval.Seconds()) * int64(qm.Interval.Seconds())
            alignedTimeRange := backend.TimeRange{
                From: time.Unix(startUnixTime, 0),
                To:   qm.TimeRange.To,
            }

            var err error
            frame, err = sqlutil.ResampleWideFrame(frame, qm.FillMissing, alignedTimeRange, qm.Interval)
            if err != nil {
                logger.Error("Failed to resample dataframe", "err", err)
                frame.AppendNotices(data.Notice{Text: "Failed to resample dataframe", Severity: data.NoticeSeverityWarning})
            }
        }
    }

    queryResult.dataResponse.Frames = data.Frames{frame}
    ch <- queryResult
}

// Interpolate provides global macros/substitutions for all sql datasources.
var Interpolate = func(query backend.DataQuery, timeRange backend.TimeRange, timeInterval string, sql string) string {
    interval := query.Interval

    sql = strings.ReplaceAll(sql, "$__interval_ms", strconv.FormatInt(interval.Milliseconds(), 10))
    sql = strings.ReplaceAll(sql, "$__interval", gtime.FormatInterval(interval))
    sql = strings.ReplaceAll(sql, "$__unixEpochFrom()", fmt.Sprintf("%d", timeRange.From.UTC().Unix()))
    sql = strings.ReplaceAll(sql, "$__unixEpochTo()", fmt.Sprintf("%d", timeRange.To.UTC().Unix()))

    return sql
}

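A worked example of this global pass (mirroring the tests later in this diff): with a 10-minute interval and a 2018-04-12 18:00-18:05 UTC range, the four macros expand like so. The timeInterval argument is accepted but unused by this pass.

q := backend.DataQuery{JSON: []byte("{}"), Interval: 10 * time.Minute}
tr := backend.TimeRange{
    From: time.Unix(1523556000, 0), // 2018-04-12 18:00:00 UTC
    To:   time.Unix(1523556300, 0), // five minutes later
}
sql := Interpolate(q, tr, "", "SELECT $__interval, $__interval_ms, $__unixEpochFrom(), $__unixEpochTo()")
// sql == "SELECT 10m, 600000, 1523556000, 1523556300"
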
func (e *DataSourceHandler) newProcessCfg(query backend.DataQuery, queryContext context.Context,
    rows *sql.Rows, interpolatedQuery string) (*dataQueryModel, error) {
    columnNames, err := rows.Columns()
    if err != nil {
        return nil, err
    }
    columnTypes, err := rows.ColumnTypes()
    if err != nil {
        return nil, err
    }

    qm := &dataQueryModel{
        columnTypes:  columnTypes,
        columnNames:  columnNames,
        timeIndex:    -1,
        timeEndIndex: -1,
        metricIndex:  -1,
        metricPrefix: false,
        queryContext: queryContext,
    }

    queryJson := QueryJson{}
    err = json.Unmarshal(query.JSON, &queryJson)
    if err != nil {
        return nil, err
    }

    if queryJson.Fill {
        qm.FillMissing = &data.FillMissing{}
        qm.Interval = time.Duration(queryJson.FillInterval * float64(time.Second))
        switch strings.ToLower(queryJson.FillMode) {
        case "null":
            qm.FillMissing.Mode = data.FillModeNull
        case "previous":
            qm.FillMissing.Mode = data.FillModePrevious
        case "value":
            qm.FillMissing.Mode = data.FillModeValue
            qm.FillMissing.Value = queryJson.FillValue
        default:
        }
    }

    qm.TimeRange.From = query.TimeRange.From.UTC()
    qm.TimeRange.To = query.TimeRange.To.UTC()

    switch queryJson.Format {
    case "time_series":
        qm.Format = dataQueryFormatSeries
    case "table":
        qm.Format = dataQueryFormatTable
    default:
        panic(fmt.Sprintf("Unrecognized query model format: %q", queryJson.Format))
    }

    for i, col := range qm.columnNames {
        for _, tc := range e.timeColumnNames {
            if col == tc {
                qm.timeIndex = i
                break
            }
        }

        if qm.Format == dataQueryFormatTable && col == "timeend" {
            qm.timeEndIndex = i
            continue
        }

        switch col {
        case "metric":
            qm.metricIndex = i
        default:
            if qm.metricIndex == -1 {
                columnType := qm.columnTypes[i].DatabaseTypeName()
                for _, mct := range e.metricColumnTypes {
                    if columnType == mct {
                        qm.metricIndex = i
                        continue
                    }
                }
            }
        }
    }
    qm.InterpolatedQuery = interpolatedQuery
    return qm, nil
}

// dataQueryFormat is the type of query.
type dataQueryFormat string

const (
    // dataQueryFormatTable identifies a table query (default).
    dataQueryFormatTable dataQueryFormat = "table"
    // dataQueryFormatSeries identifies a time series query.
    dataQueryFormatSeries dataQueryFormat = "time_series"
)

type dataQueryModel struct {
    InterpolatedQuery string // property not set until after Interpolate()
    Format            dataQueryFormat
    TimeRange         backend.TimeRange
    FillMissing       *data.FillMissing // property not set until after Interpolate()
    Interval          time.Duration
    columnNames       []string
    columnTypes       []*sql.ColumnType
    timeIndex         int
    timeEndIndex      int
    metricIndex       int
    metricPrefix      bool
    queryContext      context.Context
}

func convertSQLTimeColumnsToEpochMS(frame *data.Frame, qm *dataQueryModel) error {
    if qm.timeIndex != -1 {
        if err := convertSQLTimeColumnToEpochMS(frame, qm.timeIndex); err != nil {
            return fmt.Errorf("%v: %w", "failed to convert time column", err)
        }
    }

    if qm.timeEndIndex != -1 {
        if err := convertSQLTimeColumnToEpochMS(frame, qm.timeEndIndex); err != nil {
            return fmt.Errorf("%v: %w", "failed to convert timeend column", err)
        }
    }

    return nil
}

// convertSQLTimeColumnToEpochMS converts column named time to unix timestamp in milliseconds
// to make native datetime types and epoch dates work in annotation and table queries.
func convertSQLTimeColumnToEpochMS(frame *data.Frame, timeIndex int) error {
    if timeIndex < 0 || timeIndex >= len(frame.Fields) {
        return fmt.Errorf("timeIndex %d is out of range", timeIndex)
    }

    origin := frame.Fields[timeIndex]
    valueType := origin.Type()
    if valueType == data.FieldTypeTime || valueType == data.FieldTypeNullableTime {
        return nil
    }

    newField := data.NewFieldFromFieldType(data.FieldTypeNullableTime, 0)
    newField.Name = origin.Name
    newField.Labels = origin.Labels

    valueLength := origin.Len()
    for i := 0; i < valueLength; i++ {
        v, err := origin.NullableFloatAt(i)
        if err != nil {
            return fmt.Errorf("unable to convert data to a time field")
        }
        if v == nil {
            newField.Append(nil)
        } else {
            timestamp := time.Unix(0, int64(epochPrecisionToMS(*v))*int64(time.Millisecond))
            newField.Append(&timestamp)
        }
    }
    frame.Fields[timeIndex] = newField

    return nil
}

// convertSQLValueColumnToFloat converts timeseries value column to float.
func convertSQLValueColumnToFloat(frame *data.Frame, Index int) (*data.Frame, error) {
    if Index < 0 || Index >= len(frame.Fields) {
        return frame, fmt.Errorf("metricIndex %d is out of range", Index)
    }

    origin := frame.Fields[Index]
    valueType := origin.Type()
    if valueType == data.FieldTypeFloat64 || valueType == data.FieldTypeNullableFloat64 {
        return frame, nil
    }

    newField := data.NewFieldFromFieldType(data.FieldTypeNullableFloat64, origin.Len())
    newField.Name = origin.Name
    newField.Labels = origin.Labels

    for i := 0; i < origin.Len(); i++ {
        v, err := origin.NullableFloatAt(i)
        if err != nil {
            return frame, err
        }
        newField.Set(i, v)
    }

    frame.Fields[Index] = newField

    return frame, nil
}

func SetupFillmode(query *backend.DataQuery, interval time.Duration, fillmode string) error {
    rawQueryProp := make(map[string]any)
    queryBytes, err := query.JSON.MarshalJSON()
    if err != nil {
        return err
    }
    err = json.Unmarshal(queryBytes, &rawQueryProp)
    if err != nil {
        return err
    }
    rawQueryProp["fill"] = true
    rawQueryProp["fillInterval"] = interval.Seconds()

    switch fillmode {
    case "NULL":
        rawQueryProp["fillMode"] = "null"
    case "previous":
        rawQueryProp["fillMode"] = "previous"
    default:
        rawQueryProp["fillMode"] = "value"
        floatVal, err := strconv.ParseFloat(fillmode, 64)
        if err != nil {
            return fmt.Errorf("error parsing fill value %v", fillmode)
        }
        rawQueryProp["fillValue"] = floatVal
    }
    query.JSON, err = json.Marshal(rawQueryProp)
    if err != nil {
        return err
    }
    return nil
}

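SetupFillmode is the producer side of the fill handling consumed by newProcessCfg above: a datasource macro engine that parses something like $__timeGroup(time, 5m, previous) would call it to write the fill settings back into query.JSON. A minimal sketch, assuming query is the *backend.DataQuery being interpolated:

// after parsing a fill argument of "previous" (parsing not shown):
if err := SetupFillmode(query, 5*time.Minute, "previous"); err != nil {
    return "", err
}
// "NULL" selects null-fill, "previous" repeats the last value, and any
// numeric string such as "0" becomes a constant fill value.
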
type SQLMacroEngineBase struct{}

func NewSQLMacroEngineBase() *SQLMacroEngineBase {
    return &SQLMacroEngineBase{}
}

func (m *SQLMacroEngineBase) ReplaceAllStringSubmatchFunc(re *regexp.Regexp, str string, repl func([]string) string) string {
    result := ""
    lastIndex := 0

    for _, v := range re.FindAllStringSubmatchIndex(str, -1) {
        groups := []string{}
        for i := 0; i < len(v); i += 2 {
            groups = append(groups, str[v[i]:v[i+1]])
        }

        result += str[lastIndex:v[0]] + repl(groups)
        lastIndex = v[1]
    }

    return result + str[lastIndex:]
}

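An illustrative use of ReplaceAllStringSubmatchFunc with a made-up macro; groups[0] is the whole match and groups[1] the first capture (the column name, matched by the same identifier pattern as rsIdentifier in the macros hunk above):

re := regexp.MustCompile(`\$__time\(([_a-zA-Z0-9]+)\)`)
out := NewSQLMacroEngineBase().ReplaceAllStringSubmatchFunc(re, "SELECT $__time(created_at)", func(groups []string) string {
    return "UNIX_TIMESTAMP(" + groups[1] + ") AS time_sec"
})
// out == "SELECT UNIX_TIMESTAMP(created_at) AS time_sec"
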
// epochPrecisionToMS converts epoch precision to millisecond, if needed.
// Only seconds to milliseconds supported right now
func epochPrecisionToMS(value float64) float64 {
    s := strconv.FormatFloat(value, 'e', -1, 64)
    if strings.HasSuffix(s, "e+09") {
        return value * float64(1e3)
    }

    if strings.HasSuffix(s, "e+18") {
        return value / float64(time.Millisecond)
    }

    return value
}

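The heuristic is purely magnitude-based: a value around 1e9 is assumed to be seconds, around 1e18 nanoseconds, and anything else is passed through as already-milliseconds. For example:

epochPrecisionToMS(1523556000)    // "1.523556e+09" -> seconds, returns 1.523556e+12 ms
epochPrecisionToMS(1.523556e18)   // "1.523556e+18" -> nanoseconds, returns 1.523556e+12 ms
epochPrecisionToMS(1523556000000) // no matching suffix, returned unchanged as ms
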
pkg/tsdb/mysql/sqleng/sql_engine_test.go (new file, 438 lines)
@@ -0,0 +1,438 @@
package sqleng

import (
    "fmt"
    "net"
    "testing"
    "time"

    "github.com/grafana/grafana-plugin-sdk-go/backend"
    "github.com/grafana/grafana-plugin-sdk-go/data"
    "github.com/grafana/grafana-plugin-sdk-go/data/sqlutil"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"

    "github.com/grafana/grafana-plugin-sdk-go/backend/log"
    "github.com/grafana/grafana/pkg/tsdb/mysql/sqleng/util"
)

func TestSQLEngine(t *testing.T) {
    dt := time.Date(2018, 3, 14, 21, 20, 6, int(527345*time.Microsecond), time.UTC)

    t.Run("Handle interpolating $__interval and $__interval_ms", func(t *testing.T) {
        from := time.Date(2018, 4, 12, 18, 0, 0, 0, time.UTC)
        to := from.Add(5 * time.Minute)
        timeRange := backend.TimeRange{From: from, To: to}

        text := "$__interval $__timeGroupAlias(time,$__interval) $__interval_ms"

        t.Run("interpolate 10 minutes $__interval", func(t *testing.T) {
            query := backend.DataQuery{JSON: []byte("{}"), MaxDataPoints: 1500, Interval: time.Minute * 10}
            sql := Interpolate(query, timeRange, "", text)
            require.Equal(t, "10m $__timeGroupAlias(time,10m) 600000", sql)
        })

        t.Run("interpolate 4 seconds $__interval", func(t *testing.T) {
            query := backend.DataQuery{JSON: []byte("{}"), MaxDataPoints: 1500, Interval: time.Second * 4}
            sql := Interpolate(query, timeRange, "", text)
            require.Equal(t, "4s $__timeGroupAlias(time,4s) 4000", sql)
        })

        t.Run("interpolate 200 milliseconds $__interval", func(t *testing.T) {
            query := backend.DataQuery{JSON: []byte("{}"), MaxDataPoints: 1500, Interval: time.Millisecond * 200}
            sql := Interpolate(query, timeRange, "", text)
            require.Equal(t, "200ms $__timeGroupAlias(time,200ms) 200", sql)
        })
    })

    t.Run("Given a time range between 2018-04-12 18:00 and 2018-04-12 18:05", func(t *testing.T) {
        from := time.Date(2018, 4, 12, 18, 0, 0, 0, time.UTC)
        to := from.Add(5 * time.Minute)
        timeRange := backend.TimeRange{From: from, To: to}
        query := backend.DataQuery{JSON: []byte("{}"), MaxDataPoints: 1500, Interval: time.Second * 60}

        t.Run("interpolate __unixEpochFrom function", func(t *testing.T) {
            sql := Interpolate(query, timeRange, "", "select $__unixEpochFrom()")
            require.Equal(t, fmt.Sprintf("select %d", from.Unix()), sql)
        })

        t.Run("interpolate __unixEpochTo function", func(t *testing.T) {
            sql := Interpolate(query, timeRange, "", "select $__unixEpochTo()")
            require.Equal(t, fmt.Sprintf("select %d", to.Unix()), sql)
        })
    })

t.Run("Given row values with int64 as time columns", func(t *testing.T) {
|
||||
tSeconds := dt.Unix()
|
||||
tMilliseconds := dt.UnixNano() / 1e6
|
||||
tNanoSeconds := dt.UnixNano()
|
||||
var nilPointer *int64
|
||||
|
||||
originFrame := data.NewFrame("",
|
||||
data.NewField("time1", nil, []int64{
|
||||
tSeconds,
|
||||
}),
|
||||
data.NewField("time2", nil, []*int64{
|
||||
util.Pointer(tSeconds),
|
||||
}),
|
||||
data.NewField("time3", nil, []int64{
|
||||
tMilliseconds,
|
||||
}),
|
||||
data.NewField("time4", nil, []*int64{
|
||||
util.Pointer(tMilliseconds),
|
||||
}),
|
||||
data.NewField("time5", nil, []int64{
|
||||
tNanoSeconds,
|
||||
}),
|
||||
data.NewField("time6", nil, []*int64{
|
||||
util.Pointer(tNanoSeconds),
|
||||
}),
|
||||
data.NewField("time7", nil, []*int64{
|
||||
nilPointer,
|
||||
}),
|
||||
)
|
||||
|
||||
for i := 0; i < len(originFrame.Fields); i++ {
|
||||
err := convertSQLTimeColumnToEpochMS(originFrame, i)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
require.Equal(t, dt.Unix(), (*originFrame.Fields[0].At(0).(*time.Time)).Unix())
|
||||
require.Equal(t, dt.Unix(), (*originFrame.Fields[1].At(0).(*time.Time)).Unix())
|
||||
require.Equal(t, dt.Unix(), (*originFrame.Fields[2].At(0).(*time.Time)).Unix())
|
||||
require.Equal(t, dt.Unix(), (*originFrame.Fields[3].At(0).(*time.Time)).Unix())
|
||||
require.Equal(t, dt.Unix(), (*originFrame.Fields[4].At(0).(*time.Time)).Unix())
|
||||
require.Equal(t, dt.Unix(), (*originFrame.Fields[5].At(0).(*time.Time)).Unix())
|
||||
require.Nil(t, originFrame.Fields[6].At(0))
|
||||
})
|
||||
|
||||
t.Run("Given row values with uint64 as time columns", func(t *testing.T) {
|
||||
tSeconds := uint64(dt.Unix())
|
||||
tMilliseconds := uint64(dt.UnixNano() / 1e6)
|
||||
tNanoSeconds := uint64(dt.UnixNano())
|
||||
var nilPointer *uint64
|
||||
|
||||
originFrame := data.NewFrame("",
|
||||
data.NewField("time1", nil, []uint64{
|
||||
tSeconds,
|
||||
}),
|
||||
data.NewField("time2", nil, []*uint64{
|
||||
util.Pointer(tSeconds),
|
||||
}),
|
||||
data.NewField("time3", nil, []uint64{
|
||||
tMilliseconds,
|
||||
}),
|
||||
data.NewField("time4", nil, []*uint64{
|
||||
util.Pointer(tMilliseconds),
|
||||
}),
|
||||
data.NewField("time5", nil, []uint64{
|
||||
tNanoSeconds,
|
||||
}),
|
||||
data.NewField("time6", nil, []*uint64{
|
||||
util.Pointer(tNanoSeconds),
|
||||
}),
|
||||
data.NewField("time7", nil, []*uint64{
|
||||
nilPointer,
|
||||
}),
|
||||
)
|
||||
|
||||
for i := 0; i < len(originFrame.Fields); i++ {
|
||||
err := convertSQLTimeColumnToEpochMS(originFrame, i)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
require.Equal(t, dt.Unix(), (*originFrame.Fields[0].At(0).(*time.Time)).Unix())
|
||||
require.Equal(t, dt.Unix(), (*originFrame.Fields[1].At(0).(*time.Time)).Unix())
|
||||
require.Equal(t, dt.Unix(), (*originFrame.Fields[2].At(0).(*time.Time)).Unix())
|
||||
require.Equal(t, dt.Unix(), (*originFrame.Fields[3].At(0).(*time.Time)).Unix())
|
||||
require.Equal(t, dt.Unix(), (*originFrame.Fields[4].At(0).(*time.Time)).Unix())
|
||||
require.Equal(t, dt.Unix(), (*originFrame.Fields[5].At(0).(*time.Time)).Unix())
|
||||
require.Nil(t, originFrame.Fields[6].At(0))
|
||||
})
|
||||
|
||||
t.Run("Given row values with int32 as time columns", func(t *testing.T) {
|
||||
tSeconds := int32(dt.Unix())
|
||||
var nilInt *int32
|
||||
|
||||
originFrame := data.NewFrame("",
|
||||
data.NewField("time1", nil, []int32{
|
||||
tSeconds,
|
||||
}),
|
||||
data.NewField("time2", nil, []*int32{
|
||||
util.Pointer(tSeconds),
|
||||
}),
|
||||
data.NewField("time7", nil, []*int32{
|
||||
nilInt,
|
||||
}),
|
||||
)
|
||||
for i := 0; i < 3; i++ {
|
||||
err := convertSQLTimeColumnToEpochMS(originFrame, i)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
require.Equal(t, dt.Unix(), (*originFrame.Fields[0].At(0).(*time.Time)).Unix())
|
||||
require.Equal(t, dt.Unix(), (*originFrame.Fields[1].At(0).(*time.Time)).Unix())
|
||||
require.Nil(t, originFrame.Fields[2].At(0))
|
||||
})
|
||||
|
||||
t.Run("Given row values with uint32 as time columns", func(t *testing.T) {
|
||||
tSeconds := uint32(dt.Unix())
|
||||
var nilInt *uint32
|
||||
|
||||
originFrame := data.NewFrame("",
|
||||
data.NewField("time1", nil, []uint32{
|
||||
tSeconds,
|
||||
}),
|
||||
data.NewField("time2", nil, []*uint32{
|
||||
util.Pointer(tSeconds),
|
||||
}),
|
||||
data.NewField("time7", nil, []*uint32{
|
||||
nilInt,
|
||||
}),
|
||||
)
|
||||
for i := 0; i < len(originFrame.Fields); i++ {
|
||||
err := convertSQLTimeColumnToEpochMS(originFrame, i)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
require.Equal(t, dt.Unix(), (*originFrame.Fields[0].At(0).(*time.Time)).Unix())
|
||||
require.Equal(t, dt.Unix(), (*originFrame.Fields[1].At(0).(*time.Time)).Unix())
|
||||
require.Nil(t, originFrame.Fields[2].At(0))
|
||||
})
|
||||
|
||||
t.Run("Given row values with float64 as time columns", func(t *testing.T) {
|
||||
tSeconds := float64(dt.UnixNano()) / float64(time.Second)
|
||||
tMilliseconds := float64(dt.UnixNano()) / float64(time.Millisecond)
|
||||
tNanoSeconds := float64(dt.UnixNano())
|
||||
var nilPointer *float64
|
||||
|
||||
originFrame := data.NewFrame("",
|
||||
data.NewField("time1", nil, []float64{
|
||||
tSeconds,
|
||||
}),
|
||||
data.NewField("time2", nil, []*float64{
|
||||
util.Pointer(tSeconds),
|
||||
}),
|
||||
data.NewField("time3", nil, []float64{
|
||||
tMilliseconds,
|
||||
}),
|
||||
data.NewField("time4", nil, []*float64{
|
||||
util.Pointer(tMilliseconds),
|
||||
}),
|
||||
data.NewField("time5", nil, []float64{
|
||||
tNanoSeconds,
|
||||
}),
|
||||
data.NewField("time6", nil, []*float64{
|
||||
util.Pointer(tNanoSeconds),
|
||||
}),
|
||||
data.NewField("time7", nil, []*float64{
|
||||
nilPointer,
|
||||
}),
|
||||
)
|
||||
|
||||
for i := 0; i < len(originFrame.Fields); i++ {
|
||||
err := convertSQLTimeColumnToEpochMS(originFrame, i)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
require.Equal(t, dt.Unix(), (*originFrame.Fields[0].At(0).(*time.Time)).Unix())
|
||||
require.Equal(t, dt.Unix(), (*originFrame.Fields[1].At(0).(*time.Time)).Unix())
|
||||
require.Equal(t, dt.Unix(), (*originFrame.Fields[2].At(0).(*time.Time)).Unix())
|
||||
require.Equal(t, dt.Unix(), (*originFrame.Fields[3].At(0).(*time.Time)).Unix())
|
||||
require.Equal(t, dt.Unix(), (*originFrame.Fields[4].At(0).(*time.Time)).Unix())
|
||||
require.Equal(t, dt.Unix(), (*originFrame.Fields[5].At(0).(*time.Time)).Unix())
|
||||
require.Nil(t, originFrame.Fields[6].At(0))
|
||||
})
|
||||
|
||||
t.Run("Given row values with float32 as time columns", func(t *testing.T) {
|
||||
tSeconds := float32(dt.Unix())
|
||||
var nilInt *float32
|
||||
|
||||
originFrame := data.NewFrame("",
|
||||
data.NewField("time1", nil, []float32{
|
||||
tSeconds,
|
||||
}),
|
||||
data.NewField("time2", nil, []*float32{
|
||||
util.Pointer(tSeconds),
|
||||
}),
|
||||
data.NewField("time7", nil, []*float32{
|
||||
nilInt,
|
||||
}),
|
||||
)
|
||||
for i := 0; i < len(originFrame.Fields); i++ {
|
||||
err := convertSQLTimeColumnToEpochMS(originFrame, i)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
require.Equal(t, int64(tSeconds), (*originFrame.Fields[0].At(0).(*time.Time)).Unix())
|
||||
require.Equal(t, int64(tSeconds), (*originFrame.Fields[1].At(0).(*time.Time)).Unix())
|
||||
require.Nil(t, originFrame.Fields[2].At(0))
|
||||
})
|
||||
|
||||
t.Run("Given row with value columns, would be converted to float64", func(t *testing.T) {
|
||||
originFrame := data.NewFrame("",
|
||||
data.NewField("value1", nil, []int64{
|
||||
int64(1),
|
||||
}),
|
||||
data.NewField("value2", nil, []*int64{
|
||||
util.Pointer(int64(1)),
|
||||
}),
|
||||
data.NewField("value3", nil, []int32{
|
||||
int32(1),
|
||||
}),
|
||||
data.NewField("value4", nil, []*int32{
|
||||
util.Pointer(int32(1)),
|
||||
}),
|
||||
data.NewField("value5", nil, []int16{
|
||||
int16(1),
|
||||
}),
|
||||
data.NewField("value6", nil, []*int16{
|
||||
util.Pointer(int16(1)),
|
||||
}),
|
||||
data.NewField("value7", nil, []int8{
|
||||
int8(1),
|
||||
}),
|
||||
data.NewField("value8", nil, []*int8{
|
||||
util.Pointer(int8(1)),
|
||||
}),
|
||||
data.NewField("value9", nil, []float64{
|
||||
float64(1),
|
||||
}),
|
||||
data.NewField("value10", nil, []*float64{
|
||||
util.Pointer(1.0),
|
||||
}),
|
||||
data.NewField("value11", nil, []float32{
|
||||
float32(1),
|
||||
}),
|
||||
data.NewField("value12", nil, []*float32{
|
||||
util.Pointer(float32(1)),
|
||||
}),
|
||||
data.NewField("value13", nil, []uint64{
|
||||
uint64(1),
|
||||
}),
|
||||
data.NewField("value14", nil, []*uint64{
|
||||
util.Pointer(uint64(1)),
|
||||
}),
|
||||
data.NewField("value15", nil, []uint32{
|
||||
uint32(1),
|
||||
}),
|
||||
data.NewField("value16", nil, []*uint32{
|
||||
util.Pointer(uint32(1)),
|
||||
}),
|
||||
data.NewField("value17", nil, []uint16{
|
||||
uint16(1),
|
||||
}),
|
||||
data.NewField("value18", nil, []*uint16{
|
||||
util.Pointer(uint16(1)),
|
||||
}),
|
||||
data.NewField("value19", nil, []uint8{
|
||||
uint8(1),
|
||||
}),
|
||||
data.NewField("value20", nil, []*uint8{
|
||||
util.Pointer(uint8(1)),
|
||||
}),
|
||||
)
|
||||
for i := 0; i < len(originFrame.Fields); i++ {
|
||||
_, err := convertSQLValueColumnToFloat(originFrame, i)
|
||||
require.NoError(t, err)
|
||||
if i == 8 {
|
||||
require.Equal(t, float64(1), originFrame.Fields[i].At(0).(float64))
|
||||
} else {
|
||||
require.NotNil(t, originFrame.Fields[i].At(0).(*float64))
|
||||
require.Equal(t, float64(1), *originFrame.Fields[i].At(0).(*float64))
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("Given row with nil value columns", func(t *testing.T) {
|
||||
var int64NilPointer *int64
|
||||
var int32NilPointer *int32
|
||||
var int16NilPointer *int16
|
||||
var int8NilPointer *int8
|
||||
var float64NilPointer *float64
|
||||
var float32NilPointer *float32
|
||||
var uint64NilPointer *uint64
|
||||
var uint32NilPointer *uint32
|
||||
var uint16NilPointer *uint16
|
||||
var uint8NilPointer *uint8
|
||||
|
||||
originFrame := data.NewFrame("",
|
||||
data.NewField("value1", nil, []*int64{
|
||||
int64NilPointer,
|
||||
}),
|
||||
data.NewField("value2", nil, []*int32{
|
||||
int32NilPointer,
|
||||
}),
|
||||
data.NewField("value3", nil, []*int16{
|
||||
int16NilPointer,
|
||||
}),
|
||||
data.NewField("value4", nil, []*int8{
|
||||
int8NilPointer,
|
||||
}),
|
||||
data.NewField("value5", nil, []*float64{
|
||||
float64NilPointer,
|
||||
}),
|
||||
data.NewField("value6", nil, []*float32{
|
||||
float32NilPointer,
|
||||
}),
|
||||
data.NewField("value7", nil, []*uint64{
|
||||
uint64NilPointer,
|
||||
}),
|
||||
data.NewField("value8", nil, []*uint32{
|
||||
uint32NilPointer,
|
||||
}),
|
||||
data.NewField("value9", nil, []*uint16{
|
||||
uint16NilPointer,
|
||||
}),
|
||||
data.NewField("value10", nil, []*uint8{
|
||||
uint8NilPointer,
|
||||
}),
|
||||
)
|
||||
for i := 0; i < len(originFrame.Fields); i++ {
|
||||
t.Run("", func(t *testing.T) {
|
||||
_, err := convertSQLValueColumnToFloat(originFrame, i)
|
||||
require.NoError(t, err)
|
||||
require.Nil(t, originFrame.Fields[i].At(0))
|
||||
})
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("Should not return raw connection errors", func(t *testing.T) {
|
||||
err := net.OpError{Op: "Dial", Err: fmt.Errorf("inner-error")}
|
||||
transformer := &testQueryResultTransformer{}
|
||||
dp := DataSourceHandler{
|
||||
log: backend.NewLoggerWith("logger", "test"),
|
||||
queryResultTransformer: transformer,
|
||||
}
|
||||
resultErr := dp.TransformQueryError(dp.log, &err)
|
||||
assert.False(t, transformer.transformQueryErrorWasCalled)
|
||||
errorText := resultErr.Error()
|
||||
assert.NotEqual(t, err, resultErr)
|
||||
assert.NotContains(t, errorText, "inner-error")
|
||||
assert.Contains(t, errorText, "failed to connect to server")
|
||||
})
|
||||
|
||||
t.Run("Should return non-connection errors unmodified", func(t *testing.T) {
|
||||
err := fmt.Errorf("normal error")
|
||||
transformer := &testQueryResultTransformer{}
|
||||
dp := DataSourceHandler{
|
||||
log: backend.NewLoggerWith("logger", "test"),
|
||||
queryResultTransformer: transformer,
|
||||
}
|
||||
resultErr := dp.TransformQueryError(dp.log, err)
|
||||
assert.True(t, transformer.transformQueryErrorWasCalled)
|
||||
assert.Equal(t, err, resultErr)
|
||||
assert.ErrorIs(t, err, resultErr)
|
||||
})
|
||||
}
|
||||
|
||||
type testQueryResultTransformer struct {
    transformQueryErrorWasCalled bool
}

func (t *testQueryResultTransformer) TransformQueryError(_ log.Logger, err error) error {
    t.transformQueryErrorWasCalled = true
    return err
}

func (t *testQueryResultTransformer) GetConverterList() []sqlutil.StringConverter {
    return nil
}

pkg/tsdb/mysql/sqleng/util/util.go (new file, 3 lines)
@@ -0,0 +1,3 @@
package util

func Pointer[T any](v T) *T { return &v }

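The helper exists because Go has no address-of operator for literals or function results; the tests above use it to build nullable columns, e.g.:

// *int64 pointing at 42, usable alongside nil in a nullable field
field := data.NewField("time", nil, []*int64{util.Pointer(int64(42)), nil})
_ = field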