mirror of
https://github.com/grafana/grafana.git
synced 2024-12-28 18:01:40 -06:00
Live: declarative processing pipeline, input -> frames -> output, toggle protected POC (#37644)
This commit is contained in:
parent
d19f33b52f
commit
3ee861f57e
7
go.mod
7
go.mod
@ -28,6 +28,7 @@ require (
|
||||
github.com/crewjam/saml v0.4.6-0.20201227203850-bca570abb2ce
|
||||
github.com/davecgh/go-spew v1.1.1
|
||||
github.com/denisenkom/go-mssqldb v0.0.0-20200910202707-1e08a3fab204
|
||||
github.com/dop251/goja v0.0.0-20210804101310-32956a348b49
|
||||
github.com/fatih/color v1.10.0
|
||||
github.com/gchaincl/sqlhooks v1.3.0
|
||||
github.com/getsentry/sentry-go v0.10.0
|
||||
@ -39,7 +40,9 @@ require (
|
||||
github.com/go-stack/stack v1.8.0
|
||||
github.com/gobwas/glob v0.2.3
|
||||
github.com/gofrs/uuid v4.0.0+incompatible
|
||||
github.com/gogo/protobuf v1.3.2
|
||||
github.com/golang/mock v1.6.0
|
||||
github.com/golang/snappy v0.0.4
|
||||
github.com/google/go-cmp v0.5.6
|
||||
github.com/google/uuid v1.3.0
|
||||
github.com/google/wire v0.5.0
|
||||
@ -67,6 +70,7 @@ require (
|
||||
github.com/mattn/go-sqlite3 v1.14.7
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1
|
||||
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f
|
||||
github.com/ohler55/ojg v1.12.3
|
||||
github.com/opentracing/opentracing-go v1.2.0
|
||||
github.com/patrickmn/go-cache v2.1.0+incompatible
|
||||
github.com/pkg/browser v0.0.0-20210115035449-ce105d075bb4 // indirect
|
||||
@ -137,6 +141,7 @@ require (
|
||||
github.com/cockroachdb/apd/v2 v2.0.1 // indirect
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.0 // indirect
|
||||
github.com/deepmap/oapi-codegen v1.3.13 // indirect
|
||||
github.com/dlclark/regexp2 v1.4.1-0.20201116162257-a2a8dda75c91 // indirect
|
||||
github.com/docker/go-units v0.4.0 // indirect
|
||||
github.com/dustin/go-humanize v1.0.0 // indirect
|
||||
github.com/edsrzf/mmap-go v1.0.0 // indirect
|
||||
@ -154,12 +159,10 @@ require (
|
||||
github.com/go-openapi/swag v0.19.15 // indirect
|
||||
github.com/go-openapi/validate v0.20.2 // indirect
|
||||
github.com/gogo/googleapis v1.4.1 // indirect
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/gogo/status v1.0.3 // indirect
|
||||
github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe // indirect
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
|
||||
github.com/golang/protobuf v1.5.2 // indirect
|
||||
github.com/golang/snappy v0.0.4 // indirect
|
||||
github.com/gomodule/redigo v2.0.0+incompatible // indirect
|
||||
github.com/google/btree v1.0.1 // indirect
|
||||
github.com/google/flatbuffers v1.12.0 // indirect
|
||||
|
10
go.sum
10
go.sum
@ -479,6 +479,8 @@ github.com/digitalocean/godo v1.58.0/go.mod h1:p7dOjjtSBqCTUksqtA5Fd3uaKs9kyTq2x
|
||||
github.com/digitalocean/godo v1.62.0 h1:7Gw2KFsWkxl36qJa0s50tgXaE0Cgm51JdRP+MFQvNnM=
|
||||
github.com/digitalocean/godo v1.62.0/go.mod h1:p7dOjjtSBqCTUksqtA5Fd3uaKs9kyTq2xcz76ulEJRU=
|
||||
github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8=
|
||||
github.com/dlclark/regexp2 v1.4.1-0.20201116162257-a2a8dda75c91 h1:Izz0+t1Z5nI16/II7vuEo/nHjodOg0p7+OiDpjX5t1E=
|
||||
github.com/dlclark/regexp2 v1.4.1-0.20201116162257-a2a8dda75c91/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc=
|
||||
github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E=
|
||||
github.com/docker/distribution v2.6.0-rc.1.0.20170726174610-edc3ab29cdff+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
|
||||
github.com/docker/distribution v2.7.0+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
|
||||
@ -503,6 +505,9 @@ github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDD
|
||||
github.com/docker/libnetwork v0.8.0-dev.2.0.20181012153825-d7b61745d166/go.mod h1:93m0aTqz6z+g32wla4l4WxTrdtvBRmVzYRkYvasA5Z8=
|
||||
github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
|
||||
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
|
||||
github.com/dop251/goja v0.0.0-20210804101310-32956a348b49 h1:CtSi0QlA2Hy+nOh8JAZoiEBLW5pliAiKJ3l1Iq1472I=
|
||||
github.com/dop251/goja v0.0.0-20210804101310-32956a348b49/go.mod h1:R9ET47fwRVRPZnOGvHxxhuZcbrMCuiqOz3Rlrh4KSnk=
|
||||
github.com/dop251/goja_nodejs v0.0.0-20210225215109-d91c329300e7/go.mod h1:hn7BA7c8pLvoGndExHudxTDKZ84Pyvv+90pbBjbTz0Y=
|
||||
github.com/drone/envsubst v1.0.2/go.mod h1:bkZbnc/2vh1M12Ecn7EYScpI4YGYU0etwLJICOWi8Z0=
|
||||
github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
|
||||
github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
|
||||
@ -1064,7 +1069,10 @@ github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brv
|
||||
github.com/hashicorp/go-bexpr v0.1.2/go.mod h1:ANbpTX1oAql27TZkKVeW8p1w8NTdnyzPe/0qqPCKohU=
|
||||
github.com/hashicorp/go-checkpoint v0.5.0/go.mod h1:7nfLNL10NsxqO4iWuW6tWW0HjZuDrwkBuEQsVcpCOgg=
|
||||
github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
|
||||
github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
|
||||
github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM=
|
||||
github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM=
|
||||
github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
|
||||
github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
|
||||
github.com/hashicorp/go-connlimit v0.3.0/go.mod h1:OUj9FGL1tPIhl/2RCfzYHrIiWj+VVPGNyVPnUX8AqS0=
|
||||
github.com/hashicorp/go-discover v0.0.0-20200501174627-ad1e96bde088/go.mod h1:vZu6Opqf49xX5lsFAu7iFNewkcVF1sn/wyapZh5ytlg=
|
||||
@ -1557,6 +1565,8 @@ github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLA
|
||||
github.com/nsqio/go-nsq v1.0.7/go.mod h1:XP5zaUs3pqf+Q71EqUJs3HYfBIqfK6G83WQMdNN+Ito=
|
||||
github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78=
|
||||
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
|
||||
github.com/ohler55/ojg v1.12.3 h1:bEBT11J3azRlzQnUQYxTUVMzGAJHfv/z1fjxOLBcXXM=
|
||||
github.com/ohler55/ojg v1.12.3/go.mod h1:DipxaGtQkxd8U67rc3s5ugRGmaHQW7YfJlN7xAaXu5U=
|
||||
github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs=
|
||||
github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA=
|
||||
github.com/oklog/run v1.1.0 h1:GEenZ1cK0+q0+wsJew9qUg/DyD8k3JzYsZAi5gYi2mA=
|
||||
|
@ -422,7 +422,7 @@ func (hs *HTTPServer) registerRoutes() {
|
||||
// the channel path is in the name
|
||||
liveRoute.Post("/publish", bind(dtos.LivePublishCmd{}), routing.Wrap(hs.Live.HandleHTTPPublish))
|
||||
|
||||
// POST influx line protocol
|
||||
// POST influx line protocol.
|
||||
liveRoute.Post("/push/:streamId", hs.LivePushGateway.Handle)
|
||||
|
||||
// List available streams and fields
|
||||
@ -430,6 +430,13 @@ func (hs *HTTPServer) registerRoutes() {
|
||||
|
||||
// Some channels may have info
|
||||
liveRoute.Get("/info/*", routing.Wrap(hs.Live.HandleInfoHTTP))
|
||||
|
||||
if hs.Cfg.FeatureToggles["live-pipeline"] {
|
||||
// POST Live data to be processed according to channel rules.
|
||||
liveRoute.Post("/push/:streamId/:path", hs.LivePushGateway.HandlePath)
|
||||
liveRoute.Get("/channel-rules", routing.Wrap(hs.Live.HandleChannelRulesListHTTP), reqOrgAdmin)
|
||||
liveRoute.Get("/remote-write-backends", routing.Wrap(hs.Live.HandleRemoteWriteBackendsListHTTP), reqOrgAdmin)
|
||||
}
|
||||
})
|
||||
|
||||
// short urls
|
||||
|
@ -6,12 +6,11 @@ import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/gobwas/glob"
|
||||
|
||||
"github.com/grafana/grafana/pkg/api/dtos"
|
||||
"github.com/grafana/grafana/pkg/api/response"
|
||||
"github.com/grafana/grafana/pkg/api/routing"
|
||||
@ -28,6 +27,7 @@ import (
|
||||
"github.com/grafana/grafana/pkg/services/live/liveplugin"
|
||||
"github.com/grafana/grafana/pkg/services/live/managedstream"
|
||||
"github.com/grafana/grafana/pkg/services/live/orgchannel"
|
||||
"github.com/grafana/grafana/pkg/services/live/pipeline"
|
||||
"github.com/grafana/grafana/pkg/services/live/pushws"
|
||||
"github.com/grafana/grafana/pkg/services/live/runstream"
|
||||
"github.com/grafana/grafana/pkg/services/live/survey"
|
||||
@ -37,6 +37,7 @@ import (
|
||||
"github.com/grafana/grafana/pkg/util"
|
||||
|
||||
"github.com/centrifugal/centrifuge"
|
||||
"github.com/gobwas/glob"
|
||||
"github.com/grafana/grafana-plugin-sdk-go/backend"
|
||||
"github.com/grafana/grafana-plugin-sdk-go/live"
|
||||
"gopkg.in/redis.v5"
|
||||
@ -137,20 +138,7 @@ func ProvideService(plugCtxProvider *plugincontext.Provider, cfg *setting.Cfg, r
|
||||
node.SetPresenceManager(presenceManager)
|
||||
}
|
||||
|
||||
g.contextGetter = liveplugin.NewContextGetter(g.PluginContextProvider)
|
||||
channelLocalPublisher := liveplugin.NewChannelLocalPublisher(node)
|
||||
numLocalSubscribersGetter := liveplugin.NewNumLocalSubscribersGetter(node)
|
||||
g.runStreamManager = runstream.NewManager(channelLocalPublisher, numLocalSubscribersGetter, g.contextGetter)
|
||||
|
||||
// Initialize the main features
|
||||
dash := &features.DashboardHandler{
|
||||
Publisher: g.Publish,
|
||||
ClientCount: g.ClientCount,
|
||||
}
|
||||
g.storage = database.NewStorage(g.SQLStore, g.CacheService)
|
||||
g.GrafanaScope.Dashboards = dash
|
||||
g.GrafanaScope.Features["dashboard"] = dash
|
||||
g.GrafanaScope.Features["broadcast"] = features.NewBroadcastRunner(g.storage)
|
||||
channelLocalPublisher := liveplugin.NewChannelLocalPublisher(node, nil)
|
||||
|
||||
var managedStreamRunner *managedstream.Runner
|
||||
if g.IsHA() {
|
||||
@ -163,16 +151,58 @@ func ProvideService(plugCtxProvider *plugincontext.Provider, cfg *setting.Cfg, r
|
||||
}
|
||||
managedStreamRunner = managedstream.NewRunner(
|
||||
g.Publish,
|
||||
channelLocalPublisher,
|
||||
managedstream.NewRedisFrameCache(redisClient),
|
||||
)
|
||||
} else {
|
||||
managedStreamRunner = managedstream.NewRunner(
|
||||
g.Publish,
|
||||
channelLocalPublisher,
|
||||
managedstream.NewMemoryFrameCache(),
|
||||
)
|
||||
}
|
||||
|
||||
g.ManagedStreamRunner = managedStreamRunner
|
||||
if enabled := g.Cfg.FeatureToggles["live-pipeline"]; enabled {
|
||||
var builder pipeline.RuleBuilder
|
||||
if os.Getenv("GF_LIVE_DEV_BUILDER") != "" {
|
||||
builder = &pipeline.DevRuleBuilder{
|
||||
Node: node,
|
||||
ManagedStream: g.ManagedStreamRunner,
|
||||
FrameStorage: pipeline.NewFrameStorage(),
|
||||
}
|
||||
} else {
|
||||
storage := &pipeline.FileStorage{}
|
||||
g.channelRuleStorage = storage
|
||||
builder = &pipeline.StorageRuleBuilder{
|
||||
Node: node,
|
||||
ManagedStream: g.ManagedStreamRunner,
|
||||
FrameStorage: pipeline.NewFrameStorage(),
|
||||
RuleStorage: storage,
|
||||
}
|
||||
}
|
||||
channelRuleGetter := pipeline.NewCacheSegmentedTree(builder)
|
||||
g.Pipeline, err = pipeline.New(channelRuleGetter)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
g.contextGetter = liveplugin.NewContextGetter(g.PluginContextProvider)
|
||||
pipelinedChannelLocalPublisher := liveplugin.NewChannelLocalPublisher(node, g.Pipeline)
|
||||
numLocalSubscribersGetter := liveplugin.NewNumLocalSubscribersGetter(node)
|
||||
g.runStreamManager = runstream.NewManager(pipelinedChannelLocalPublisher, numLocalSubscribersGetter, g.contextGetter)
|
||||
|
||||
// Initialize the main features
|
||||
dash := &features.DashboardHandler{
|
||||
Publisher: g.Publish,
|
||||
ClientCount: g.ClientCount,
|
||||
}
|
||||
g.storage = database.NewStorage(g.SQLStore, g.CacheService)
|
||||
g.GrafanaScope.Dashboards = dash
|
||||
g.GrafanaScope.Features["dashboard"] = dash
|
||||
g.GrafanaScope.Features["broadcast"] = features.NewBroadcastRunner(g.storage)
|
||||
|
||||
g.surveyCaller = survey.NewCaller(managedStreamRunner, node)
|
||||
err = g.surveyCaller.SetupHandlers()
|
||||
if err != nil {
|
||||
@ -323,6 +353,8 @@ type GrafanaLive struct {
|
||||
GrafanaScope CoreGrafanaScope
|
||||
|
||||
ManagedStreamRunner *managedstream.Runner
|
||||
Pipeline *pipeline.Pipeline
|
||||
channelRuleStorage pipeline.RuleStorage
|
||||
|
||||
contextGetter *liveplugin.ContextGetter
|
||||
runStreamManager *runstream.Manager
|
||||
@ -354,6 +386,10 @@ func (g *GrafanaLive) Run(ctx context.Context) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (g *GrafanaLive) ChannelRuleStorage() pipeline.RuleStorage {
|
||||
return g.channelRuleStorage
|
||||
}
|
||||
|
||||
func getCheckOriginFunc(appURL *url.URL, originPatterns []string, originGlobs []glob.Glob) func(r *http.Request) bool {
|
||||
return func(r *http.Request) bool {
|
||||
origin := r.Header.Get("Origin")
|
||||
@ -679,7 +715,7 @@ func (g *GrafanaLive) handlePluginScope(_ *models.SignedInUser, namespace string
|
||||
}
|
||||
|
||||
func (g *GrafanaLive) handleStreamScope(u *models.SignedInUser, namespace string) (models.ChannelHandlerFactory, error) {
|
||||
return g.ManagedStreamRunner.GetOrCreateStream(u.OrgId, namespace)
|
||||
return g.ManagedStreamRunner.GetOrCreateStream(u.OrgId, live.ScopeStream, namespace)
|
||||
}
|
||||
|
||||
func (g *GrafanaLive) handleDatasourceScope(user *models.SignedInUser, namespace string) (models.ChannelHandlerFactory, error) {
|
||||
@ -791,6 +827,28 @@ func (g *GrafanaLive) HandleInfoHTTP(ctx *models.ReqContext) response.Response {
|
||||
})
|
||||
}
|
||||
|
||||
// HandleChannelRulesListHTTP ...
|
||||
func (g *GrafanaLive) HandleChannelRulesListHTTP(c *models.ReqContext) response.Response {
|
||||
result, err := g.channelRuleStorage.ListChannelRules(c.Req.Context(), c.OrgId)
|
||||
if err != nil {
|
||||
return response.Error(http.StatusInternalServerError, "Failed to get channel rules", err)
|
||||
}
|
||||
return response.JSON(http.StatusOK, util.DynMap{
|
||||
"rules": result,
|
||||
})
|
||||
}
|
||||
|
||||
// HandleRemoteWriteBackendsListHTTP ...
|
||||
func (g *GrafanaLive) HandleRemoteWriteBackendsListHTTP(c *models.ReqContext) response.Response {
|
||||
result, err := g.channelRuleStorage.ListRemoteWriteBackends(c.Req.Context(), c.OrgId)
|
||||
if err != nil {
|
||||
return response.Error(http.StatusInternalServerError, "Failed to get channel rules", err)
|
||||
}
|
||||
return response.JSON(http.StatusOK, util.DynMap{
|
||||
"remoteWriteBackends": result,
|
||||
})
|
||||
}
|
||||
|
||||
// Write to the standard log15 logger
|
||||
func handleLog(msg centrifuge.LogEntry) {
|
||||
arr := make([]interface{}, 0)
|
||||
|
@ -1,24 +1,42 @@
|
||||
package liveplugin
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/grafana/grafana/pkg/models"
|
||||
"github.com/grafana/grafana/pkg/plugins/plugincontext"
|
||||
"github.com/grafana/grafana/pkg/services/live/orgchannel"
|
||||
"github.com/grafana/grafana/pkg/services/live/pipeline"
|
||||
|
||||
"github.com/centrifugal/centrifuge"
|
||||
"github.com/grafana/grafana-plugin-sdk-go/backend"
|
||||
)
|
||||
|
||||
type ChannelLocalPublisher struct {
|
||||
node *centrifuge.Node
|
||||
node *centrifuge.Node
|
||||
pipeline *pipeline.Pipeline
|
||||
}
|
||||
|
||||
func NewChannelLocalPublisher(node *centrifuge.Node) *ChannelLocalPublisher {
|
||||
return &ChannelLocalPublisher{node: node}
|
||||
func NewChannelLocalPublisher(node *centrifuge.Node, pipeline *pipeline.Pipeline) *ChannelLocalPublisher {
|
||||
return &ChannelLocalPublisher{node: node, pipeline: pipeline}
|
||||
}
|
||||
|
||||
func (p *ChannelLocalPublisher) PublishLocal(channel string, data []byte) error {
|
||||
if p.pipeline != nil {
|
||||
orgID, channelID, err := orgchannel.StripOrgID(channel)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
ok, err := p.pipeline.ProcessInput(context.Background(), orgID, channelID, data)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if ok {
|
||||
// if rule found – we are done here. If not - fall through and process as usual.
|
||||
return nil
|
||||
}
|
||||
}
|
||||
pub := ¢rifuge.Publication{
|
||||
Data: data,
|
||||
}
|
||||
|
@ -10,7 +10,7 @@ import (
|
||||
type FrameCache interface {
|
||||
// GetActiveChannels returns active managed stream channels with JSON schema.
|
||||
GetActiveChannels(orgID int64) (map[string]json.RawMessage, error)
|
||||
// GetFrame returns full JSON frame for a path.
|
||||
// GetFrame returns full JSON frame for a channel in org.
|
||||
GetFrame(orgID int64, channel string) (json.RawMessage, bool, error)
|
||||
// Update updates frame cache and returns true if schema changed.
|
||||
Update(orgID int64, channel string, frameJson data.FrameJSONCache) (bool, error)
|
||||
|
@ -8,6 +8,8 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/grafana/grafana/pkg/services/live/orgchannel"
|
||||
|
||||
"github.com/grafana/grafana-plugin-sdk-go/backend"
|
||||
"github.com/grafana/grafana-plugin-sdk-go/data"
|
||||
"github.com/grafana/grafana-plugin-sdk-go/live"
|
||||
@ -19,20 +21,39 @@ var (
|
||||
logger = log.New("live.managed_stream")
|
||||
)
|
||||
|
||||
// Runner keeps ManagedStream per streamID.
|
||||
// If message comes from a plugin:
|
||||
// * it's simply sent to local subscribers without any additional steps
|
||||
// * if there is RULE then may be processed in some way
|
||||
// * important to keep a message in the original channel
|
||||
// * client subscribed to ds/<UID>/xxx
|
||||
//
|
||||
// What we want to build:
|
||||
// * Stream scope not hardcoded and determined by the caller
|
||||
// * So it's possible to use managed stream from plugins
|
||||
// * The problem is HA – at moment several plugins on different nodes publish same messages
|
||||
// * Can use in-memory managed stream for plugins with local subscribers publish, use HA-managed stream for HTTP/WS
|
||||
// * Eventually maintain a single connection with a plugin over a channel leader selection.
|
||||
|
||||
// Runner keeps NamespaceStream per namespace.
|
||||
type Runner struct {
|
||||
mu sync.RWMutex
|
||||
streams map[int64]map[string]*ManagedStream
|
||||
publisher models.ChannelPublisher
|
||||
frameCache FrameCache
|
||||
mu sync.RWMutex
|
||||
streams map[int64]map[string]*NamespaceStream
|
||||
publisher models.ChannelPublisher
|
||||
localPublisher LocalPublisher
|
||||
frameCache FrameCache
|
||||
}
|
||||
|
||||
type LocalPublisher interface {
|
||||
PublishLocal(channel string, data []byte) error
|
||||
}
|
||||
|
||||
// NewRunner creates new Runner.
|
||||
func NewRunner(publisher models.ChannelPublisher, frameCache FrameCache) *Runner {
|
||||
func NewRunner(publisher models.ChannelPublisher, localPublisher LocalPublisher, frameCache FrameCache) *Runner {
|
||||
return &Runner{
|
||||
publisher: publisher,
|
||||
streams: map[int64]map[string]*ManagedStream{},
|
||||
frameCache: frameCache,
|
||||
publisher: publisher,
|
||||
localPublisher: localPublisher,
|
||||
streams: map[int64]map[string]*NamespaceStream{},
|
||||
frameCache: frameCache,
|
||||
}
|
||||
}
|
||||
|
||||
@ -49,7 +70,8 @@ func (r *Runner) GetManagedChannels(orgID int64) ([]*ManagedChannel, error) {
|
||||
}
|
||||
// Enrich with minute rate.
|
||||
channel, _ := live.ParseChannel(managedChannel.Channel)
|
||||
namespaceStream, ok := r.streams[orgID][channel.Namespace]
|
||||
prefix := channel.Scope + "/" + channel.Namespace
|
||||
namespaceStream, ok := r.streams[orgID][prefix]
|
||||
if ok {
|
||||
managedChannel.MinuteRate = namespaceStream.minuteRate(channel.Path)
|
||||
}
|
||||
@ -86,46 +108,34 @@ func (r *Runner) GetManagedChannels(orgID int64) ([]*ManagedChannel, error) {
|
||||
return channels, nil
|
||||
}
|
||||
|
||||
// Streams returns a map of active managed streams (per streamID).
|
||||
func (r *Runner) Streams(orgID int64) map[string]*ManagedStream {
|
||||
r.mu.RLock()
|
||||
defer r.mu.RUnlock()
|
||||
if _, ok := r.streams[orgID]; !ok {
|
||||
return map[string]*ManagedStream{}
|
||||
}
|
||||
streams := make(map[string]*ManagedStream, len(r.streams[orgID]))
|
||||
for k, v := range r.streams[orgID] {
|
||||
streams[k] = v
|
||||
}
|
||||
return streams
|
||||
}
|
||||
|
||||
// GetOrCreateStream -- for now this will create new manager for each key.
|
||||
// Eventually, the stream behavior will need to be configured explicitly
|
||||
func (r *Runner) GetOrCreateStream(orgID int64, streamID string) (*ManagedStream, error) {
|
||||
func (r *Runner) GetOrCreateStream(orgID int64, scope string, namespace string) (*NamespaceStream, error) {
|
||||
r.mu.Lock()
|
||||
defer r.mu.Unlock()
|
||||
_, ok := r.streams[orgID]
|
||||
if !ok {
|
||||
r.streams[orgID] = map[string]*ManagedStream{}
|
||||
r.streams[orgID] = map[string]*NamespaceStream{}
|
||||
}
|
||||
s, ok := r.streams[orgID][streamID]
|
||||
prefix := scope + "/" + namespace
|
||||
s, ok := r.streams[orgID][prefix]
|
||||
if !ok {
|
||||
s = NewManagedStream(streamID, orgID, r.publisher, r.frameCache)
|
||||
r.streams[orgID][streamID] = s
|
||||
s = NewNamespaceStream(orgID, scope, namespace, r.publisher, r.localPublisher, r.frameCache)
|
||||
r.streams[orgID][prefix] = s
|
||||
}
|
||||
return s, nil
|
||||
}
|
||||
|
||||
// ManagedStream holds the state of a managed stream.
|
||||
type ManagedStream struct {
|
||||
id string
|
||||
orgID int64
|
||||
start time.Time
|
||||
publisher models.ChannelPublisher
|
||||
frameCache FrameCache
|
||||
rateMu sync.RWMutex
|
||||
rates map[string][60]rateEntry
|
||||
// NamespaceStream holds the state of a managed stream.
|
||||
type NamespaceStream struct {
|
||||
orgID int64
|
||||
scope string
|
||||
namespace string
|
||||
publisher models.ChannelPublisher
|
||||
localPublisher LocalPublisher
|
||||
frameCache FrameCache
|
||||
rateMu sync.RWMutex
|
||||
rates map[string][60]rateEntry
|
||||
}
|
||||
|
||||
type rateEntry struct {
|
||||
@ -133,18 +143,6 @@ type rateEntry struct {
|
||||
count int32
|
||||
}
|
||||
|
||||
// NewManagedStream creates new ManagedStream.
|
||||
func NewManagedStream(id string, orgID int64, publisher models.ChannelPublisher, schemaUpdater FrameCache) *ManagedStream {
|
||||
return &ManagedStream{
|
||||
id: id,
|
||||
orgID: orgID,
|
||||
start: time.Now(),
|
||||
publisher: publisher,
|
||||
frameCache: schemaUpdater,
|
||||
rates: map[string][60]rateEntry{},
|
||||
}
|
||||
}
|
||||
|
||||
// ManagedChannel represents a managed stream.
|
||||
type ManagedChannel struct {
|
||||
Channel string `json:"channel"`
|
||||
@ -152,16 +150,30 @@ type ManagedChannel struct {
|
||||
Data json.RawMessage `json:"data"`
|
||||
}
|
||||
|
||||
// NewNamespaceStream creates new NamespaceStream.
|
||||
func NewNamespaceStream(orgID int64, scope string, namespace string, publisher models.ChannelPublisher, localPublisher LocalPublisher, schemaUpdater FrameCache) *NamespaceStream {
|
||||
return &NamespaceStream{
|
||||
orgID: orgID,
|
||||
scope: scope,
|
||||
namespace: namespace,
|
||||
publisher: publisher,
|
||||
localPublisher: localPublisher,
|
||||
frameCache: schemaUpdater,
|
||||
rates: map[string][60]rateEntry{},
|
||||
}
|
||||
}
|
||||
|
||||
// Push sends frame to the stream and saves it for later retrieval by subscribers.
|
||||
// unstableSchema flag can be set to disable schema caching for a path.
|
||||
func (s *ManagedStream) Push(path string, frame *data.Frame) error {
|
||||
// * Saves the entire frame to cache.
|
||||
// * If schema has been changed sends entire frame to channel, otherwise only data.
|
||||
func (s *NamespaceStream) Push(path string, frame *data.Frame) error {
|
||||
jsonFrameCache, err := data.FrameToJSONCache(frame)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// The channel this will be posted into.
|
||||
channel := live.Channel{Scope: live.ScopeStream, Namespace: s.id, Path: path}.String()
|
||||
channel := live.Channel{Scope: s.scope, Namespace: s.namespace, Path: path}.String()
|
||||
|
||||
isUpdated, err := s.frameCache.Update(s.orgID, channel, jsonFrameCache)
|
||||
if err != nil {
|
||||
@ -179,10 +191,13 @@ func (s *ManagedStream) Push(path string, frame *data.Frame) error {
|
||||
|
||||
logger.Debug("Publish data to channel", "channel", channel, "dataLength", len(frameJSON))
|
||||
s.incRate(path, time.Now().Unix())
|
||||
if s.scope == live.ScopeDatasource || s.scope == live.ScopePlugin {
|
||||
return s.localPublisher.PublishLocal(orgchannel.PrependOrgID(s.orgID, channel), frameJSON)
|
||||
}
|
||||
return s.publisher(s.orgID, channel, frameJSON)
|
||||
}
|
||||
|
||||
func (s *ManagedStream) incRate(path string, nowUnix int64) {
|
||||
func (s *NamespaceStream) incRate(path string, nowUnix int64) {
|
||||
s.rateMu.Lock()
|
||||
pathRate, ok := s.rates[path]
|
||||
if !ok {
|
||||
@ -199,7 +214,7 @@ func (s *ManagedStream) incRate(path string, nowUnix int64) {
|
||||
s.rateMu.Unlock()
|
||||
}
|
||||
|
||||
func (s *ManagedStream) minuteRate(path string) int64 {
|
||||
func (s *NamespaceStream) minuteRate(path string) int64 {
|
||||
var total int64
|
||||
s.rateMu.RLock()
|
||||
defer s.rateMu.RUnlock()
|
||||
@ -215,11 +230,11 @@ func (s *ManagedStream) minuteRate(path string) int64 {
|
||||
return total
|
||||
}
|
||||
|
||||
func (s *ManagedStream) GetHandlerForPath(_ string) (models.ChannelHandler, error) {
|
||||
func (s *NamespaceStream) GetHandlerForPath(_ string) (models.ChannelHandler, error) {
|
||||
return s, nil
|
||||
}
|
||||
|
||||
func (s *ManagedStream) OnSubscribe(_ context.Context, u *models.SignedInUser, e models.SubscribeEvent) (models.SubscribeReply, backend.SubscribeStreamStatus, error) {
|
||||
func (s *NamespaceStream) OnSubscribe(_ context.Context, u *models.SignedInUser, e models.SubscribeEvent) (models.SubscribeReply, backend.SubscribeStreamStatus, error) {
|
||||
reply := models.SubscribeReply{}
|
||||
frameJSON, ok, err := s.frameCache.GetFrame(u.OrgId, e.Channel)
|
||||
if err != nil {
|
||||
@ -231,6 +246,6 @@ func (s *ManagedStream) OnSubscribe(_ context.Context, u *models.SignedInUser, e
|
||||
return reply, backend.SubscribeStreamStatusOK, nil
|
||||
}
|
||||
|
||||
func (s *ManagedStream) OnPublish(_ context.Context, _ *models.SignedInUser, _ models.PublishEvent) (models.PublishReply, backend.PublishStreamStatus, error) {
|
||||
func (s *NamespaceStream) OnPublish(_ context.Context, _ *models.SignedInUser, _ models.PublishEvent) (models.PublishReply, backend.PublishStreamStatus, error) {
|
||||
return models.PublishReply{}, backend.PublishStreamStatusPermissionDenied, nil
|
||||
}
|
||||
|
@ -18,13 +18,13 @@ func (p *testPublisher) publish(_ int64, _ string, _ []byte) error {
|
||||
|
||||
func TestNewManagedStream(t *testing.T) {
|
||||
publisher := &testPublisher{t: t}
|
||||
c := NewManagedStream("a", 1, publisher.publish, NewMemoryFrameCache())
|
||||
c := NewNamespaceStream(1, "stream", "a", publisher.publish, nil, NewMemoryFrameCache())
|
||||
require.NotNil(t, c)
|
||||
}
|
||||
|
||||
func TestManagedStreamMinuteRate(t *testing.T) {
|
||||
publisher := &testPublisher{t: t}
|
||||
c := NewManagedStream("a", 1, publisher.publish, NewMemoryFrameCache())
|
||||
c := NewNamespaceStream(1, "stream", "a", publisher.publish, nil, NewMemoryFrameCache())
|
||||
require.NotNil(t, c)
|
||||
|
||||
c.incRate("test1", time.Now().Unix())
|
||||
@ -47,10 +47,10 @@ func TestManagedStreamMinuteRate(t *testing.T) {
|
||||
func TestGetManagedStreams(t *testing.T) {
|
||||
publisher := &testPublisher{t: t}
|
||||
frameCache := NewMemoryFrameCache()
|
||||
runner := NewRunner(publisher.publish, frameCache)
|
||||
s1, err := runner.GetOrCreateStream(1, "test1")
|
||||
runner := NewRunner(publisher.publish, nil, frameCache)
|
||||
s1, err := runner.GetOrCreateStream(1, "stream", "test1")
|
||||
require.NoError(t, err)
|
||||
s2, err := runner.GetOrCreateStream(1, "test2")
|
||||
s2, err := runner.GetOrCreateStream(1, "stream", "test2")
|
||||
require.NoError(t, err)
|
||||
|
||||
managedChannels, err := runner.GetManagedChannels(1)
|
||||
@ -74,7 +74,7 @@ func TestGetManagedStreams(t *testing.T) {
|
||||
require.Equal(t, "stream/test2/cpu1", managedChannels[5].Channel)
|
||||
|
||||
// Different org.
|
||||
s3, err := runner.GetOrCreateStream(2, "test1")
|
||||
s3, err := runner.GetOrCreateStream(2, "stream", "test1")
|
||||
require.NoError(t, err)
|
||||
err = s3.Push("cpu1", data.NewFrame("cpu1"))
|
||||
require.NoError(t, err)
|
||||
|
12
pkg/services/live/pipeline/condition_checker.go
Normal file
12
pkg/services/live/pipeline/condition_checker.go
Normal file
@ -0,0 +1,12 @@
|
||||
package pipeline
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/grafana/grafana-plugin-sdk-go/data"
|
||||
)
|
||||
|
||||
// ConditionChecker checks conditions in context of data.Frame being processed.
|
||||
type ConditionChecker interface {
|
||||
CheckCondition(ctx context.Context, frame *data.Frame) (bool, error)
|
||||
}
|
45
pkg/services/live/pipeline/condition_checker_multiple.go
Normal file
45
pkg/services/live/pipeline/condition_checker_multiple.go
Normal file
@ -0,0 +1,45 @@
|
||||
package pipeline
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/grafana/grafana-plugin-sdk-go/data"
|
||||
)
|
||||
|
||||
// ConditionType represents multiple condition operator type.
|
||||
type ConditionType string
|
||||
|
||||
const (
|
||||
ConditionAll ConditionType = "all"
|
||||
ConditionAny ConditionType = "any"
|
||||
)
|
||||
|
||||
// MultipleConditionChecker can check multiple conditions according to ConditionType.
|
||||
type MultipleConditionChecker struct {
|
||||
Type ConditionType
|
||||
Conditions []ConditionChecker
|
||||
}
|
||||
|
||||
func (m MultipleConditionChecker) CheckCondition(ctx context.Context, frame *data.Frame) (bool, error) {
|
||||
for _, c := range m.Conditions {
|
||||
ok, err := c.CheckCondition(ctx, frame)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if ok && m.Type == ConditionAny {
|
||||
return true, nil
|
||||
}
|
||||
if !ok && m.Type == ConditionAll {
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
if m.Type == ConditionAny {
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// NewMultipleConditionChecker creates new MultipleConditionChecker.
|
||||
func NewMultipleConditionChecker(conditionType ConditionType, conditions ...ConditionChecker) *MultipleConditionChecker {
|
||||
return &MultipleConditionChecker{Type: conditionType, Conditions: conditions}
|
||||
}
|
64
pkg/services/live/pipeline/condition_number_compare.go
Normal file
64
pkg/services/live/pipeline/condition_number_compare.go
Normal file
@ -0,0 +1,64 @@
|
||||
package pipeline
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/grafana/grafana-plugin-sdk-go/data"
|
||||
)
|
||||
|
||||
// NumberCompareCondition can compare numbers.
|
||||
type NumberCompareCondition struct {
|
||||
FieldName string
|
||||
Op NumberCompareOp
|
||||
Value float64
|
||||
}
|
||||
|
||||
// NumberCompareOp is an comparison operator.
|
||||
type NumberCompareOp string
|
||||
|
||||
// Known NumberCompareOp types.
|
||||
const (
|
||||
NumberCompareOpLt NumberCompareOp = "lt"
|
||||
NumberCompareOpGt NumberCompareOp = "gt"
|
||||
NumberCompareOpLte NumberCompareOp = "lte"
|
||||
NumberCompareOpGte NumberCompareOp = "gte"
|
||||
NumberCompareOpEq NumberCompareOp = "eq"
|
||||
NumberCompareOpNe NumberCompareOp = "ne"
|
||||
)
|
||||
|
||||
func (f NumberCompareCondition) CheckCondition(_ context.Context, frame *data.Frame) (bool, error) {
|
||||
for _, field := range frame.Fields {
|
||||
// TODO: support other numeric types.
|
||||
if field.Name == f.FieldName && (field.Type() == data.FieldTypeNullableFloat64) {
|
||||
value, ok := field.At(0).(*float64)
|
||||
if !ok {
|
||||
return false, fmt.Errorf("unexpected value type: %T", field.At(0))
|
||||
}
|
||||
if value == nil {
|
||||
return false, nil
|
||||
}
|
||||
switch f.Op {
|
||||
case NumberCompareOpGt:
|
||||
return *value > f.Value, nil
|
||||
case NumberCompareOpGte:
|
||||
return *value >= f.Value, nil
|
||||
case NumberCompareOpLte:
|
||||
return *value <= f.Value, nil
|
||||
case NumberCompareOpLt:
|
||||
return *value < f.Value, nil
|
||||
case NumberCompareOpEq:
|
||||
return *value == f.Value, nil
|
||||
case NumberCompareOpNe:
|
||||
return *value != f.Value, nil
|
||||
default:
|
||||
return false, fmt.Errorf("unknown comparison operator: %s", f.Op)
|
||||
}
|
||||
}
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
|
||||
func NewNumberCompareCondition(fieldName string, op NumberCompareOp, value float64) *NumberCompareCondition {
|
||||
return &NumberCompareCondition{FieldName: fieldName, Op: op, Value: value}
|
||||
}
|
321
pkg/services/live/pipeline/config.go
Normal file
321
pkg/services/live/pipeline/config.go
Normal file
@ -0,0 +1,321 @@
|
||||
package pipeline
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/grafana/grafana/pkg/services/live/managedstream"
|
||||
|
||||
"github.com/centrifugal/centrifuge"
|
||||
)
|
||||
|
||||
// JsonAutoSettings is a placeholder for "jsonAuto" converter settings.
type JsonAutoSettings struct{}

// ConverterConfig is a polymorphic converter configuration: Type selects
// which one of the per-converter config pointers below applies.
type ConverterConfig struct {
	Type                      string                     `json:"type"`
	AutoJsonConverterConfig   *AutoJsonConverterConfig   `json:"jsonAuto,omitempty"`
	ExactJsonConverterConfig  *ExactJsonConverterConfig  `json:"jsonExact,omitempty"`
	AutoInfluxConverterConfig *AutoInfluxConverterConfig `json:"influxAuto,omitempty"`
	JsonFrameConverterConfig  *JsonFrameConverterConfig  `json:"jsonFrame,omitempty"`
}

// ProcessorConfig is a polymorphic processor configuration: Type selects
// which one of the per-processor config pointers below applies.
type ProcessorConfig struct {
	Type                      string                     `json:"type"`
	DropFieldsProcessorConfig *DropFieldsProcessorConfig `json:"dropFields,omitempty"`
	KeepFieldsProcessorConfig *KeepFieldsProcessorConfig `json:"keepFields,omitempty"`
	MultipleProcessorConfig   *MultipleProcessorConfig   `json:"multiple,omitempty"`
}

// MultipleProcessorConfig configures a processor composed of several
// sub-processors applied in order.
type MultipleProcessorConfig struct {
	Processors []ProcessorConfig `json:"processors"`
}

// MultipleOutputterConfig configures an outputter that fans out to
// several sub-outputters.
type MultipleOutputterConfig struct {
	Outputters []OutputterConfig `json:"outputs"`
}

// ManagedStreamOutputConfig is a placeholder for managed stream output settings.
type ManagedStreamOutputConfig struct{}

// ConditionalOutputConfig configures an output that only fires when the
// associated condition holds.
type ConditionalOutputConfig struct {
	Condition *ConditionCheckerConfig `json:"condition"`
	Outputter *OutputterConfig        `json:"output"`
}

// RemoteWriteOutputConfig references a remote write backend by its UID.
type RemoteWriteOutputConfig struct {
	UID string `json:"uid"`
}

// OutputterConfig is a polymorphic outputter configuration: Type selects
// which one of the per-output config pointers below applies.
type OutputterConfig struct {
	Type                    string                     `json:"type"`
	ManagedStreamConfig     *ManagedStreamOutputConfig `json:"managedStream,omitempty"`
	MultipleOutputterConfig *MultipleOutputterConfig   `json:"multiple,omitempty"`
	RedirectOutputConfig    *RedirectOutputConfig      `json:"redirect,omitempty"`
	ConditionalOutputConfig *ConditionalOutputConfig   `json:"conditional,omitempty"`
	ThresholdOutputConfig   *ThresholdOutputConfig     `json:"threshold,omitempty"`
	RemoteWriteOutputConfig *RemoteWriteOutputConfig   `json:"remoteWrite,omitempty"`
	ChangeLogOutputConfig   *ChangeLogOutputConfig     `json:"changeLog,omitempty"`
}

// ChannelRuleSettings groups the converter, processor and output stages of
// a channel rule; each stage is optional.
type ChannelRuleSettings struct {
	Converter *ConverterConfig `json:"converter,omitempty"`
	Processor *ProcessorConfig `json:"processor,omitempty"`
	Outputter *OutputterConfig `json:"output,omitempty"`
}

// ChannelRule maps a channel pattern to pipeline settings for one org.
type ChannelRule struct {
	OrgId    int64               `json:"-"`
	Pattern  string              `json:"pattern"`
	Settings ChannelRuleSettings `json:"settings"`
}

// RemoteWriteBackend describes a remote write destination for one org,
// identified by UID.
type RemoteWriteBackend struct {
	OrgId    int64              `json:"-"`
	UID      string             `json:"uid"`
	Settings *RemoteWriteConfig `json:"settings"`
}

// RemoteWriteBackends is a JSON container for a list of RemoteWriteBackend.
type RemoteWriteBackends struct {
	Backends []RemoteWriteBackend `json:"remoteWriteBackends"`
}

// ChannelRules is a JSON container for a list of ChannelRule.
type ChannelRules struct {
	Rules []ChannelRule `json:"rules"`
}

// MultipleConditionCheckerConfig configures a condition combining several
// sub-conditions with ConditionType semantics (any/all).
type MultipleConditionCheckerConfig struct {
	Type       ConditionType            `json:"type"`
	Conditions []ConditionCheckerConfig `json:"conditions"`
}

// NumberCompareConditionConfig configures a numeric comparison condition.
type NumberCompareConditionConfig struct {
	FieldName string          `json:"fieldName"`
	Op        NumberCompareOp `json:"op"`
	Value     float64         `json:"value"`
}

// ConditionCheckerConfig is a polymorphic condition configuration: Type
// selects which one of the per-condition config pointers below applies.
type ConditionCheckerConfig struct {
	Type                           string                          `json:"type"`
	MultipleConditionCheckerConfig *MultipleConditionCheckerConfig `json:"multiple,omitempty"`
	NumberCompareConditionConfig   *NumberCompareConditionConfig   `json:"numberCompare,omitempty"`
}

// RuleStorage abstracts loading of channel rules and remote write backends
// for an organization.
type RuleStorage interface {
	ListRemoteWriteBackends(_ context.Context, orgID int64) ([]RemoteWriteBackend, error)
	ListChannelRules(_ context.Context, orgID int64) ([]ChannelRule, error)
}

// StorageRuleBuilder builds LiveChannelRule values from rule configuration
// loaded via RuleStorage.
type StorageRuleBuilder struct {
	Node          *centrifuge.Node
	ManagedStream *managedstream.Runner
	FrameStorage  *FrameStorage
	RuleStorage   RuleStorage
}
|
||||
|
||||
func (f *StorageRuleBuilder) extractConverter(config *ConverterConfig) (Converter, error) {
|
||||
if config == nil {
|
||||
return nil, nil
|
||||
}
|
||||
missingConfiguration := fmt.Errorf("missing configuration for %s", config.Type)
|
||||
switch config.Type {
|
||||
case "jsonAuto":
|
||||
if config.AutoJsonConverterConfig == nil {
|
||||
return nil, missingConfiguration
|
||||
}
|
||||
return NewAutoJsonConverter(*config.AutoJsonConverterConfig), nil
|
||||
case "jsonExact":
|
||||
if config.ExactJsonConverterConfig == nil {
|
||||
return nil, missingConfiguration
|
||||
}
|
||||
return NewExactJsonConverter(*config.ExactJsonConverterConfig), nil
|
||||
case "jsonFrame":
|
||||
if config.JsonFrameConverterConfig == nil {
|
||||
return nil, missingConfiguration
|
||||
}
|
||||
return NewJsonFrameConverter(*config.JsonFrameConverterConfig), nil
|
||||
case "influxAuto":
|
||||
if config.AutoInfluxConverterConfig == nil {
|
||||
return nil, missingConfiguration
|
||||
}
|
||||
return NewAutoInfluxConverter(*config.AutoInfluxConverterConfig), nil
|
||||
default:
|
||||
return nil, fmt.Errorf("unknown converter type: %s", config.Type)
|
||||
}
|
||||
}
|
||||
|
||||
func (f *StorageRuleBuilder) extractProcessor(config *ProcessorConfig) (Processor, error) {
|
||||
if config == nil {
|
||||
return nil, nil
|
||||
}
|
||||
missingConfiguration := fmt.Errorf("missing configuration for %s", config.Type)
|
||||
switch config.Type {
|
||||
case "dropFields":
|
||||
if config.DropFieldsProcessorConfig == nil {
|
||||
return nil, missingConfiguration
|
||||
}
|
||||
return NewDropFieldsProcessor(*config.DropFieldsProcessorConfig), nil
|
||||
case "keepFields":
|
||||
if config.KeepFieldsProcessorConfig == nil {
|
||||
return nil, missingConfiguration
|
||||
}
|
||||
return NewKeepFieldsProcessor(*config.KeepFieldsProcessorConfig), nil
|
||||
case "multiple":
|
||||
if config.MultipleProcessorConfig == nil {
|
||||
return nil, missingConfiguration
|
||||
}
|
||||
var processors []Processor
|
||||
for _, outConf := range config.MultipleProcessorConfig.Processors {
|
||||
out := outConf
|
||||
proc, err := f.extractProcessor(&out)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
processors = append(processors, proc)
|
||||
}
|
||||
return NewMultipleProcessor(processors...), nil
|
||||
default:
|
||||
return nil, fmt.Errorf("unknown processor type: %s", config.Type)
|
||||
}
|
||||
}
|
||||
|
||||
func (f *StorageRuleBuilder) extractConditionChecker(config *ConditionCheckerConfig) (ConditionChecker, error) {
|
||||
if config == nil {
|
||||
return nil, nil
|
||||
}
|
||||
missingConfiguration := fmt.Errorf("missing configuration for %s", config.Type)
|
||||
switch config.Type {
|
||||
case "numberCompare":
|
||||
if config.NumberCompareConditionConfig == nil {
|
||||
return nil, missingConfiguration
|
||||
}
|
||||
c := *config.NumberCompareConditionConfig
|
||||
return NewNumberCompareCondition(c.FieldName, c.Op, c.Value), nil
|
||||
case "multiple":
|
||||
var conditions []ConditionChecker
|
||||
if config.MultipleConditionCheckerConfig == nil {
|
||||
return nil, missingConfiguration
|
||||
}
|
||||
for _, outConf := range config.MultipleConditionCheckerConfig.Conditions {
|
||||
out := outConf
|
||||
cond, err := f.extractConditionChecker(&out)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
conditions = append(conditions, cond)
|
||||
}
|
||||
return NewMultipleConditionChecker(config.MultipleConditionCheckerConfig.Type, conditions...), nil
|
||||
default:
|
||||
return nil, fmt.Errorf("unknown condition type: %s", config.Type)
|
||||
}
|
||||
}
|
||||
|
||||
// extractOutputter resolves an Outputter implementation from its config.
// remoteWriteBackends is used to resolve "remoteWrite" outputs by UID.
// A nil config yields a nil Outputter without error; composite types
// ("multiple", "conditional") recurse into this function.
func (f *StorageRuleBuilder) extractOutputter(config *OutputterConfig, remoteWriteBackends []RemoteWriteBackend) (Outputter, error) {
	if config == nil {
		return nil, nil
	}
	missingConfiguration := fmt.Errorf("missing configuration for %s", config.Type)
	switch config.Type {
	case "redirect":
		if config.RedirectOutputConfig == nil {
			return nil, missingConfiguration
		}
		return NewRedirectOutput(*config.RedirectOutputConfig), nil
	case "multiple":
		if config.MultipleOutputterConfig == nil {
			return nil, missingConfiguration
		}
		var outputters []Outputter
		for _, outConf := range config.MultipleOutputterConfig.Outputters {
			// Copy the loop variable so each recursion gets its own address.
			out := outConf
			outputter, err := f.extractOutputter(&out, remoteWriteBackends)
			if err != nil {
				return nil, err
			}
			outputters = append(outputters, outputter)
		}
		return NewMultipleOutput(outputters...), nil
	case "managedStream":
		// No per-output configuration needed: uses the shared runner.
		return NewManagedStreamOutput(f.ManagedStream), nil
	case "localSubscribers":
		// No per-output configuration needed: publishes via the Centrifuge node.
		return NewLocalSubscribersOutput(f.Node), nil
	case "conditional":
		if config.ConditionalOutputConfig == nil {
			return nil, missingConfiguration
		}
		condition, err := f.extractConditionChecker(config.ConditionalOutputConfig.Condition)
		if err != nil {
			return nil, err
		}
		outputter, err := f.extractOutputter(config.ConditionalOutputConfig.Outputter, remoteWriteBackends)
		if err != nil {
			return nil, err
		}
		return NewConditionalOutput(condition, outputter), nil
	case "threshold":
		if config.ThresholdOutputConfig == nil {
			return nil, missingConfiguration
		}
		return NewThresholdOutput(f.FrameStorage, *config.ThresholdOutputConfig), nil
	case "remoteWrite":
		if config.RemoteWriteOutputConfig == nil {
			return nil, missingConfiguration
		}
		// Resolve backend settings by UID from the org's configured backends.
		remoteWriteConfig, ok := f.getRemoteWriteConfig(config.RemoteWriteOutputConfig.UID, remoteWriteBackends)
		if !ok {
			return nil, fmt.Errorf("unknown remote write backend uid: %s", config.RemoteWriteOutputConfig.UID)
		}
		return NewRemoteWriteOutput(*remoteWriteConfig), nil
	case "changeLog":
		if config.ChangeLogOutputConfig == nil {
			return nil, missingConfiguration
		}
		return NewChangeLogOutput(f.FrameStorage, *config.ChangeLogOutputConfig), nil
	default:
		return nil, fmt.Errorf("unknown output type: %s", config.Type)
	}
}
|
||||
|
||||
func (f *StorageRuleBuilder) getRemoteWriteConfig(uid string, remoteWriteBackends []RemoteWriteBackend) (*RemoteWriteConfig, bool) {
|
||||
for _, rwb := range remoteWriteBackends {
|
||||
if rwb.UID == uid {
|
||||
return rwb.Settings, true
|
||||
}
|
||||
}
|
||||
return nil, false
|
||||
}
|
||||
|
||||
func (f *StorageRuleBuilder) BuildRules(ctx context.Context, orgID int64) ([]*LiveChannelRule, error) {
|
||||
channelRules, err := f.RuleStorage.ListChannelRules(ctx, orgID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
remoteWriteBackends, err := f.RuleStorage.ListRemoteWriteBackends(ctx, orgID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var rules []*LiveChannelRule
|
||||
|
||||
for _, ruleConfig := range channelRules {
|
||||
rule := &LiveChannelRule{
|
||||
OrgId: orgID,
|
||||
Pattern: ruleConfig.Pattern,
|
||||
}
|
||||
var err error
|
||||
rule.Converter, err = f.extractConverter(ruleConfig.Settings.Converter)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
rule.Processor, err = f.extractProcessor(ruleConfig.Settings.Processor)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
rule.Outputter, err = f.extractOutputter(ruleConfig.Settings.Outputter, remoteWriteBackends)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
rules = append(rules, rule)
|
||||
}
|
||||
|
||||
return rules, nil
|
||||
}
|
38
pkg/services/live/pipeline/converter_influx_auto.go
Normal file
38
pkg/services/live/pipeline/converter_influx_auto.go
Normal file
@ -0,0 +1,38 @@
|
||||
package pipeline
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/grafana/grafana/pkg/services/live/convert"
|
||||
)
|
||||
|
||||
// AutoInfluxConverterConfig configures AutoInfluxConverter.
type AutoInfluxConverterConfig struct {
	// FrameFormat is passed through to the underlying convert.Converter
	// (e.g. "labels_column" — see the convert package for supported values).
	FrameFormat string `json:"frameFormat"`
}

// AutoInfluxConverter decodes Influx line protocol input and transforms it
// to several ChannelFrame objects where Channel is constructed from original
// channel + / + <metric_name>.
type AutoInfluxConverter struct {
	config    AutoInfluxConverterConfig
	converter *convert.Converter
}
|
||||
|
||||
func NewAutoInfluxConverter(config AutoInfluxConverterConfig) *AutoInfluxConverter {
|
||||
return &AutoInfluxConverter{config: config, converter: convert.NewConverter()}
|
||||
}
|
||||
|
||||
func (i AutoInfluxConverter) Convert(_ context.Context, vars Vars, body []byte) ([]*ChannelFrame, error) {
|
||||
frameWrappers, err := i.converter.Convert(body, i.config.FrameFormat)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
channelFrames := make([]*ChannelFrame, 0, len(frameWrappers))
|
||||
for _, fw := range frameWrappers {
|
||||
channelFrames = append(channelFrames, &ChannelFrame{
|
||||
Channel: vars.Channel + "/" + fw.Key(),
|
||||
Frame: fw.Frame(),
|
||||
})
|
||||
}
|
||||
return channelFrames, nil
|
||||
}
|
39
pkg/services/live/pipeline/converter_json_auto.go
Normal file
39
pkg/services/live/pipeline/converter_json_auto.go
Normal file
@ -0,0 +1,39 @@
|
||||
package pipeline
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
)
|
||||
|
||||
// AutoJsonConverterConfig configures AutoJsonConverter.
type AutoJsonConverterConfig struct {
	// FieldTips maps JSON keys to field definitions, letting users override
	// the automatically inferred field handling (e.g. to preserve nulls).
	FieldTips map[string]Field `json:"fieldTips"`
}

// AutoJsonConverter converts a JSON document to a single frame, inferring
// field types automatically (see Convert for the rules).
type AutoJsonConverter struct {
	config AutoJsonConverterConfig
	// nowTimeFunc overrides the time source; nil means time.Now (set in tests).
	nowTimeFunc func() time.Time
}
|
||||
|
||||
func NewAutoJsonConverter(c AutoJsonConverterConfig) *AutoJsonConverter {
|
||||
return &AutoJsonConverter{config: c}
|
||||
}
|
||||
|
||||
// Automatic conversion works this way:
|
||||
// * Time added automatically
|
||||
// * Nulls dropped
|
||||
// To preserve nulls we need FieldTips from a user.
|
||||
// Custom time can be injected on Processor stage theoretically.
|
||||
// Custom labels can be injected on Processor stage theoretically.
|
||||
func (c *AutoJsonConverter) Convert(_ context.Context, vars Vars, body []byte) ([]*ChannelFrame, error) {
|
||||
nowTimeFunc := c.nowTimeFunc
|
||||
if nowTimeFunc == nil {
|
||||
nowTimeFunc = time.Now
|
||||
}
|
||||
frame, err := jsonDocToFrame(vars.Path, body, c.config.FieldTips, nowTimeFunc)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return []*ChannelFrame{
|
||||
{Channel: "", Frame: frame},
|
||||
}, nil
|
||||
}
|
52
pkg/services/live/pipeline/converter_json_auto_test.go
Normal file
52
pkg/services/live/pipeline/converter_json_auto_test.go
Normal file
@ -0,0 +1,52 @@
|
||||
package pipeline
|
||||
|
||||
import (
|
||||
"context"
|
||||
"flag"
|
||||
"io/ioutil"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/grafana/grafana-plugin-sdk-go/backend"
|
||||
"github.com/grafana/grafana-plugin-sdk-go/experimental"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
var update = flag.Bool("update", false, "update golden files")
|
||||
|
||||
func loadTestJson(tb testing.TB, file string) []byte {
|
||||
tb.Helper()
|
||||
// Safe to disable, this is a test.
|
||||
// nolint:gosec
|
||||
content, err := ioutil.ReadFile(filepath.Join("testdata", file+".json"))
|
||||
require.NoError(tb, err, "expected to be able to read file")
|
||||
require.True(tb, len(content) > 0)
|
||||
return content
|
||||
}
|
||||
|
||||
func checkAutoConversion(tb testing.TB, file string) *backend.DataResponse {
|
||||
tb.Helper()
|
||||
content := loadTestJson(tb, file)
|
||||
|
||||
converter := NewAutoJsonConverter(AutoJsonConverterConfig{})
|
||||
converter.nowTimeFunc = func() time.Time {
|
||||
return time.Date(2021, 01, 01, 12, 12, 12, 0, time.UTC)
|
||||
}
|
||||
channelFrames, err := converter.Convert(context.Background(), Vars{}, content)
|
||||
require.NoError(tb, err)
|
||||
|
||||
dr := &backend.DataResponse{}
|
||||
for _, cf := range channelFrames {
|
||||
require.Empty(tb, cf.Channel)
|
||||
dr.Frames = append(dr.Frames, cf.Frame)
|
||||
}
|
||||
|
||||
err = experimental.CheckGoldenDataResponse(filepath.Join("testdata", file+".golden.txt"), dr, *update)
|
||||
require.NoError(tb, err)
|
||||
return dr
|
||||
}
|
||||
|
||||
// TestAutoJsonConverter_Convert checks automatic JSON-to-frame conversion
// against the json_auto golden file.
func TestAutoJsonConverter_Convert(t *testing.T) {
	checkAutoConversion(t, "json_auto")
}
|
159
pkg/services/live/pipeline/converter_json_exact.go
Normal file
159
pkg/services/live/pipeline/converter_json_exact.go
Normal file
@ -0,0 +1,159 @@
|
||||
package pipeline
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/grafana/grafana-plugin-sdk-go/data"
|
||||
"github.com/ohler55/ojg/jp"
|
||||
"github.com/ohler55/ojg/oj"
|
||||
)
|
||||
|
||||
// ExactJsonConverterConfig configures ExactJsonConverter.
type ExactJsonConverterConfig struct {
	// Fields describes the exact set of output fields and how each value is
	// extracted (JSON path "$...", Goja script "{...}" or the "#{now}" variable).
	Fields []Field `json:"fields"`
}

// ExactJsonConverter can convert JSON to a single data.Frame according to
// user-defined field configuration and value extraction rules.
type ExactJsonConverter struct {
	config ExactJsonConverterConfig
	// nowTimeFunc overrides the time source; nil means time.Now (set in tests).
	nowTimeFunc func() time.Time
}
|
||||
|
||||
func NewExactJsonConverter(c ExactJsonConverterConfig) *ExactJsonConverter {
|
||||
return &ExactJsonConverter{config: c}
|
||||
}
|
||||
|
||||
// Convert builds a single one-row data.Frame from the JSON body according to
// the configured fields. Each field value is extracted by one of:
//   - "$<path>"  — a JSON path evaluated against the parsed body;
//   - "{<code>}" — a Goja (JavaScript) script evaluated against the body;
//   - "#{now}"   — the current time (or c.nowTimeFunc when set).
// Labels support the JSON path and Goja forms. The Goja runtime is created
// lazily, at most once per Convert call, via sync.Once.
func (c *ExactJsonConverter) Convert(_ context.Context, vars Vars, body []byte) ([]*ChannelFrame, error) {
	obj, err := oj.Parse(body)
	if err != nil {
		return nil, err
	}

	var fields []*data.Field

	// Lazy, once-per-call Goja runtime shared by all script fields/labels.
	var initGojaOnce sync.Once
	var gojaRuntime *gojaRuntime

	for _, f := range c.config.Fields {
		// One-row field of the configured type.
		field := data.NewFieldFromFieldType(f.Type, 1)
		field.Name = f.Name
		field.Config = f.Config

		if strings.HasPrefix(f.Value, "$") {
			// JSON path.
			x, err := jp.ParseString(f.Value[1:])
			if err != nil {
				return nil, err
			}
			value := x.Get(obj)
			if len(value) == 0 {
				// Path matched nothing: store null.
				field.Set(0, nil)
			} else if len(value) == 1 {
				val := value[0]
				switch f.Type {
				case data.FieldTypeNullableFloat64:
					if val == nil {
						field.Set(0, nil)
					} else {
						// oj may parse numbers as float64 or int64.
						switch v := val.(type) {
						case float64:
							field.SetConcrete(0, v)
						case int64:
							field.SetConcrete(0, float64(v))
						default:
							return nil, errors.New("malformed float64 type for: " + f.Name)
						}
					}
				case data.FieldTypeNullableString:
					v, ok := val.(string)
					if !ok {
						return nil, errors.New("malformed string type")
					}
					field.SetConcrete(0, v)
				default:
					return nil, fmt.Errorf("unsupported field type: %s (%s)", f.Type, f.Name)
				}
			} else {
				// A field must resolve to a single scalar.
				return nil, errors.New("too many values")
			}
		} else if strings.HasPrefix(f.Value, "{") {
			// Goja script.
			script := strings.Trim(f.Value, "{}")
			var err error
			initGojaOnce.Do(func() {
				gojaRuntime, err = getRuntime(body)
			})
			if err != nil {
				return nil, err
			}
			switch f.Type {
			case data.FieldTypeNullableBool:
				v, err := gojaRuntime.getBool(script)
				if err != nil {
					return nil, err
				}
				field.SetConcrete(0, v)
			case data.FieldTypeNullableFloat64:
				v, err := gojaRuntime.getFloat64(script)
				if err != nil {
					return nil, err
				}
				field.SetConcrete(0, v)
			default:
				return nil, fmt.Errorf("unsupported field type: %s (%s)", f.Type, f.Name)
			}
		} else if f.Value == "#{now}" {
			// Variable.
			// TODO: make consistent with Grafana variables?
			nowTimeFunc := c.nowTimeFunc
			if nowTimeFunc == nil {
				nowTimeFunc = time.Now
			}
			field.SetConcrete(0, nowTimeFunc())
		}
		// NOTE(review): a value not matching any of the three forms above
		// silently leaves the field at its zero state — confirm intended.

		// Extract labels using the same JSON path / Goja rules.
		labels := map[string]string{}
		for _, label := range f.Labels {
			if strings.HasPrefix(label.Value, "$") {
				x, err := jp.ParseString(label.Value[1:])
				if err != nil {
					return nil, err
				}
				value := x.Get(obj)
				if len(value) == 0 {
					labels[label.Name] = ""
				} else if len(value) == 1 {
					labels[label.Name] = fmt.Sprintf("%v", value[0])
				} else {
					return nil, errors.New("too many values for a label")
				}
			} else if strings.HasPrefix(label.Value, "{") {
				script := strings.Trim(label.Value, "{}")
				var err error
				initGojaOnce.Do(func() {
					gojaRuntime, err = getRuntime(body)
				})
				if err != nil {
					return nil, err
				}
				v, err := gojaRuntime.getString(script)
				if err != nil {
					return nil, err
				}
				labels[label.Name] = v
			}
		}
		field.Labels = labels
		fields = append(fields, field)
	}

	// Frame name comes from the channel path; channel left empty so the
	// caller routes the frame on the original channel.
	frame := data.NewFrame(vars.Path, fields...)
	return []*ChannelFrame{
		{Channel: "", Frame: frame},
	}, nil
}
|
68
pkg/services/live/pipeline/converter_json_exact_test.go
Normal file
68
pkg/services/live/pipeline/converter_json_exact_test.go
Normal file
@ -0,0 +1,68 @@
|
||||
package pipeline
|
||||
|
||||
import (
|
||||
"context"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/grafana/grafana-plugin-sdk-go/data"
|
||||
|
||||
"github.com/grafana/grafana-plugin-sdk-go/backend"
|
||||
"github.com/grafana/grafana-plugin-sdk-go/experimental"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func checkExactConversion(tb testing.TB, file string, fields []Field) *backend.DataResponse {
|
||||
tb.Helper()
|
||||
content := loadTestJson(tb, file)
|
||||
|
||||
converter := NewExactJsonConverter(ExactJsonConverterConfig{
|
||||
Fields: fields,
|
||||
})
|
||||
converter.nowTimeFunc = func() time.Time {
|
||||
return time.Date(2021, 01, 01, 12, 12, 12, 0, time.UTC)
|
||||
}
|
||||
channelFrames, err := converter.Convert(context.Background(), Vars{}, content)
|
||||
require.NoError(tb, err)
|
||||
|
||||
dr := &backend.DataResponse{}
|
||||
for _, cf := range channelFrames {
|
||||
require.Empty(tb, cf.Channel)
|
||||
dr.Frames = append(dr.Frames, cf.Frame)
|
||||
}
|
||||
|
||||
err = experimental.CheckGoldenDataResponse(filepath.Join("testdata", file+".golden.txt"), dr, *update)
|
||||
require.NoError(tb, err)
|
||||
return dr
|
||||
}
|
||||
|
||||
// TestExactJsonConverter_Convert checks exact JSON-to-frame conversion —
// time variable, JSON path value, Goja script value and both label forms —
// against the json_exact golden file.
func TestExactJsonConverter_Convert(t *testing.T) {
	checkExactConversion(t, "json_exact", []Field{
		{
			Name:  "time",
			Value: "#{now}", // fixed clock supplied by checkExactConversion
			Type:  data.FieldTypeTime,
		},
		{
			Name:  "ax",
			Value: "$.ax", // JSON path extraction
			Type:  data.FieldTypeNullableFloat64,
		},
		{
			Name:  "key1",
			Value: "{x.map_with_floats.key1}", // Goja script extraction
			Type:  data.FieldTypeNullableFloat64,
			Labels: []Label{
				{
					Name:  "label1",
					Value: "{x.map_with_floats.key2.toString()}",
				},
				{
					Name:  "label2",
					Value: "$.map_with_floats.key2",
				},
			},
		},
	})
}
|
32
pkg/services/live/pipeline/converter_json_frame.go
Normal file
32
pkg/services/live/pipeline/converter_json_frame.go
Normal file
@ -0,0 +1,32 @@
|
||||
package pipeline
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
|
||||
"github.com/grafana/grafana-plugin-sdk-go/data"
|
||||
)
|
||||
|
||||
// JsonFrameConverterConfig is a placeholder for JsonFrameConverter settings.
type JsonFrameConverterConfig struct{}

// JsonFrameConverter decodes single data.Frame from JSON.
type JsonFrameConverter struct {
	config JsonFrameConverterConfig
}
|
||||
|
||||
func NewJsonFrameConverter(c JsonFrameConverterConfig) *JsonFrameConverter {
|
||||
return &JsonFrameConverter{
|
||||
config: c,
|
||||
}
|
||||
}
|
||||
|
||||
func (c *JsonFrameConverter) Convert(_ context.Context, _ Vars, body []byte) ([]*ChannelFrame, error) {
|
||||
var frame data.Frame
|
||||
err := json.Unmarshal(body, &frame)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return []*ChannelFrame{
|
||||
{Channel: "", Frame: &frame},
|
||||
}, nil
|
||||
}
|
329
pkg/services/live/pipeline/devdata.go
Normal file
329
pkg/services/live/pipeline/devdata.go
Normal file
@ -0,0 +1,329 @@
|
||||
package pipeline
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"log"
|
||||
"math/rand"
|
||||
"net/http"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/centrifugal/centrifuge"
|
||||
|
||||
"github.com/grafana/grafana-plugin-sdk-go/data"
|
||||
"github.com/grafana/grafana/pkg/services/live/managedstream"
|
||||
)
|
||||
|
||||
// Data is a sample payload shape posted to Live push endpoints during
// development (see postTestData).
type Data struct {
	Value1     float64                `json:"value1"`
	Value2     float64                `json:"value2"`
	Value3     *float64               `json:"value3"` // nullable on purpose to exercise null handling
	Value4     float64                `json:"value4"`
	Annotation string                 `json:"annotation"`
	Array      []float64              `json:"array"`
	Map        map[string]interface{} `json:"map"`
	Host       string                 `json:"host"`
	Status     string                 `json:"status"`
}
|
||||
|
||||
// TODO: temporary for development, remove.
|
||||
func postTestData() {
|
||||
i := 0
|
||||
for {
|
||||
time.Sleep(1000 * time.Millisecond)
|
||||
num1 := rand.Intn(10)
|
||||
num2 := rand.Intn(10)
|
||||
d := Data{
|
||||
Value1: float64(num1),
|
||||
Value2: float64(num2),
|
||||
Value4: float64(i % 10),
|
||||
Annotation: "odd",
|
||||
Array: []float64{float64(rand.Intn(10)), float64(rand.Intn(10))},
|
||||
Map: map[string]interface{}{
|
||||
"red": 1,
|
||||
"yellow": 4,
|
||||
"green": 7,
|
||||
},
|
||||
Host: "macbook-local",
|
||||
Status: "running",
|
||||
}
|
||||
if i%2 != 0 {
|
||||
val := 4.0
|
||||
d.Value3 = &val
|
||||
}
|
||||
if i%2 == 0 {
|
||||
val := 3.0
|
||||
d.Value3 = &val
|
||||
d.Annotation = "even"
|
||||
}
|
||||
if i%10 == 0 {
|
||||
d.Value3 = nil
|
||||
}
|
||||
jsonData, _ := json.Marshal(d)
|
||||
log.Println(string(jsonData))
|
||||
|
||||
req, _ := http.NewRequest("POST", "http://localhost:3000/api/live/push/json/auto", bytes.NewReader(jsonData))
|
||||
req.Header.Set("Authorization", "Bearer "+os.Getenv("GF_TOKEN"))
|
||||
resp, err := http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
_ = resp.Body.Close()
|
||||
req, _ = http.NewRequest("POST", "http://localhost:3000/api/live/push/json/tip", bytes.NewReader(jsonData))
|
||||
req.Header.Set("Authorization", "Bearer "+os.Getenv("GF_TOKEN"))
|
||||
resp, err = http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
_ = resp.Body.Close()
|
||||
req, _ = http.NewRequest("POST", "http://localhost:3000/api/live/push/json/exact", bytes.NewReader(jsonData))
|
||||
req.Header.Set("Authorization", "Bearer "+os.Getenv("GF_TOKEN"))
|
||||
resp, err = http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
_ = resp.Body.Close()
|
||||
i++
|
||||
}
|
||||
}
|
||||
|
||||
// DevRuleBuilder builds a hardcoded set of channel rules for development,
// without consulting rule storage.
type DevRuleBuilder struct {
	Node          *centrifuge.Node
	ManagedStream *managedstream.Runner
	FrameStorage  *FrameStorage
}
|
||||
|
||||
// BuildRules returns a hardcoded (development-only) set of pipeline rules.
// Each rule binds a channel pattern to an optional Converter (raw input ->
// frames), Processor (frame transformation) and Outputter (frame destination).
// RedirectOutput entries chain rules together: e.g. "stream/json/exact" fans
// out into change-log, condition and threshold-state channels, each handled
// by a later rule in this list. The context and orgID arguments are ignored
// here because the rule set is static.
func (f *DevRuleBuilder) BuildRules(_ context.Context, _ int64) ([]*LiveChannelRule, error) {
	return []*LiveChannelRule{
		{
			Pattern:   "plugin/testdata/random-20Hz-stream",
			Converter: NewJsonFrameConverter(JsonFrameConverterConfig{}),
			// Push to the managed stream and also re-process through the
			// "stream/testdata/random-20Hz-stream" rule below.
			Outputter: NewMultipleOutput(
				NewManagedStreamOutput(f.ManagedStream),
				NewRedirectOutput(RedirectOutputConfig{
					Channel: "stream/testdata/random-20Hz-stream",
				}),
			),
		},
		{
			Pattern: "stream/testdata/random-20Hz-stream",
			Processor: NewKeepFieldsProcessor(KeepFieldsProcessorConfig{
				FieldNames: []string{"Time", "Min", "Max"},
			}),
			Outputter: NewManagedStreamOutput(f.ManagedStream),
		},
		{
			OrgId:   1,
			Pattern: "stream/influx/input",
			Converter: NewAutoInfluxConverter(AutoInfluxConverterConfig{
				FrameFormat: "labels_column",
			}),
		},
		{
			OrgId:     1,
			Pattern:   "stream/influx/input/:rest",
			Outputter: NewManagedStreamOutput(f.ManagedStream),
		},
		{
			OrgId:   1,
			Pattern: "stream/influx/input/cpu",
			// TODO: Would be fine to have KeepLabelsProcessor, but we need to know frame type
			// since there are cases when labels attached to a field, and cases where labels
			// set in a first frame column (in Influx converter). For example, this will allow
			// to leave only "total-cpu" data while dropping individual CPUs.
			Processor: NewKeepFieldsProcessor(KeepFieldsProcessorConfig{
				FieldNames: []string{"labels", "time", "usage_user"},
			}),
			Outputter: NewMultipleOutput(
				NewManagedStreamOutput(f.ManagedStream),
				// Redirect CPU spikes (usage_user >= 50) into a dedicated channel.
				NewConditionalOutput(
					NewNumberCompareCondition("usage_user", "gte", 50),
					NewRedirectOutput(RedirectOutputConfig{
						Channel: "stream/influx/input/cpu/spikes",
					}),
				),
			),
		},
		{
			OrgId:     1,
			Pattern:   "stream/influx/input/cpu/spikes",
			Outputter: NewManagedStreamOutput(f.ManagedStream),
		},
		{
			OrgId:     1,
			Pattern:   "stream/json/auto",
			Converter: NewAutoJsonConverter(AutoJsonConverterConfig{}),
			Outputter: NewManagedStreamOutput(f.ManagedStream),
		},
		{
			OrgId:   1,
			Pattern: "stream/json/tip",
			// Auto-convert JSON, but force types for fields that may be null
			// or absent in the payload.
			Converter: NewAutoJsonConverter(AutoJsonConverterConfig{
				FieldTips: map[string]Field{
					"value3": {
						Name: "value3",
						Type: data.FieldTypeNullableFloat64,
					},
					"value100": {
						Name: "value100",
						Type: data.FieldTypeNullableFloat64,
					},
				},
			}),
			Processor: NewDropFieldsProcessor(DropFieldsProcessorConfig{
				FieldNames: []string{"value2"},
			}),
			Outputter: NewManagedStreamOutput(f.ManagedStream),
		},
		{
			OrgId:   1,
			Pattern: "stream/json/exact",
			// Fully declarative frame schema: each field is extracted via a
			// JSONPath ("$..."), a goja expression ("{...}") or a template
			// variable ("#{now}").
			Converter: NewExactJsonConverter(ExactJsonConverterConfig{
				Fields: []Field{
					{
						Name:  "time",
						Type:  data.FieldTypeTime,
						Value: "#{now}",
					},
					{
						Name:  "value1",
						Type:  data.FieldTypeNullableFloat64,
						Value: "$.value1",
					},
					{
						Name:  "value2",
						Type:  data.FieldTypeNullableFloat64,
						Value: "$.value2",
					},
					{
						Name:  "value3",
						Type:  data.FieldTypeNullableFloat64,
						Value: "$.value3",
						Labels: []Label{
							{
								Name:  "host",
								Value: "$.host",
							},
						},
					},
					{
						Name:  "value4",
						Type:  data.FieldTypeNullableFloat64,
						Value: "$.value4",
						// Thresholds drive the ThresholdOutput below.
						Config: &data.FieldConfig{
							Thresholds: &data.ThresholdsConfig{
								Mode: data.ThresholdsModeAbsolute,
								Steps: []data.Threshold{
									{
										Value: 2,
										State: "normal",
										Color: "green",
									},
									{
										Value: 6,
										State: "warning",
										Color: "orange",
									},
									{
										Value: 8,
										State: "critical",
										Color: "red",
									},
								},
							},
						},
					},
					{
						Name:  "map.red",
						Type:  data.FieldTypeNullableFloat64,
						Value: "$.map.red",
						Labels: []Label{
							{
								Name:  "host",
								Value: "$.host",
							},
							{
								Name:  "host2",
								Value: "$.host",
							},
						},
					},
					{
						Name:  "annotation",
						Type:  data.FieldTypeNullableString,
						Value: "$.annotation",
					},
					{
						Name:  "running",
						Type:  data.FieldTypeNullableBool,
						Value: "{x.status === 'running'}",
					},
					{
						Name:  "num_map_colors",
						Type:  data.FieldTypeNullableFloat64,
						Value: "{Object.keys(x.map).length}",
					},
				},
			}),
			Outputter: NewMultipleOutput(
				NewManagedStreamOutput(f.ManagedStream),
				NewRemoteWriteOutput(RemoteWriteConfig{
					Endpoint: os.Getenv("GF_LIVE_REMOTE_WRITE_ENDPOINT"),
					User:     os.Getenv("GF_LIVE_REMOTE_WRITE_USER"),
					Password: os.Getenv("GF_LIVE_REMOTE_WRITE_PASSWORD"),
				}),
				NewChangeLogOutput(f.FrameStorage, ChangeLogOutputConfig{
					FieldName: "value3",
					Channel:   "stream/json/exact/value3/changes",
				}),
				NewChangeLogOutput(f.FrameStorage, ChangeLogOutputConfig{
					FieldName: "annotation",
					Channel:   "stream/json/exact/annotation/changes",
				}),
				NewConditionalOutput(
					NewMultipleConditionChecker(
						ConditionAll,
						NewNumberCompareCondition("value1", "gte", 3.0),
						NewNumberCompareCondition("value2", "gte", 3.0),
					),
					NewRedirectOutput(RedirectOutputConfig{
						Channel: "stream/json/exact/condition",
					}),
				),
				NewThresholdOutput(f.FrameStorage, ThresholdOutputConfig{
					FieldName: "value4",
					Channel:   "stream/json/exact/value4/state",
				}),
			),
		},
		{
			OrgId:   1,
			Pattern: "stream/json/exact/value3/changes",
			Outputter: NewMultipleOutput(
				NewManagedStreamOutput(f.ManagedStream),
				NewRemoteWriteOutput(RemoteWriteConfig{
					Endpoint: os.Getenv("GF_LIVE_REMOTE_WRITE_ENDPOINT"),
					User:     os.Getenv("GF_LIVE_REMOTE_WRITE_USER"),
					Password: os.Getenv("GF_LIVE_REMOTE_WRITE_PASSWORD"),
				}),
			),
		},
		{
			OrgId:     1,
			Pattern:   "stream/json/exact/annotation/changes",
			Outputter: NewManagedStreamOutput(f.ManagedStream),
		},
		{
			OrgId:     1,
			Pattern:   "stream/json/exact/condition",
			Outputter: NewManagedStreamOutput(f.ManagedStream),
		},
		{
			OrgId:     1,
			Pattern:   "stream/json/exact/value4/state",
			Outputter: NewManagedStreamOutput(f.ManagedStream),
		},
	}, nil
}
|
37
pkg/services/live/pipeline/frame_storage.go
Normal file
37
pkg/services/live/pipeline/frame_storage.go
Normal file
@ -0,0 +1,37 @@
|
||||
package pipeline
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
"github.com/grafana/grafana/pkg/services/live/orgchannel"
|
||||
|
||||
"github.com/grafana/grafana-plugin-sdk-go/data"
|
||||
)
|
||||
|
||||
// FrameStorage keeps last channel frame in memory. Not usable in HA setup.
type FrameStorage struct {
	mu     sync.RWMutex
	frames map[string]*data.Frame // last frame keyed by org-prefixed channel
}

// NewFrameStorage returns an empty in-memory FrameStorage.
func NewFrameStorage() *FrameStorage {
	return &FrameStorage{
		frames: map[string]*data.Frame{},
	}
}

// Set stores frame as the latest frame for channel within orgID.
// The error is always nil; the signature matches the FrameGetSetter interface.
func (s *FrameStorage) Set(orgID int64, channel string, frame *data.Frame) error {
	key := orgchannel.PrependOrgID(orgID, channel)
	s.mu.Lock()
	defer s.mu.Unlock()
	s.frames[key] = frame
	return nil
}

// Get returns the latest frame stored for channel within orgID. The boolean
// reports whether a frame was present; the error is always nil.
func (s *FrameStorage) Get(orgID int64, channel string) (*data.Frame, bool, error) {
	key := orgchannel.PrependOrgID(orgID, channel)
	s.mu.RLock()
	defer s.mu.RUnlock()
	f, ok := s.frames[key]
	return f, ok, nil
}
|
97
pkg/services/live/pipeline/goja_expression.go
Normal file
97
pkg/services/live/pipeline/goja_expression.go
Normal file
@ -0,0 +1,97 @@
|
||||
package pipeline
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/dop251/goja"
|
||||
"github.com/dop251/goja/parser"
|
||||
)
|
||||
|
||||
func getRuntime(payload []byte) (*gojaRuntime, error) {
|
||||
vm := goja.New()
|
||||
vm.SetMaxCallStackSize(64)
|
||||
vm.SetParserOptions(parser.WithDisableSourceMaps)
|
||||
r := &gojaRuntime{vm}
|
||||
err := r.init(payload)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return r, nil
|
||||
}
|
||||
|
||||
// gojaRuntime wraps a goja JavaScript VM used to evaluate rule expressions
// against a single JSON payload.
type gojaRuntime struct {
	vm *goja.Runtime
}

// Parse JSON once. init stores the raw payload in the VM (__body) and parses
// it into the global variable `x` that all later expressions operate on.
func (r *gojaRuntime) init(payload []byte) error {
	err := r.vm.Set("__body", string(payload))
	if err != nil {
		return err
	}
	_, err = r.runString(`var x = JSON.parse(__body)`)
	return err
}
|
||||
|
||||
// runString executes script in the VM with a 100ms wall-clock budget. A
// watchdog goroutine interrupts the VM if evaluation takes too long, which
// protects the pipeline from hostile or accidental infinite loops; the
// goroutine exits via doneCh when evaluation finishes in time.
func (r *gojaRuntime) runString(script string) (goja.Value, error) {
	doneCh := make(chan struct{})
	go func() {
		select {
		case <-doneCh:
			// Script completed before the deadline: watchdog exits.
			return
		case <-time.After(100 * time.Millisecond):
			// Some ideas to prevent misuse of scripts:
			// * parse/validate scripts on save
			// * block scripts after several timeouts in a row
			// * block scripts on malformed returned error
			// * limit total quota of time for scripts
			// * maybe allow only one statement, reject scripts with cycles and functions.
			r.vm.Interrupt(errors.New("timeout"))
		}
	}()
	defer close(doneCh)
	return r.vm.RunString(script)
}
|
||||
|
||||
func (r *gojaRuntime) getBool(script string) (bool, error) {
|
||||
v, err := r.runString(script)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
num, ok := v.Export().(bool)
|
||||
if !ok {
|
||||
return false, errors.New("unexpected return value")
|
||||
}
|
||||
return num, nil
|
||||
}
|
||||
|
||||
func (r *gojaRuntime) getString(script string) (string, error) {
|
||||
v, err := r.runString(script)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
exportedVal := v.Export()
|
||||
stringVal, ok := exportedVal.(string)
|
||||
if !ok {
|
||||
return "", fmt.Errorf("unexpected return value: %v (%T), script: %s", exportedVal, exportedVal, script)
|
||||
}
|
||||
return stringVal, nil
|
||||
}
|
||||
|
||||
func (r *gojaRuntime) getFloat64(script string) (float64, error) {
|
||||
v, err := r.runString(script)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
exported := v.Export()
|
||||
switch v := exported.(type) {
|
||||
case float64:
|
||||
return v, nil
|
||||
case int64:
|
||||
return float64(v), nil
|
||||
default:
|
||||
return 0, fmt.Errorf("unexpected return value: %T", exported)
|
||||
}
|
||||
}
|
55
pkg/services/live/pipeline/goja_expression_test.go
Normal file
55
pkg/services/live/pipeline/goja_expression_test.go
Normal file
@ -0,0 +1,55 @@
|
||||
package pipeline
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/dop251/goja"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// The scripts below operate on `x`, the JSON payload parsed by getRuntime.

func TestGojaGetBool(t *testing.T) {
	r, err := getRuntime([]byte(`{"ax": true}`))
	require.NoError(t, err)
	val, err := r.getBool("x.ax")
	require.NoError(t, err)
	require.True(t, val)
}

func TestGojaGetFloat64(t *testing.T) {
	r, err := getRuntime([]byte(`{"ax": 3}`))
	require.NoError(t, err)
	val, err := r.getFloat64("x.ax")
	require.NoError(t, err)
	require.Equal(t, 3.0, val)
}

func TestGojaGetString(t *testing.T) {
	r, err := getRuntime([]byte(`{"ax": "test"}`))
	require.NoError(t, err)
	val, err := r.getString("x.ax")
	require.NoError(t, err)
	require.Equal(t, "test", val)
}

// A bool accessor on a string value must fail rather than coerce.
func TestGojaInvalidReturnValue(t *testing.T) {
	r, err := getRuntime([]byte(`{"ax": "test"}`))
	require.NoError(t, err)
	_, err = r.getBool("x.ax")
	require.Error(t, err)
}

// An endless loop should be interrupted by the 100ms watchdog in runString.
func TestGojaIInterrupt(t *testing.T) {
	r, err := getRuntime([]byte(`{}`))
	require.NoError(t, err)
	_, err = r.getBool("while (true) {}")
	var interrupted *goja.InterruptedError
	require.ErrorAs(t, err, &interrupted)
}

// Unbounded recursion should hit the max call stack size set in getRuntime.
func TestGojaIMaxStack(t *testing.T) {
	r, err := getRuntime([]byte(`{}`))
	require.NoError(t, err)
	_, err = r.getBool("function test() {test()}; test();")
	// TODO: strange <nil> error returned here, need to investigate what is it.
	require.Error(t, err)
}
|
135
pkg/services/live/pipeline/json_to_frame.go
Normal file
135
pkg/services/live/pipeline/json_to_frame.go
Normal file
@ -0,0 +1,135 @@
|
||||
package pipeline
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/grafana/grafana-plugin-sdk-go/data"
|
||||
jsoniter "github.com/json-iterator/go"
|
||||
)
|
||||
|
||||
// doc incrementally flattens a JSON document into single-row data fields.
// Nested object keys are joined with "." and array elements with "[i]", so
// {"a": {"b": 1}} yields a field named "a.b".
type doc struct {
	path       []string            // components of the current JSON path
	iterator   *jsoniter.Iterator  // streaming tokenizer over the input document
	fields     []*data.Field       // accumulated one-row output fields
	fieldNames map[string]struct{} // names already emitted, for fast lookup
	fieldTips  map[string]Field    // optional per-path hints (used to type null values)
}

// next consumes the value at the iterator's current position, recursing into
// arrays and objects and emitting one field per scalar leaf.
func (d *doc) next() error {
	switch d.iterator.WhatIsNext() {
	case jsoniter.StringValue:
		d.addString(d.iterator.ReadString())
	case jsoniter.NumberValue:
		// All JSON numbers become float64 fields.
		d.addNumber(d.iterator.ReadFloat64())
	case jsoniter.BoolValue:
		d.addBool(d.iterator.ReadBool())
	case jsoniter.NilValue:
		d.addNil()
		d.iterator.ReadNil()
	case jsoniter.ArrayValue:
		index := 0
		size := len(d.path)
		for d.iterator.ReadArray() {
			d.path = append(d.path, fmt.Sprintf("[%d]", index))
			err := d.next()
			if err != nil {
				return err
			}
			d.path = d.path[:size] // pop the "[i]" segment
			index++
		}
	case jsoniter.ObjectValue:
		size := len(d.path)
		for fname := d.iterator.ReadObject(); fname != ""; fname = d.iterator.ReadObject() {
			if size > 0 {
				// "." separator is only needed below the top level.
				d.path = append(d.path, ".")
			}
			d.path = append(d.path, fname)
			err := d.next()
			if err != nil {
				return err
			}
			d.path = d.path[:size] // pop separator and key
		}
	case jsoniter.InvalidValue:
		return fmt.Errorf("invalid value")
	}
	return nil
}

// key returns the flattened field name for the current path.
func (d *doc) key() string {
	return strings.Join(d.path, "")
}

// addString emits a one-row nullable string field for the current path.
func (d *doc) addString(v string) {
	f := data.NewFieldFromFieldType(data.FieldTypeNullableString, 1)
	f.Name = d.key()
	f.SetConcrete(0, v)
	d.fields = append(d.fields, f)
	d.fieldNames[d.key()] = struct{}{}
}

// addNumber emits a one-row nullable float64 field for the current path.
func (d *doc) addNumber(v float64) {
	f := data.NewFieldFromFieldType(data.FieldTypeNullableFloat64, 1)
	f.Name = d.key()
	f.SetConcrete(0, v)
	d.fields = append(d.fields, f)
	d.fieldNames[d.key()] = struct{}{}
}

// addBool emits a one-row nullable bool field for the current path.
func (d *doc) addBool(v bool) {
	f := data.NewFieldFromFieldType(data.FieldTypeNullableBool, 1)
	f.Name = d.key()
	f.SetConcrete(0, v)
	d.fields = append(d.fields, f)
	d.fieldNames[d.key()] = struct{}{}
}

// addNil emits a one-row null field for the current path. A JSON null carries
// no type information, so the field type must come from fieldTips; without a
// matching tip the value is skipped with a warning.
func (d *doc) addNil() {
	if tip, ok := d.fieldTips[d.key()]; ok {
		f := data.NewFieldFromFieldType(tip.Type, 1)
		f.Name = d.key()
		f.Set(0, nil)
		d.fields = append(d.fields, f)
		d.fieldNames[d.key()] = struct{}{}
	} else {
		logger.Warn("Skip nil field", "key", d.key())
	}
}
|
||||
|
||||
func jsonDocToFrame(name string, body []byte, fields map[string]Field, nowTimeFunc func() time.Time) (*data.Frame, error) {
|
||||
d := doc{
|
||||
iterator: jsoniter.ParseBytes(jsoniter.ConfigDefault, body),
|
||||
path: make([]string, 0),
|
||||
fieldTips: fields,
|
||||
fieldNames: map[string]struct{}{},
|
||||
}
|
||||
|
||||
f := data.NewFieldFromFieldType(data.FieldTypeTime, 1)
|
||||
f.Set(0, nowTimeFunc())
|
||||
d.fields = append(d.fields, f)
|
||||
|
||||
err := d.next()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(d.fields) < 2 {
|
||||
return nil, fmt.Errorf("no fields found")
|
||||
}
|
||||
|
||||
for name, tip := range fields {
|
||||
if _, ok := d.fieldNames[name]; ok {
|
||||
continue
|
||||
}
|
||||
f := data.NewFieldFromFieldType(tip.Type, 1)
|
||||
f.Name = name
|
||||
f.Set(0, nil)
|
||||
f.Config = tip.Config
|
||||
d.fields = append(d.fields, f)
|
||||
}
|
||||
|
||||
return data.NewFrame(name, d.fields...), nil
|
||||
}
|
7
pkg/services/live/pipeline/logger.go
Normal file
7
pkg/services/live/pipeline/logger.go
Normal file
@ -0,0 +1,7 @@
|
||||
package pipeline
|
||||
|
||||
import "github.com/grafana/grafana/pkg/infra/log"
|
||||
|
||||
var (
|
||||
logger = log.New("live.pipeline")
|
||||
)
|
92
pkg/services/live/pipeline/output_changelog.go
Normal file
92
pkg/services/live/pipeline/output_changelog.go
Normal file
@ -0,0 +1,92 @@
|
||||
package pipeline
|
||||
|
||||
import (
|
||||
"context"
|
||||
"reflect"
|
||||
"time"
|
||||
|
||||
"github.com/grafana/grafana-plugin-sdk-go/data"
|
||||
)
|
||||
|
||||
type ChangeLogOutputConfig struct {
|
||||
FieldName string `json:"fieldName"`
|
||||
Channel string `json:"channel"`
|
||||
}
|
||||
|
||||
// ChangeLogOutput can monitor value changes of the specified field and output
|
||||
// special change frame to the configured channel.
|
||||
type ChangeLogOutput struct {
|
||||
frameStorage FrameGetSetter
|
||||
config ChangeLogOutputConfig
|
||||
}
|
||||
|
||||
func NewChangeLogOutput(frameStorage FrameGetSetter, config ChangeLogOutputConfig) *ChangeLogOutput {
|
||||
return &ChangeLogOutput{frameStorage: frameStorage, config: config}
|
||||
}
|
||||
|
||||
func (l ChangeLogOutput) Output(_ context.Context, vars OutputVars, frame *data.Frame) ([]*ChannelFrame, error) {
|
||||
previousFrame, previousFrameOK, err := l.frameStorage.Get(vars.OrgID, l.config.Channel)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
fieldName := l.config.FieldName
|
||||
|
||||
previousFrameFieldIndex := -1
|
||||
if previousFrameOK {
|
||||
for i, f := range previousFrame.Fields {
|
||||
if f.Name == fieldName {
|
||||
previousFrameFieldIndex = i
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
currentFrameFieldIndex := -1
|
||||
for i, f := range frame.Fields {
|
||||
if f.Name == fieldName {
|
||||
currentFrameFieldIndex = i
|
||||
}
|
||||
}
|
||||
|
||||
var previousValue interface{}
|
||||
if previousFrameFieldIndex >= 0 {
|
||||
// Take last value for the field.
|
||||
previousValue = previousFrame.Fields[previousFrameFieldIndex].At(previousFrame.Fields[previousFrameFieldIndex].Len() - 1)
|
||||
}
|
||||
|
||||
fTime := data.NewFieldFromFieldType(data.FieldTypeTime, 0)
|
||||
fTime.Name = "time"
|
||||
f1 := data.NewFieldFromFieldType(frame.Fields[currentFrameFieldIndex].Type(), 0)
|
||||
f1.Name = "old"
|
||||
f2 := data.NewFieldFromFieldType(frame.Fields[currentFrameFieldIndex].Type(), 0)
|
||||
f2.Name = "new"
|
||||
|
||||
if currentFrameFieldIndex >= 0 {
|
||||
for i := 0; i < frame.Fields[currentFrameFieldIndex].Len(); i++ {
|
||||
currentValue := frame.Fields[currentFrameFieldIndex].At(i)
|
||||
if !reflect.DeepEqual(
|
||||
previousValue,
|
||||
currentValue,
|
||||
) {
|
||||
fTime.Append(time.Now())
|
||||
f1.Append(previousValue)
|
||||
f2.Append(currentValue)
|
||||
previousValue = currentValue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if fTime.Len() > 0 {
|
||||
changeFrame := data.NewFrame("change", fTime, f1, f2)
|
||||
err := l.frameStorage.Set(vars.OrgID, l.config.Channel, frame)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return []*ChannelFrame{{
|
||||
Channel: l.config.Channel,
|
||||
Frame: changeFrame,
|
||||
}}, nil
|
||||
}
|
||||
|
||||
return nil, l.frameStorage.Set(vars.OrgID, l.config.Channel, frame)
|
||||
}
|
90
pkg/services/live/pipeline/output_changelog_test.go
Normal file
90
pkg/services/live/pipeline/output_changelog_test.go
Normal file
@ -0,0 +1,90 @@
|
||||
package pipeline
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/golang/mock/gomock"
|
||||
"github.com/grafana/grafana-plugin-sdk-go/data"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// TestChangeLogOutput_NoPreviousFrame_SingleRow verifies that the very first
// frame (no stored baseline) produces one change row: old=nil -> new=20.
func TestChangeLogOutput_NoPreviousFrame_SingleRow(t *testing.T) {
	mockCtrl := gomock.NewController(t)
	defer mockCtrl.Finish()

	mockStorage := NewMockFrameGetSetter(mockCtrl)

	// No previous frame stored for the channel.
	mockStorage.EXPECT().Get(gomock.Any(), gomock.Any()).DoAndReturn(func(orgID int64, channel string) (*data.Frame, bool, error) {
		return nil, false, nil
	})

	// The incoming frame must be persisted as the new baseline.
	mockStorage.EXPECT().Set(gomock.Any(), gomock.Any(), gomock.Any()).Times(1)

	outputter := NewChangeLogOutput(mockStorage, ChangeLogOutputConfig{
		FieldName: "test",
		Channel:   "stream/test/no_previous_frame",
	})

	f1 := data.NewField("time", nil, make([]time.Time, 1))
	f1.Set(0, time.Now())

	f2 := data.NewField("test", nil, make([]*float64, 1))
	f2.SetConcrete(0, 20.0)

	frame := data.NewFrame("test", f1, f2)

	channelFrames, err := outputter.Output(context.Background(), OutputVars{}, frame)
	require.NoError(t, err)

	require.Len(t, channelFrames, 1)
	changeFrame := channelFrames[0].Frame
	require.Len(t, changeFrame.Fields, 3)
	var x *float64
	var y = 20.0
	// "old" is nil (no baseline), "new" is the observed value.
	require.Equal(t, x, changeFrame.Fields[1].At(0).(*float64))
	require.Equal(t, &y, changeFrame.Fields[2].At(0))
}

// TestChangeLogOutput_NoPreviousFrame_MultipleRows verifies that two
// differing rows inside one frame produce two change rows: nil->5 and 5->20.
func TestChangeLogOutput_NoPreviousFrame_MultipleRows(t *testing.T) {
	mockCtrl := gomock.NewController(t)
	defer mockCtrl.Finish()

	mockStorage := NewMockFrameGetSetter(mockCtrl)

	// No previous frame stored for the channel.
	mockStorage.EXPECT().Get(gomock.Any(), gomock.Any()).DoAndReturn(func(orgID int64, channel string) (*data.Frame, bool, error) {
		return nil, false, nil
	}).Times(1)

	// The incoming frame must be persisted as the new baseline.
	mockStorage.EXPECT().Set(gomock.Any(), gomock.Any(), gomock.Any()).Times(1)

	outputter := NewChangeLogOutput(mockStorage, ChangeLogOutputConfig{
		FieldName: "test",
		Channel:   "stream/test/no_previous_frame",
	})

	f1 := data.NewField("time", nil, make([]time.Time, 2))
	f1.Set(0, time.Now())
	f1.Set(1, time.Now())

	f2 := data.NewField("test", nil, make([]*float64, 2))
	f2.SetConcrete(0, 5.0)
	f2.SetConcrete(1, 20.0)

	frame := data.NewFrame("test", f1, f2)

	channelFrames, err := outputter.Output(context.Background(), OutputVars{}, frame)
	require.NoError(t, err)
	require.Len(t, channelFrames, 1)
	changeFrame := channelFrames[0].Frame
	require.Len(t, changeFrame.Fields, 3)
	var x *float64
	var y = 5.0
	// First transition: nil -> 5.
	require.Equal(t, x, changeFrame.Fields[1].At(0).(*float64))
	require.Equal(t, &y, changeFrame.Fields[2].At(0))
	var z = 5.0
	var v = 20.0
	// Second transition: 5 -> 20.
	require.Equal(t, &z, changeFrame.Fields[1].At(1).(*float64))
	require.Equal(t, &v, changeFrame.Fields[2].At(1))
}
|
27
pkg/services/live/pipeline/output_conditional.go
Normal file
27
pkg/services/live/pipeline/output_conditional.go
Normal file
@ -0,0 +1,27 @@
|
||||
package pipeline
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/grafana/grafana-plugin-sdk-go/data"
|
||||
)
|
||||
|
||||
type ConditionalOutput struct {
|
||||
Condition ConditionChecker
|
||||
Outputter Outputter
|
||||
}
|
||||
|
||||
func NewConditionalOutput(condition ConditionChecker, outputter Outputter) *ConditionalOutput {
|
||||
return &ConditionalOutput{Condition: condition, Outputter: outputter}
|
||||
}
|
||||
|
||||
func (l ConditionalOutput) Output(ctx context.Context, vars OutputVars, frame *data.Frame) ([]*ChannelFrame, error) {
|
||||
ok, err := l.Condition.CheckCondition(ctx, frame)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !ok {
|
||||
return nil, nil
|
||||
}
|
||||
return l.Outputter.Output(ctx, vars, frame)
|
||||
}
|
38
pkg/services/live/pipeline/output_local_subscribers.go
Normal file
38
pkg/services/live/pipeline/output_local_subscribers.go
Normal file
@ -0,0 +1,38 @@
|
||||
package pipeline
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
|
||||
"github.com/grafana/grafana/pkg/services/live/orgchannel"
|
||||
|
||||
"github.com/centrifugal/centrifuge"
|
||||
"github.com/grafana/grafana-plugin-sdk-go/data"
|
||||
)
|
||||
|
||||
// LocalSubscribersOutput broadcasts a frame to channel subscribers connected
// to this Grafana instance's Centrifuge node (no cross-node delivery).
type LocalSubscribersOutput struct {
	// TODO: refactor to depend on interface (avoid Centrifuge dependency here).
	node *centrifuge.Node
}

// NewLocalSubscribersOutput constructs a LocalSubscribersOutput for node.
func NewLocalSubscribersOutput(node *centrifuge.Node) *LocalSubscribersOutput {
	return &LocalSubscribersOutput{node: node}
}

// Output JSON-encodes frame and publishes it to the org-prefixed channel on
// the local node hub. It never produces follow-up channel frames.
func (l *LocalSubscribersOutput) Output(_ context.Context, vars OutputVars, frame *data.Frame) ([]*ChannelFrame, error) {
	channelID := vars.Channel
	channel := orgchannel.PrependOrgID(vars.OrgID, channelID)
	frameJSON, err := json.Marshal(frame)
	if err != nil {
		return nil, err
	}
	pub := &centrifuge.Publication{
		Data: frameJSON,
	}
	// Zero StreamPosition — presumably the publication bypasses history
	// bookkeeping; confirm against Centrifuge BroadcastPublication docs.
	err = l.node.Hub().BroadcastPublication(channel, pub, centrifuge.StreamPosition{})
	if err != nil {
		return nil, fmt.Errorf("error publishing %s: %w", string(frameJSON), err)
	}
	return nil, nil
}
|
26
pkg/services/live/pipeline/output_managed_stream.go
Normal file
26
pkg/services/live/pipeline/output_managed_stream.go
Normal file
@ -0,0 +1,26 @@
|
||||
package pipeline
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/grafana/grafana/pkg/services/live/managedstream"
|
||||
|
||||
"github.com/grafana/grafana-plugin-sdk-go/data"
|
||||
)
|
||||
|
||||
// ManagedStreamOutput pushes a frame into the managed stream runner so that
// subscribers of the corresponding managed channel receive it.
type ManagedStreamOutput struct {
	managedStream *managedstream.Runner
}

// NewManagedStreamOutput constructs a ManagedStreamOutput over managedStream.
func NewManagedStreamOutput(managedStream *managedstream.Runner) *ManagedStreamOutput {
	return &ManagedStreamOutput{managedStream: managedStream}
}

// Output resolves (or lazily creates) the managed stream for the org, scope
// and namespace from vars, then pushes the frame under vars.Path. It never
// emits follow-up channel frames.
func (l *ManagedStreamOutput) Output(_ context.Context, vars OutputVars, frame *data.Frame) ([]*ChannelFrame, error) {
	stream, err := l.managedStream.GetOrCreateStream(vars.OrgID, vars.Scope, vars.Namespace)
	if err != nil {
		logger.Error("Error getting stream", "error", err)
		return nil, err
	}
	return nil, stream.Push(vars.Path, frame)
}
|
30
pkg/services/live/pipeline/output_multiple.go
Normal file
30
pkg/services/live/pipeline/output_multiple.go
Normal file
@ -0,0 +1,30 @@
|
||||
package pipeline
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/grafana/grafana-plugin-sdk-go/data"
|
||||
)
|
||||
|
||||
// MultipleOutput can combine several Outputter and
|
||||
// execute them sequentially.
|
||||
type MultipleOutput struct {
|
||||
Outputters []Outputter
|
||||
}
|
||||
|
||||
func (m MultipleOutput) Output(ctx context.Context, vars OutputVars, frame *data.Frame) ([]*ChannelFrame, error) {
|
||||
var frames []*ChannelFrame
|
||||
for _, out := range m.Outputters {
|
||||
f, err := out.Output(ctx, vars, frame)
|
||||
if err != nil {
|
||||
logger.Error("Error outputting frame", "error", err)
|
||||
return nil, err
|
||||
}
|
||||
frames = append(frames, f...)
|
||||
}
|
||||
return frames, nil
|
||||
}
|
||||
|
||||
func NewMultipleOutput(outputters ...Outputter) *MultipleOutput {
|
||||
return &MultipleOutput{Outputters: outputters}
|
||||
}
|
33
pkg/services/live/pipeline/output_redirect.go
Normal file
33
pkg/services/live/pipeline/output_redirect.go
Normal file
@ -0,0 +1,33 @@
|
||||
package pipeline
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/grafana/grafana-plugin-sdk-go/data"
|
||||
)
|
||||
|
||||
// RedirectOutputConfig ...
|
||||
type RedirectOutputConfig struct {
|
||||
Channel string `json:"channel"`
|
||||
}
|
||||
|
||||
// RedirectOutput passes processing control to the rule defined
|
||||
// for a configured channel.
|
||||
type RedirectOutput struct {
|
||||
config RedirectOutputConfig
|
||||
}
|
||||
|
||||
func NewRedirectOutput(config RedirectOutputConfig) *RedirectOutput {
|
||||
return &RedirectOutput{config: config}
|
||||
}
|
||||
|
||||
func (l *RedirectOutput) Output(_ context.Context, vars OutputVars, frame *data.Frame) ([]*ChannelFrame, error) {
|
||||
if vars.Channel == l.config.Channel {
|
||||
return nil, fmt.Errorf("redirect to the same channel: %s", l.config.Channel)
|
||||
}
|
||||
return []*ChannelFrame{{
|
||||
Channel: l.config.Channel,
|
||||
Frame: frame,
|
||||
}}, nil
|
||||
}
|
72
pkg/services/live/pipeline/output_remote_write.go
Normal file
72
pkg/services/live/pipeline/output_remote_write.go
Normal file
@ -0,0 +1,72 @@
|
||||
package pipeline
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"errors"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/grafana/grafana-plugin-sdk-go/data"
|
||||
"github.com/grafana/grafana/pkg/services/live/remotewrite"
|
||||
)
|
||||
|
||||
// RemoteWriteConfig configures a Prometheus remote write destination.
type RemoteWriteConfig struct {
	// Endpoint to send streaming frames to.
	Endpoint string `json:"endpoint"`
	// User is a user for remote write request.
	User string `json:"user"`
	// Password for remote write endpoint.
	Password string `json:"password"`
}

// RemoteWriteOutput pushes frames to a Prometheus remote write endpoint.
type RemoteWriteOutput struct {
	config     RemoteWriteConfig
	httpClient *http.Client
}

// NewRemoteWriteOutput constructs a RemoteWriteOutput with its own HTTP
// client.
func NewRemoteWriteOutput(config RemoteWriteConfig) *RemoteWriteOutput {
	return &RemoteWriteOutput{
		config: config,
		// Hard 2s budget so remote write cannot stall the pipeline.
		httpClient: &http.Client{Timeout: 2 * time.Second},
	}
}

// Output serializes frame into Prometheus remote write format and POSTs it
// (basic-auth) to the configured endpoint. A missing endpoint makes this a
// no-op. It never produces follow-up channel frames.
func (r RemoteWriteOutput) Output(_ context.Context, _ OutputVars, frame *data.Frame) ([]*ChannelFrame, error) {
	if r.config.Endpoint == "" {
		logger.Debug("Skip sending to remote write: no url")
		return nil, nil
	}

	// Use remote write for a stream.
	remoteWriteData, err := remotewrite.SerializeLabelsColumn(frame)
	if err != nil {
		logger.Error("Error serializing to remote write format", "error", err)
		return nil, err
	}

	logger.Debug("Sending to remote write endpoint", "url", r.config.Endpoint, "bodyLength", len(remoteWriteData))
	req, err := http.NewRequest(http.MethodPost, r.config.Endpoint, bytes.NewReader(remoteWriteData))
	if err != nil {
		logger.Error("Error constructing remote write request", "error", err)
		return nil, err
	}
	// Standard Prometheus remote write headers (snappy-compressed protobuf).
	req.Header.Set("Content-Type", "application/x-protobuf")
	req.Header.Set("Content-Encoding", "snappy")
	req.Header.Set("X-Prometheus-Remote-Write-Version", "0.1.0")
	req.SetBasicAuth(r.config.User, r.config.Password)

	started := time.Now()
	resp, err := r.httpClient.Do(req)
	if err != nil {
		logger.Error("Error sending remote write request", "error", err)
		return nil, err
	}
	// NOTE(review): body is closed without being drained; consider
	// io.Copy(io.Discard, resp.Body) first to help keep-alive connection
	// reuse — confirm against net/http documentation.
	_ = resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		logger.Error("Unexpected response code from remote write endpoint", "code", resp.StatusCode)
		return nil, errors.New("unexpected response code from remote write endpoint")
	}
	logger.Debug("Successfully sent to remote write endpoint", "url", r.config.Endpoint, "elapsed", time.Since(started))
	return nil, nil
}
|
150
pkg/services/live/pipeline/output_threshold.go
Normal file
150
pkg/services/live/pipeline/output_threshold.go
Normal file
@ -0,0 +1,150 @@
|
||||
package pipeline
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/grafana/grafana-plugin-sdk-go/data"
|
||||
)
|
||||
|
||||
// ThresholdOutputConfig configures ThresholdOutput: which field to watch and
// which channel receives the threshold-state frames.
type ThresholdOutputConfig struct {
	FieldName string `json:"fieldName"`
	Channel   string `json:"channel"`
}

//go:generate mockgen -destination=output_threshold_mock.go -package=pipeline github.com/grafana/grafana/pkg/services/live/pipeline FrameGetSetter

// FrameGetSetter abstracts per-channel last-frame persistence
// (implemented by FrameStorage; mocked in tests).
type FrameGetSetter interface {
	Get(orgID int64, channel string) (*data.Frame, bool, error)
	Set(orgID int64, channel string, frame *data.Frame) error
}

// ThresholdOutput can monitor threshold transitions of the specified field and output
// special state frame to the configured channel.
type ThresholdOutput struct {
	frameStorage FrameGetSetter
	config       ThresholdOutputConfig
}

// NewThresholdOutput constructs a ThresholdOutput backed by frameStorage.
func NewThresholdOutput(frameStorage FrameGetSetter, config ThresholdOutputConfig) *ThresholdOutput {
	return &ThresholdOutput{frameStorage: frameStorage, config: config}
}
|
||||
|
||||
func (l *ThresholdOutput) Output(_ context.Context, vars OutputVars, frame *data.Frame) ([]*ChannelFrame, error) {
|
||||
if frame == nil {
|
||||
return nil, nil
|
||||
}
|
||||
previousFrame, previousFrameOk, err := l.frameStorage.Get(vars.OrgID, l.config.Channel)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
fieldName := l.config.FieldName
|
||||
|
||||
currentFrameFieldIndex := -1
|
||||
for i, f := range frame.Fields {
|
||||
if f.Name == fieldName {
|
||||
currentFrameFieldIndex = i
|
||||
}
|
||||
}
|
||||
if currentFrameFieldIndex < 0 {
|
||||
return nil, nil
|
||||
}
|
||||
if frame.Fields[currentFrameFieldIndex].Config == nil {
|
||||
return nil, nil
|
||||
}
|
||||
if frame.Fields[currentFrameFieldIndex].Config.Thresholds == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
mode := frame.Fields[currentFrameFieldIndex].Config.Thresholds.Mode
|
||||
if mode != data.ThresholdsModeAbsolute {
|
||||
return nil, fmt.Errorf("unsupported threshold mode: %s", mode)
|
||||
}
|
||||
|
||||
if len(frame.Fields[currentFrameFieldIndex].Config.Thresholds.Steps) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
previousFrameFieldIndex := -1
|
||||
if previousFrameOk {
|
||||
for i, f := range previousFrame.Fields {
|
||||
if f.Name == fieldName {
|
||||
previousFrameFieldIndex = i
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var previousState *string
|
||||
if previousFrameOk && previousFrameFieldIndex >= 0 {
|
||||
var previousThreshold data.Threshold
|
||||
value, ok := previousFrame.Fields[previousFrameFieldIndex].At(previousFrame.Fields[0].Len() - 1).(*float64)
|
||||
if !ok {
|
||||
return nil, nil
|
||||
}
|
||||
if value == nil {
|
||||
// TODO: what should we do here?
|
||||
return nil, nil
|
||||
}
|
||||
emptyState := ""
|
||||
previousState = &emptyState
|
||||
for _, threshold := range frame.Fields[currentFrameFieldIndex].Config.Thresholds.Steps {
|
||||
if *value >= float64(threshold.Value) {
|
||||
previousThreshold = threshold
|
||||
previousState = &previousThreshold.State
|
||||
continue
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
fTime := data.NewFieldFromFieldType(data.FieldTypeTime, 0)
|
||||
fTime.Name = "time"
|
||||
f1 := data.NewFieldFromFieldType(data.FieldTypeFloat64, 0)
|
||||
f1.Name = "value"
|
||||
f2 := data.NewFieldFromFieldType(data.FieldTypeString, 0)
|
||||
f2.Name = "state"
|
||||
f3 := data.NewFieldFromFieldType(data.FieldTypeString, 0)
|
||||
f3.Name = "color"
|
||||
|
||||
for i := 0; i < frame.Fields[currentFrameFieldIndex].Len(); i++ {
|
||||
// TODO: support other numeric types.
|
||||
value, ok := frame.Fields[currentFrameFieldIndex].At(i).(*float64)
|
||||
if !ok {
|
||||
return nil, nil
|
||||
}
|
||||
if value == nil {
|
||||
// TODO: what should we do here?
|
||||
return nil, nil
|
||||
}
|
||||
var currentThreshold data.Threshold
|
||||
for _, threshold := range frame.Fields[currentFrameFieldIndex].Config.Thresholds.Steps {
|
||||
if *value >= float64(threshold.Value) {
|
||||
currentThreshold = threshold
|
||||
continue
|
||||
}
|
||||
break
|
||||
}
|
||||
if previousState == nil || currentThreshold.State != *previousState {
|
||||
fTime.Append(time.Now())
|
||||
f1.Append(*value)
|
||||
f2.Append(currentThreshold.State)
|
||||
f3.Append(currentThreshold.Color)
|
||||
previousState = ¤tThreshold.State
|
||||
}
|
||||
}
|
||||
|
||||
if fTime.Len() > 0 {
|
||||
stateFrame := data.NewFrame("state", fTime, f1, f2, f3)
|
||||
err := l.frameStorage.Set(vars.OrgID, l.config.Channel, frame)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return []*ChannelFrame{{
|
||||
Channel: l.config.Channel,
|
||||
Frame: stateFrame,
|
||||
}}, nil
|
||||
}
|
||||
|
||||
return nil, l.frameStorage.Set(vars.OrgID, l.config.Channel, frame)
|
||||
}
|
65
pkg/services/live/pipeline/output_threshold_mock.go
Normal file
65
pkg/services/live/pipeline/output_threshold_mock.go
Normal file
@ -0,0 +1,65 @@
|
||||
// Code generated by MockGen. DO NOT EDIT.
|
||||
// Source: github.com/grafana/grafana/pkg/services/live/pipeline (interfaces: FrameGetSetter)
|
||||
|
||||
// Package pipeline is a generated GoMock package.
|
||||
package pipeline
|
||||
|
||||
import (
|
||||
reflect "reflect"
|
||||
|
||||
gomock "github.com/golang/mock/gomock"
|
||||
data "github.com/grafana/grafana-plugin-sdk-go/data"
|
||||
)
|
||||
|
||||
// NOTE(review): this block is machine-generated by MockGen (see the
// //go:generate directive next to FrameGetSetter in output_threshold.go);
// regenerate it rather than editing by hand.

// MockFrameGetSetter is a mock of FrameGetSetter interface.
type MockFrameGetSetter struct {
	ctrl     *gomock.Controller
	recorder *MockFrameGetSetterMockRecorder
}

// MockFrameGetSetterMockRecorder is the mock recorder for MockFrameGetSetter.
type MockFrameGetSetterMockRecorder struct {
	mock *MockFrameGetSetter
}

// NewMockFrameGetSetter creates a new mock instance.
func NewMockFrameGetSetter(ctrl *gomock.Controller) *MockFrameGetSetter {
	mock := &MockFrameGetSetter{ctrl: ctrl}
	mock.recorder = &MockFrameGetSetterMockRecorder{mock}
	return mock
}

// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockFrameGetSetter) EXPECT() *MockFrameGetSetterMockRecorder {
	return m.recorder
}

// Get mocks base method.
func (m *MockFrameGetSetter) Get(arg0 int64, arg1 string) (*data.Frame, bool, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "Get", arg0, arg1)
	ret0, _ := ret[0].(*data.Frame)
	ret1, _ := ret[1].(bool)
	ret2, _ := ret[2].(error)
	return ret0, ret1, ret2
}

// Get indicates an expected call of Get.
func (mr *MockFrameGetSetterMockRecorder) Get(arg0, arg1 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockFrameGetSetter)(nil).Get), arg0, arg1)
}

// Set mocks base method.
func (m *MockFrameGetSetter) Set(arg0 int64, arg1 string, arg2 *data.Frame) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "Set", arg0, arg1, arg2)
	ret0, _ := ret[0].(error)
	return ret0
}

// Set indicates an expected call of Set.
func (mr *MockFrameGetSetterMockRecorder) Set(arg0, arg1, arg2 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Set", reflect.TypeOf((*MockFrameGetSetter)(nil).Set), arg0, arg1, arg2)
}
|
254
pkg/services/live/pipeline/output_threshold_test.go
Normal file
254
pkg/services/live/pipeline/output_threshold_test.go
Normal file
@ -0,0 +1,254 @@
|
||||
package pipeline
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/golang/mock/gomock"
|
||||
"github.com/grafana/grafana-plugin-sdk-go/data"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// TestThresholdOutput_Output is a table-driven smoke test; currently it only
// checks that a nil input frame is a no-op without error.
func TestThresholdOutput_Output(t *testing.T) {
	type fields struct {
		frameStorage FrameGetSetter
		config       ThresholdOutputConfig
	}
	type args struct {
		in0   context.Context
		vars  OutputVars
		frame *data.Frame
	}
	tests := []struct {
		name    string
		fields  fields
		args    args
		wantErr bool
	}{
		{
			name: "nil_input_frame",
			fields: fields{
				frameStorage: nil,
				config: ThresholdOutputConfig{
					Channel: "test",
				},
			},
			args:    args{in0: context.Background(), vars: OutputVars{}, frame: nil},
			wantErr: false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			l := &ThresholdOutput{
				frameStorage: tt.fields.frameStorage,
				config:       tt.fields.config,
			}
			if _, err := l.Output(tt.args.in0, tt.args.vars, tt.args.frame); (err != nil) != tt.wantErr {
				t.Errorf("Output() error = %v, wantErr %v", err, tt.wantErr)
			}
		})
	}
}

// With no previous frame stored, the single row above the threshold must
// produce one state-frame row ("normal"/"green").
func TestThresholdOutput_NoPreviousFrame_SingleRow(t *testing.T) {
	mockCtrl := gomock.NewController(t)
	defer mockCtrl.Finish()

	mockStorage := NewMockFrameGetSetter(mockCtrl)

	mockStorage.EXPECT().Get(gomock.Any(), gomock.Any()).DoAndReturn(func(orgID int64, channel string) (*data.Frame, bool, error) {
		return nil, false, nil
	})

	mockStorage.EXPECT().Set(gomock.Any(), gomock.Any(), gomock.Any()).Times(1)

	outputter := NewThresholdOutput(mockStorage, ThresholdOutputConfig{
		FieldName: "test",
		Channel:   "stream/test/no_previous_frame",
	})

	f1 := data.NewField("time", nil, make([]time.Time, 1))
	f1.Set(0, time.Now())

	f2 := data.NewField("test", nil, make([]*float64, 1))
	f2.SetConcrete(0, 20.0)
	f2.Config = &data.FieldConfig{
		Thresholds: &data.ThresholdsConfig{
			Mode: data.ThresholdsModeAbsolute,
			Steps: []data.Threshold{
				{
					Value: 10,
					State: "normal",
					Color: "green",
				},
			},
		},
	}

	frame := data.NewFrame("test", f1, f2)

	channelFrames, err := outputter.Output(context.Background(), OutputVars{}, frame)
	require.NoError(t, err)

	require.Len(t, channelFrames, 1)
	stateFrame := channelFrames[0].Frame
	require.Len(t, stateFrame.Fields, 4)
	require.Equal(t, 20.0, stateFrame.Fields[1].At(0))
	require.Equal(t, "normal", stateFrame.Fields[2].At(0))
	require.Equal(t, "green", stateFrame.Fields[3].At(0))
}

// Two rows crossing the threshold (5 -> 20) must produce two state rows:
// the initial "" (below-first-step) state and the "normal" transition.
func TestThresholdOutput_NoPreviousFrame_MultipleRows(t *testing.T) {
	mockCtrl := gomock.NewController(t)
	defer mockCtrl.Finish()

	mockStorage := NewMockFrameGetSetter(mockCtrl)

	mockStorage.EXPECT().Get(gomock.Any(), gomock.Any()).DoAndReturn(func(orgID int64, channel string) (*data.Frame, bool, error) {
		return nil, false, nil
	}).Times(1)

	mockStorage.EXPECT().Set(gomock.Any(), gomock.Any(), gomock.Any()).Times(1)

	outputter := NewThresholdOutput(mockStorage, ThresholdOutputConfig{
		FieldName: "test",
		Channel:   "stream/test/no_previous_frame",
	})

	f1 := data.NewField("time", nil, make([]time.Time, 2))
	f1.Set(0, time.Now())
	f1.Set(1, time.Now())

	f2 := data.NewField("test", nil, make([]*float64, 2))
	f2.SetConcrete(0, 5.0)
	f2.SetConcrete(1, 20.0)

	f2.Config = &data.FieldConfig{
		Thresholds: &data.ThresholdsConfig{
			Mode: data.ThresholdsModeAbsolute,
			Steps: []data.Threshold{
				{
					Value: 10,
					State: "normal",
					Color: "green",
				},
			},
		},
	}

	frame := data.NewFrame("test", f1, f2)

	channelFrames, err := outputter.Output(context.Background(), OutputVars{}, frame)
	require.NoError(t, err)
	require.Len(t, channelFrames, 1)

	stateFrame := channelFrames[0].Frame

	require.Len(t, stateFrame.Fields, 4)
	require.Equal(t, 5.0, stateFrame.Fields[1].At(0))
	require.Equal(t, "", stateFrame.Fields[2].At(0))
	require.Equal(t, "", stateFrame.Fields[3].At(0))

	require.Equal(t, 20.0, stateFrame.Fields[1].At(1))
	require.Equal(t, "normal", stateFrame.Fields[2].At(1))
	require.Equal(t, "green", stateFrame.Fields[3].At(1))
}

// With a previous frame whose last value is already in the same state, the
// single identical row must produce no state frame at all.
func TestThresholdOutput_WithPreviousFrame_SingleRow(t *testing.T) {
	mockCtrl := gomock.NewController(t)
	defer mockCtrl.Finish()

	mockStorage := NewMockFrameGetSetter(mockCtrl)

	mockStorage.EXPECT().Get(gomock.Any(), gomock.Any()).DoAndReturn(func(orgID int64, channel string) (*data.Frame, bool, error) {
		f1 := data.NewField("time", nil, make([]time.Time, 1))
		f1.Set(0, time.Now())
		f2 := data.NewField("test", nil, make([]*float64, 1))
		f2.SetConcrete(0, 20.0)
		frame := data.NewFrame("test", f1, f2)
		return frame, true, nil
	}).Times(1)

	mockStorage.EXPECT().Set(gomock.Any(), gomock.Any(), gomock.Any()).Times(1)

	outputter := NewThresholdOutput(mockStorage, ThresholdOutputConfig{
		FieldName: "test",
		Channel:   "stream/test/with_previous_frame",
	})

	f1 := data.NewField("time", nil, make([]time.Time, 1))
	f1.Set(0, time.Now())

	f2 := data.NewField("test", nil, make([]*float64, 1))
	f2.SetConcrete(0, 20.0)

	f2.Config = &data.FieldConfig{
		Thresholds: &data.ThresholdsConfig{
			Mode: data.ThresholdsModeAbsolute,
			Steps: []data.Threshold{
				{
					Value: 10,
					State: "normal",
					Color: "green",
				},
			},
		},
	}

	frame := data.NewFrame("test", f1, f2)

	channelFrames, err := outputter.Output(context.Background(), OutputVars{}, frame)
	require.NoError(t, err)
	require.Len(t, channelFrames, 0)
}

// With a previous frame in "normal" state, rows 5 -> 20 transition out of and
// back into "normal", so a state frame must be emitted.
func TestThresholdOutput_WithPreviousFrame_MultipleRows(t *testing.T) {
	mockCtrl := gomock.NewController(t)
	defer mockCtrl.Finish()

	mockStorage := NewMockFrameGetSetter(mockCtrl)

	mockStorage.EXPECT().Get(gomock.Any(), gomock.Any()).DoAndReturn(func(orgID int64, channel string) (*data.Frame, bool, error) {
		f1 := data.NewField("time", nil, make([]time.Time, 1))
		f1.Set(0, time.Now())
		f2 := data.NewField("test", nil, make([]*float64, 1))
		f2.SetConcrete(0, 20.0)
		frame := data.NewFrame("test", f1, f2)
		return frame, true, nil
	}).Times(1)

	mockStorage.EXPECT().Set(gomock.Any(), gomock.Any(), gomock.Any()).Times(1)

	outputter := NewThresholdOutput(mockStorage, ThresholdOutputConfig{
		FieldName: "test",
		Channel:   "stream/test/with_previous_frame",
	})

	f1 := data.NewField("time", nil, make([]time.Time, 2))
	f1.Set(0, time.Now())
	f1.Set(1, time.Now())

	f2 := data.NewField("test", nil, make([]*float64, 2))
	f2.SetConcrete(0, 5.0)
	f2.SetConcrete(1, 20.0)

	f2.Config = &data.FieldConfig{
		Thresholds: &data.ThresholdsConfig{
			Mode: data.ThresholdsModeAbsolute,
			Steps: []data.Threshold{
				{
					Value: 10,
					State: "normal",
					Color: "green",
				},
			},
		},
	}

	frame := data.NewFrame("test", f1, f2)

	channelFrames, err := outputter.Output(context.Background(), OutputVars{}, frame)
	require.NoError(t, err)
	require.Len(t, channelFrames, 1)
}
|
234
pkg/services/live/pipeline/pipeline.go
Normal file
234
pkg/services/live/pipeline/pipeline.go
Normal file
@ -0,0 +1,234 @@
|
||||
package pipeline
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/grafana/grafana-plugin-sdk-go/data"
|
||||
"github.com/grafana/grafana-plugin-sdk-go/live"
|
||||
)
|
||||
|
||||
// ChannelFrame is a wrapper over data.Frame with additional channel information.
// Channel is used for rule routing, if the channel is empty then frame processing
// will try to take current rule Processor and Outputter. If channel is not empty
// then frame processing will be redirected to a corresponding channel rule.
// TODO: avoid recursion, increment a counter while frame travels over pipeline steps, make it configurable.
type ChannelFrame struct {
	// Channel optionally redirects the frame to another channel's rule.
	Channel string
	// Frame is the payload to process/output.
	Frame *data.Frame
}

// Vars has some helpful things pipeline entities could use.
type Vars struct {
	OrgID     int64
	Channel   string // full channel ID, i.e. "scope/namespace/path"
	Scope     string
	Namespace string
	Path      string
}

// ProcessorVars has some helpful things Processor entities could use.
type ProcessorVars struct {
	Vars
}

// OutputVars has some helpful things Outputter entities could use.
type OutputVars struct {
	ProcessorVars
}
|
||||
|
||||
// Converter converts raw bytes to slice of ChannelFrame. Each element
// of resulting slice will be then individually processed and outputted
// according configured channel rules.
type Converter interface {
	Convert(ctx context.Context, vars Vars, body []byte) ([]*ChannelFrame, error)
}

// Processor can modify data.Frame in a custom way before it will be outputted.
type Processor interface {
	// Process returns the (possibly replaced) frame; returning a nil frame
	// drops it from further processing.
	Process(ctx context.Context, vars ProcessorVars, frame *data.Frame) (*data.Frame, error)
}

// Outputter outputs data.Frame to a custom destination. Or simply
// do nothing if some conditions not met.
type Outputter interface {
	// Output may return additional frames which are routed through the
	// pipeline again (possibly to other channels).
	Output(ctx context.Context, vars OutputVars, frame *data.Frame) ([]*ChannelFrame, error)
}
|
||||
|
||||
// LiveChannelRule is an in-memory representation of each specific rule, with Converter, Processor
// and Outputter to be executed by Pipeline.
type LiveChannelRule struct {
	// OrgId owning the rule. NOTE(review): Go convention is OrgID, but renaming
	// the exported field would break callers.
	OrgId int64
	// Pattern is the channel pattern the rule matches (may contain :params).
	Pattern   string
	Converter Converter
	Processor Processor
	Outputter Outputter
}

// Label ...
type Label struct {
	Name  string `json:"name"`
	Value string `json:"value"` // Can be JSONPath or Goja script.
}

// Field description.
type Field struct {
	Name   string            `json:"name"`
	Type   data.FieldType    `json:"type"`
	Value  string            `json:"value"` // Can be JSONPath or Goja script.
	Labels []Label           `json:"labels,omitempty"`
	Config *data.FieldConfig `json:"config,omitempty"`
}

// ChannelRuleGetter resolves the rule for an (orgID, channel) pair; the bool
// result reports whether a matching rule exists.
type ChannelRuleGetter interface {
	Get(orgID int64, channel string) (*LiveChannelRule, bool, error)
}
|
||||
|
||||
// Pipeline allows processing custom input data according to user-defined rules.
// This includes:
// * transforming custom input to data.Frame objects
// * do some processing on these frames
// * output resulting frames to various destinations.
type Pipeline struct {
	ruleGetter ChannelRuleGetter // resolves the rule for an (orgID, channel) pair
}
|
||||
|
||||
// New creates new Pipeline.
|
||||
func New(ruleGetter ChannelRuleGetter) (*Pipeline, error) {
|
||||
logger.Info("Live pipeline initialization")
|
||||
p := &Pipeline{
|
||||
ruleGetter: ruleGetter,
|
||||
}
|
||||
if os.Getenv("GF_LIVE_PIPELINE_DEV") != "" {
|
||||
go postTestData() // TODO: temporary for development, remove before merge.
|
||||
}
|
||||
return p, nil
|
||||
}
|
||||
|
||||
func (p *Pipeline) Get(orgID int64, channel string) (*LiveChannelRule, bool, error) {
|
||||
return p.ruleGetter.Get(orgID, channel)
|
||||
}
|
||||
|
||||
func (p *Pipeline) ProcessInput(ctx context.Context, orgID int64, channelID string, body []byte) (bool, error) {
|
||||
rule, ok, err := p.ruleGetter.Get(orgID, channelID)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if !ok {
|
||||
return false, nil
|
||||
}
|
||||
channelFrames, ok, err := p.dataToChannelFrames(ctx, *rule, orgID, channelID, body)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if !ok {
|
||||
return false, nil
|
||||
}
|
||||
err = p.processChannelFrames(ctx, orgID, channelID, channelFrames)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("error processing frame: %w", err)
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (p *Pipeline) dataToChannelFrames(ctx context.Context, rule LiveChannelRule, orgID int64, channelID string, body []byte) ([]*ChannelFrame, bool, error) {
|
||||
if rule.Converter == nil {
|
||||
return nil, false, nil
|
||||
}
|
||||
|
||||
channel, err := live.ParseChannel(channelID)
|
||||
if err != nil {
|
||||
logger.Error("Error parsing channel", "error", err, "channel", channelID)
|
||||
return nil, false, err
|
||||
}
|
||||
|
||||
vars := Vars{
|
||||
OrgID: orgID,
|
||||
Channel: channelID,
|
||||
Scope: channel.Scope,
|
||||
Namespace: channel.Namespace,
|
||||
Path: channel.Path,
|
||||
}
|
||||
|
||||
frames, err := rule.Converter.Convert(ctx, vars, body)
|
||||
if err != nil {
|
||||
logger.Error("Error converting data", "error", err)
|
||||
return nil, false, err
|
||||
}
|
||||
|
||||
return frames, true, nil
|
||||
}
|
||||
|
||||
func (p *Pipeline) processChannelFrames(ctx context.Context, orgID int64, channelID string, channelFrames []*ChannelFrame) error {
|
||||
for _, channelFrame := range channelFrames {
|
||||
var processorChannel = channelID
|
||||
if channelFrame.Channel != "" {
|
||||
processorChannel = channelFrame.Channel
|
||||
}
|
||||
err := p.processFrame(ctx, orgID, processorChannel, channelFrame.Frame)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *Pipeline) processFrame(ctx context.Context, orgID int64, channelID string, frame *data.Frame) error {
|
||||
rule, ruleOk, err := p.ruleGetter.Get(orgID, channelID)
|
||||
if err != nil {
|
||||
logger.Error("Error getting rule", "error", err)
|
||||
return err
|
||||
}
|
||||
if !ruleOk {
|
||||
logger.Debug("Rule not found", "channel", channelID)
|
||||
return nil
|
||||
}
|
||||
|
||||
ch, err := live.ParseChannel(channelID)
|
||||
if err != nil {
|
||||
logger.Error("Error parsing channel", "error", err, "channel", channelID)
|
||||
return err
|
||||
}
|
||||
|
||||
vars := ProcessorVars{
|
||||
Vars: Vars{
|
||||
OrgID: orgID,
|
||||
Channel: channelID,
|
||||
Scope: ch.Scope,
|
||||
Namespace: ch.Namespace,
|
||||
Path: ch.Path,
|
||||
},
|
||||
}
|
||||
|
||||
if rule.Processor != nil {
|
||||
frame, err = rule.Processor.Process(ctx, vars, frame)
|
||||
if err != nil {
|
||||
logger.Error("Error processing frame", "error", err)
|
||||
return err
|
||||
}
|
||||
if frame == nil {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
outputVars := OutputVars{
|
||||
ProcessorVars: vars,
|
||||
}
|
||||
|
||||
if rule.Outputter != nil {
|
||||
frames, err := rule.Outputter.Output(ctx, outputVars, frame)
|
||||
if err != nil {
|
||||
logger.Error("Error outputting frame", "error", err)
|
||||
return err
|
||||
}
|
||||
if len(frames) > 0 {
|
||||
err := p.processChannelFrames(ctx, vars.OrgID, vars.Channel, frames)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
107
pkg/services/live/pipeline/pipeline_test.go
Normal file
107
pkg/services/live/pipeline/pipeline_test.go
Normal file
@ -0,0 +1,107 @@
|
||||
package pipeline
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"sync"
|
||||
"testing"
|
||||
|
||||
"github.com/grafana/grafana-plugin-sdk-go/data"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// testRuleGetter is a trivial in-memory ChannelRuleGetter used by tests.
type testRuleGetter struct {
	mu    sync.Mutex
	rules map[string]*LiveChannelRule
}

// Get returns the rule registered under channel, ignoring orgID.
func (t *testRuleGetter) Get(orgID int64, channel string) (*LiveChannelRule, bool, error) {
	t.mu.Lock()
	defer t.mu.Unlock()
	rule, ok := t.rules[channel]
	return rule, ok, nil
}

// TestPipeline_New checks that New returns a usable Pipeline.
func TestPipeline_New(t *testing.T) {
	p, err := New(&testRuleGetter{})
	require.NoError(t, err)
	require.NotNil(t, p)
}

// TestPipelineNoConverter: a rule without a Converter means the input is not handled.
func TestPipelineNoConverter(t *testing.T) {
	p, err := New(&testRuleGetter{
		rules: map[string]*LiveChannelRule{
			"test": {
				Converter: nil,
			},
		},
	})
	require.NoError(t, err)
	ok, err := p.ProcessInput(context.Background(), 1, "test", []byte(`{}`))
	require.NoError(t, err)
	require.False(t, ok)
}

// testConverter emits a single fixed ChannelFrame regardless of input.
type testConverter struct {
	channel string
	frame   *data.Frame
}

func (t *testConverter) Convert(_ context.Context, _ Vars, _ []byte) ([]*ChannelFrame, error) {
	return []*ChannelFrame{{Channel: t.channel, Frame: t.frame}}, nil
}

// testProcessor passes frames through unchanged.
type testProcessor struct{}

func (t *testProcessor) Process(_ context.Context, _ ProcessorVars, frame *data.Frame) (*data.Frame, error) {
	return frame, nil
}

// testOutputter records the last frame it saw, or fails with err when set.
type testOutputter struct {
	err   error
	frame *data.Frame
}

func (t *testOutputter) Output(_ context.Context, _ OutputVars, frame *data.Frame) ([]*ChannelFrame, error) {
	if t.err != nil {
		return nil, t.err
	}
	t.frame = frame
	return nil, nil
}

// TestPipeline checks the happy path: convert -> process -> output.
func TestPipeline(t *testing.T) {
	outputter := &testOutputter{}
	p, err := New(&testRuleGetter{
		rules: map[string]*LiveChannelRule{
			"stream/test/xxx": {
				Converter: &testConverter{"", data.NewFrame("test")},
				Processor: &testProcessor{},
				Outputter: outputter,
			},
		},
	})
	require.NoError(t, err)
	ok, err := p.ProcessInput(context.Background(), 1, "stream/test/xxx", []byte(`{}`))
	require.NoError(t, err)
	require.True(t, ok)
	require.NotNil(t, outputter.frame)
}

// TestPipeline_OutputError checks that an Outputter error propagates to the caller.
func TestPipeline_OutputError(t *testing.T) {
	boomErr := errors.New("boom")
	outputter := &testOutputter{err: boomErr}
	p, err := New(&testRuleGetter{
		rules: map[string]*LiveChannelRule{
			"stream/test/xxx": {
				Converter: &testConverter{"", data.NewFrame("test")},
				Processor: &testProcessor{},
				Outputter: outputter,
			},
		},
	})
	require.NoError(t, err)
	_, err = p.ProcessInput(context.Background(), 1, "stream/test/xxx", []byte(`{}`))
	require.ErrorIs(t, err, boomErr)
}
|
37
pkg/services/live/pipeline/processor_drop_field.go
Normal file
37
pkg/services/live/pipeline/processor_drop_field.go
Normal file
@ -0,0 +1,37 @@
|
||||
package pipeline
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/grafana/grafana-plugin-sdk-go/data"
|
||||
)
|
||||
|
||||
// DropFieldsProcessorConfig configures DropFieldsProcessor with the list of
// field names to remove from incoming frames.
type DropFieldsProcessorConfig struct {
	FieldNames []string `json:"fieldNames"`
}

// DropFieldsProcessor can drop specified fields from a data.Frame.
type DropFieldsProcessor struct {
	config DropFieldsProcessorConfig
}
|
||||
|
||||
// removeIndex returns s with the element at index removed.
// NOTE(review): this mutates the backing array of s in place (elements after
// index are shifted left), so any other slice sharing that array — including a
// range snapshot of it — observes the shift.
func removeIndex(s []*data.Field, index int) []*data.Field {
	return append(s[:index], s[index+1:]...)
}
|
||||
|
||||
func NewDropFieldsProcessor(config DropFieldsProcessorConfig) *DropFieldsProcessor {
|
||||
return &DropFieldsProcessor{config: config}
|
||||
}
|
||||
|
||||
func (d DropFieldsProcessor) Process(_ context.Context, _ ProcessorVars, frame *data.Frame) (*data.Frame, error) {
|
||||
for _, f := range d.config.FieldNames {
|
||||
inner:
|
||||
for i, field := range frame.Fields {
|
||||
if f == field.Name {
|
||||
frame.Fields = removeIndex(frame.Fields, i)
|
||||
continue inner
|
||||
}
|
||||
}
|
||||
}
|
||||
return frame, nil
|
||||
}
|
40
pkg/services/live/pipeline/processor_keep_field.go
Normal file
40
pkg/services/live/pipeline/processor_keep_field.go
Normal file
@ -0,0 +1,40 @@
|
||||
package pipeline
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/grafana/grafana-plugin-sdk-go/data"
|
||||
)
|
||||
|
||||
// KeepFieldsProcessorConfig configures KeepFieldsProcessor with the field
// names to retain.
type KeepFieldsProcessorConfig struct {
	FieldNames []string `json:"fieldNames"`
}

// KeepFieldsProcessor can keep specified fields in a data.Frame dropping all other fields.
type KeepFieldsProcessor struct {
	config KeepFieldsProcessorConfig
}
|
||||
|
||||
func NewKeepFieldsProcessor(config KeepFieldsProcessorConfig) *KeepFieldsProcessor {
|
||||
return &KeepFieldsProcessor{config: config}
|
||||
}
|
||||
|
||||
// stringInSlice reports whether str is present in slice.
func stringInSlice(str string, slice []string) bool {
	for i := range slice {
		if slice[i] == str {
			return true
		}
	}
	return false
}
|
||||
|
||||
func (d KeepFieldsProcessor) Process(_ context.Context, _ ProcessorVars, frame *data.Frame) (*data.Frame, error) {
|
||||
var fieldsToKeep []*data.Field
|
||||
for _, field := range frame.Fields {
|
||||
if stringInSlice(field.Name, d.config.FieldNames) {
|
||||
fieldsToKeep = append(fieldsToKeep, field)
|
||||
}
|
||||
}
|
||||
f := data.NewFrame(frame.Name, fieldsToKeep...)
|
||||
return f, nil
|
||||
}
|
29
pkg/services/live/pipeline/processor_multiple.go
Normal file
29
pkg/services/live/pipeline/processor_multiple.go
Normal file
@ -0,0 +1,29 @@
|
||||
package pipeline
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/grafana/grafana-plugin-sdk-go/data"
|
||||
)
|
||||
|
||||
// MultipleProcessor can combine several Processor and
// execute them sequentially.
type MultipleProcessor struct {
	Processors []Processor // executed in order; each receives the previous one's output
}
|
||||
|
||||
func (m MultipleProcessor) Process(ctx context.Context, vars ProcessorVars, frame *data.Frame) (*data.Frame, error) {
|
||||
for _, p := range m.Processors {
|
||||
var err error
|
||||
frame, err = p.Process(ctx, vars, frame)
|
||||
if err != nil {
|
||||
logger.Error("Error processing frame", "error", err)
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return frame, nil
|
||||
}
|
||||
|
||||
func NewMultipleProcessor(processors ...Processor) *MultipleProcessor {
|
||||
return &MultipleProcessor{Processors: processors}
|
||||
}
|
8
pkg/services/live/pipeline/rule_builder.go
Normal file
8
pkg/services/live/pipeline/rule_builder.go
Normal file
@ -0,0 +1,8 @@
|
||||
package pipeline
|
||||
|
||||
import "context"
|
||||
|
||||
// RuleBuilder constructs in-memory representation of channel rules.
type RuleBuilder interface {
	// BuildRules returns all channel rules configured for the given org.
	BuildRules(ctx context.Context, orgID int64) ([]*LiveChannelRule, error)
}
|
83
pkg/services/live/pipeline/rule_cache_segmented.go
Normal file
83
pkg/services/live/pipeline/rule_cache_segmented.go
Normal file
@ -0,0 +1,83 @@
|
||||
package pipeline
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/grafana/grafana/pkg/services/live/pipeline/tree"
|
||||
)
|
||||
|
||||
// CacheSegmentedTree provides a fast access to channel rule configuration.
// Rules are kept per-org in a radix tree (channel pattern -> rule) that is
// rebuilt periodically in the background and filled lazily on first access
// for an org.
type CacheSegmentedTree struct {
	radixMu     sync.RWMutex         // guards radix
	radix       map[int64]*tree.Node // orgID -> radix tree of channel rules
	ruleBuilder RuleBuilder          // source of rules when (re)filling an org
}
|
||||
|
||||
// NewCacheSegmentedTree creates a rule cache backed by the given RuleBuilder
// and immediately starts a background refresh goroutine.
// NOTE(review): the goroutine has no stop mechanism and runs for the process
// lifetime — fine for a singleton, a leak if instances are created repeatedly.
func NewCacheSegmentedTree(storage RuleBuilder) *CacheSegmentedTree {
	s := &CacheSegmentedTree{
		radix:       map[int64]*tree.Node{},
		ruleBuilder: storage,
	}
	go s.updatePeriodically()
	return s
}
|
||||
|
||||
func (s *CacheSegmentedTree) updatePeriodically() {
|
||||
for {
|
||||
var orgIDs []int64
|
||||
s.radixMu.Lock()
|
||||
for orgID := range s.radix {
|
||||
orgIDs = append(orgIDs, orgID)
|
||||
}
|
||||
s.radixMu.Unlock()
|
||||
for _, orgID := range orgIDs {
|
||||
err := s.fillOrg(orgID)
|
||||
if err != nil {
|
||||
logger.Error("error filling orgId", "error", err, "orgId", orgID)
|
||||
}
|
||||
}
|
||||
time.Sleep(20 * time.Second)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *CacheSegmentedTree) fillOrg(orgID int64) error {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
||||
defer cancel()
|
||||
channels, err := s.ruleBuilder.BuildRules(ctx, orgID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
s.radixMu.Lock()
|
||||
defer s.radixMu.Unlock()
|
||||
s.radix[orgID] = tree.New()
|
||||
for _, ch := range channels {
|
||||
s.radix[orgID].AddRoute("/"+ch.Pattern, ch)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Get returns the channel rule matching channel for orgID.
//
// Fast path: check for the org's tree under a read lock. If the org has never
// been filled, build its tree synchronously via fillOrg, then do the radix
// lookup under a read lock. Two concurrent first-time callers may both run
// fillOrg — harmless, the second simply overwrites the first's tree.
func (s *CacheSegmentedTree) Get(orgID int64, channel string) (*LiveChannelRule, bool, error) {
	s.radixMu.RLock()
	_, ok := s.radix[orgID]
	s.radixMu.RUnlock()
	if !ok {
		// Org not cached yet: build its rules synchronously on first access.
		err := s.fillOrg(orgID)
		if err != nil {
			return nil, false, err
		}
	}
	s.radixMu.RLock()
	defer s.radixMu.RUnlock()
	t, ok := s.radix[orgID]
	if !ok {
		return nil, false, nil
	}
	// Radix lookup with parameter support (":param" segments in patterns).
	ps := make(tree.Params, 0, 20)
	nodeValue := t.GetValue("/"+channel, &ps, true)
	if nodeValue.Handler == nil {
		return nil, false, nil
	}
	return nodeValue.Handler.(*LiveChannelRule), true, nil
}
|
56
pkg/services/live/pipeline/rule_cache_segmented_test.go
Normal file
56
pkg/services/live/pipeline/rule_cache_segmented_test.go
Normal file
@ -0,0 +1,56 @@
|
||||
package pipeline
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// testBuilder is a RuleBuilder stub returning a fixed rule set for tests:
// one exact pattern (with a converter) and two wildcard patterns (without).
type testBuilder struct{}

// BuildRules ignores the context and org and always returns the same rules.
func (t *testBuilder) BuildRules(_ context.Context, _ int64) ([]*LiveChannelRule, error) {
	return []*LiveChannelRule{
		{
			OrgId:     1,
			Pattern:   "stream/telegraf/cpu",
			Converter: NewAutoJsonConverter(AutoJsonConverterConfig{}),
		},
		{
			OrgId:   1,
			Pattern: "stream/telegraf/:metric",
		},
		{
			OrgId:   1,
			Pattern: "stream/telegraf/:metric/:extra",
		},
	}, nil
}
|
||||
// TestStorage_Get verifies that exact patterns win over wildcard patterns and
// that wildcard patterns of different depth are matched correctly.
func TestStorage_Get(t *testing.T) {
	s := NewCacheSegmentedTree(&testBuilder{})

	// Exact match — the rule with a converter.
	rule, ok, err := s.Get(1, "stream/telegraf/cpu")
	require.NoError(t, err)
	require.True(t, ok)
	require.NotNil(t, rule.Converter)

	// Single-segment wildcard match — the rule without a converter.
	rule, ok, err = s.Get(1, "stream/telegraf/mem")
	require.NoError(t, err)
	require.True(t, ok)
	require.Nil(t, rule.Converter)

	// Two-segment wildcard match.
	rule, ok, err = s.Get(1, "stream/telegraf/mem/rss")
	require.NoError(t, err)
	require.True(t, ok)
	require.Nil(t, rule.Converter)
}
|
||||
// BenchmarkRuleGet measures rule lookup throughput on a warm cache.
func BenchmarkRuleGet(b *testing.B) {
	s := NewCacheSegmentedTree(&testBuilder{})
	for i := 0; i < b.N; i++ {
		_, ok, err := s.Get(1, "stream/telegraf/cpu")
		if err != nil || !ok {
			b.Fatal("unexpected return values")
		}
	}
}
|
43
pkg/services/live/pipeline/storage_file.go
Normal file
43
pkg/services/live/pipeline/storage_file.go
Normal file
@ -0,0 +1,43 @@
|
||||
package pipeline
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
)
|
||||
|
||||
// FileStorage can load channel rules from a file on disk.
|
||||
type FileStorage struct{}
|
||||
|
||||
func (f *FileStorage) ListRemoteWriteBackends(_ context.Context, orgID int64) ([]RemoteWriteBackend, error) {
|
||||
backendBytes, _ := ioutil.ReadFile(os.Getenv("GF_LIVE_REMOTE_WRITE_BACKENDS_FILE"))
|
||||
var remoteWriteBackends RemoteWriteBackends
|
||||
err := json.Unmarshal(backendBytes, &remoteWriteBackends)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var backends []RemoteWriteBackend
|
||||
for _, b := range remoteWriteBackends.Backends {
|
||||
if b.OrgId == orgID || (orgID == 1 && b.OrgId == 0) {
|
||||
backends = append(backends, b)
|
||||
}
|
||||
}
|
||||
return backends, nil
|
||||
}
|
||||
|
||||
func (f *FileStorage) ListChannelRules(_ context.Context, orgID int64) ([]ChannelRule, error) {
|
||||
ruleBytes, _ := ioutil.ReadFile(os.Getenv("GF_LIVE_CHANNEL_RULES_FILE"))
|
||||
var channelRules ChannelRules
|
||||
err := json.Unmarshal(ruleBytes, &channelRules)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var rules []ChannelRule
|
||||
for _, r := range channelRules.Rules {
|
||||
if r.OrgId == orgID || (orgID == 1 && r.OrgId == 0) {
|
||||
rules = append(rules, r)
|
||||
}
|
||||
}
|
||||
return rules, nil
|
||||
}
|
16
pkg/services/live/pipeline/testdata/json_auto.golden.txt
vendored
Normal file
16
pkg/services/live/pipeline/testdata/json_auto.golden.txt
vendored
Normal file
@ -0,0 +1,16 @@
|
||||
🌟 This was machine generated. Do not edit. 🌟
|
||||
|
||||
Frame[0]
|
||||
Name:
|
||||
Dimensions: 8 Fields by 1 Rows
|
||||
+-------------------------------+------------------+-----------------------+-----------------------+--------------------+--------------------+----------------------------+----------------------------+
|
||||
| Name: | Name: ax | Name: string_array[0] | Name: string_array[1] | Name: int_array[0] | Name: int_array[1] | Name: map_with_floats.key1 | Name: map_with_floats.key2 |
|
||||
| Labels: | Labels: | Labels: | Labels: | Labels: | Labels: | Labels: | Labels: |
|
||||
| Type: []time.Time | Type: []*float64 | Type: []*string | Type: []*string | Type: []*float64 | Type: []*float64 | Type: []*float64 | Type: []*float64 |
|
||||
+-------------------------------+------------------+-----------------------+-----------------------+--------------------+--------------------+----------------------------+----------------------------+
|
||||
| 2021-01-01 12:12:12 +0000 UTC | 1 | 1 | 2 | 1 | 2 | 2 | 3 |
|
||||
+-------------------------------+------------------+-----------------------+-----------------------+--------------------+--------------------+----------------------------+----------------------------+
|
||||
|
||||
|
||||
====== TEST DATA RESPONSE (arrow base64) ======
|
||||
FRAME=QVJST1cxAAD/////KAQAABAAAAAAAAoADgAMAAsABAAKAAAAFAAAAAAAAAEDAAoADAAAAAgABAAKAAAACAAAAFAAAAACAAAAKAAAAAQAAABc/P//CAAAAAwAAAAAAAAAAAAAAAUAAAByZWZJZAAAAHz8//8IAAAADAAAAAAAAAAAAAAABAAAAG5hbWUAAAAACAAAAEADAADUAgAAUAIAAOQBAAB0AQAABAEAAIQAAAAEAAAAWv3//xQAAABMAAAATAAAAAAAAwFMAAAAAQAAAAQAAADg/P//CAAAACAAAAAUAAAAbWFwX3dpdGhfZmxvYXRzLmtleTIAAAAABAAAAG5hbWUAAAAAAAAAAOr8//8AAAIAFAAAAG1hcF93aXRoX2Zsb2F0cy5rZXkyAAAAANb9//8UAAAATAAAAEwAAAAAAAMBTAAAAAEAAAAEAAAAXP3//wgAAAAgAAAAFAAAAG1hcF93aXRoX2Zsb2F0cy5rZXkxAAAAAAQAAABuYW1lAAAAAAAAAABm/f//AAACABQAAABtYXBfd2l0aF9mbG9hdHMua2V5MQAAAABS/v//FAAAAEQAAABEAAAAAAADAUQAAAABAAAABAAAANj9//8IAAAAGAAAAAwAAABpbnRfYXJyYXlbMV0AAAAABAAAAG5hbWUAAAAAAAAAANr9//8AAAIADAAAAGludF9hcnJheVsxXQAAAAC+/v//FAAAAEQAAABEAAAAAAADAUQAAAABAAAABAAAAET+//8IAAAAGAAAAAwAAABpbnRfYXJyYXlbMF0AAAAABAAAAG5hbWUAAAAAAAAAAEb+//8AAAIADAAAAGludF9hcnJheVswXQAAAAAq////FAAAAEQAAABEAAAAAAAFAUAAAAABAAAABAAAALD+//8IAAAAGAAAAA8AAABzdHJpbmdfYXJyYXlbMV0ABAAAAG5hbWUAAAAAAAAAAJj///8PAAAAc3RyaW5nX2FycmF5WzFdAJL///8UAAAARAAAAEgAAAAAAAUBRAAAAAEAAAAEAAAAGP///wgAAAAYAAAADwAAAHN0cmluZ19hcnJheVswXQAEAAAAbmFtZQAAAAAAAAAABAAEAAQAAAAPAAAAc3RyaW5nX2FycmF5WzBdAAAAEgAYABQAEwASAAwAAAAIAAQAEgAAABQAAAA4AAAAOAAAAAAAAwE4AAAAAQAAAAQAAACY////CAAAAAwAAAACAAAAYXgAAAQAAABuYW1lAAAAAAAAAACO////AAACAAIAAABheAAAAAASABgAFAAAABMADAAAAAgABAASAAAAFAAAAEAAAABIAAAAAAAACkgAAAABAAAADAAAAAgADAAIAAQACAAAAAgAAAAMAAAAAAAAAAAAAAAEAAAAbmFtZQAAAAAAAAAAAAAGAAgABgAGAAAAAAADAAAAAAAAAAAA//////gBAAAUAAAAAAAAAAwAFgAUABMADAAEAAwAAABQAAAAAAAAABQAAAAAAAADAwAKABgADAAIAAQACgAAABQAAAA4AQAAAQAAAAAAAAAAAAAAEgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgAAAAAAAAACAAAAAAAAAAAAAAAAAAAAAgAAAAAAAAACAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAIAAAAAAAAABgAAAAAAAAACAAAAAAAAAAgAAAAAAAAAAAAAAAAAAAAIAAAAAAAAAAIAAAAAAAAACgAAAAAAAAACAAAAAAAAAAwAAAAAAAAAAAAAAAAAAAAMAAAAAAAAAAIAAAAAAAAADgAAAAAAAAAAAAAAAAAAAA4AAAAAAAAAAgAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAACAAAAAAAAABIAAAAAAAAAAAAAAAAAAAASAAAAAAAAAAIAAAAAAAAAAAAAAAIAAAAAQAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAA
ABAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAGL0vkhpWFgAAAAAAAPA/AAAAAAEAAAAxAAAAAAAAAAAAAAABAAAAMgAAAAAAAAAAAAAAAADwPwAAAAAAAABAAAAAAAAAAEAAAAAAAAAIQBAAAAAMABQAEgAMAAgABAAMAAAAEAAAACwAAAA8AAAAAAADAAEAAAA4BAAAAAAAAAACAAAAAAAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAoADAAAAAgABAAKAAAACAAAAFAAAAACAAAAKAAAAAQAAABc/P//CAAAAAwAAAAAAAAAAAAAAAUAAAByZWZJZAAAAHz8//8IAAAADAAAAAAAAAAAAAAABAAAAG5hbWUAAAAACAAAAEADAADUAgAAUAIAAOQBAAB0AQAABAEAAIQAAAAEAAAAWv3//xQAAABMAAAATAAAAAAAAwFMAAAAAQAAAAQAAADg/P//CAAAACAAAAAUAAAAbWFwX3dpdGhfZmxvYXRzLmtleTIAAAAABAAAAG5hbWUAAAAAAAAAAOr8//8AAAIAFAAAAG1hcF93aXRoX2Zsb2F0cy5rZXkyAAAAANb9//8UAAAATAAAAEwAAAAAAAMBTAAAAAEAAAAEAAAAXP3//wgAAAAgAAAAFAAAAG1hcF93aXRoX2Zsb2F0cy5rZXkxAAAAAAQAAABuYW1lAAAAAAAAAABm/f//AAACABQAAABtYXBfd2l0aF9mbG9hdHMua2V5MQAAAABS/v//FAAAAEQAAABEAAAAAAADAUQAAAABAAAABAAAANj9//8IAAAAGAAAAAwAAABpbnRfYXJyYXlbMV0AAAAABAAAAG5hbWUAAAAAAAAAANr9//8AAAIADAAAAGludF9hcnJheVsxXQAAAAC+/v//FAAAAEQAAABEAAAAAAADAUQAAAABAAAABAAAAET+//8IAAAAGAAAAAwAAABpbnRfYXJyYXlbMF0AAAAABAAAAG5hbWUAAAAAAAAAAEb+//8AAAIADAAAAGludF9hcnJheVswXQAAAAAq////FAAAAEQAAABEAAAAAAAFAUAAAAABAAAABAAAALD+//8IAAAAGAAAAA8AAABzdHJpbmdfYXJyYXlbMV0ABAAAAG5hbWUAAAAAAAAAAJj///8PAAAAc3RyaW5nX2FycmF5WzFdAJL///8UAAAARAAAAEgAAAAAAAUBRAAAAAEAAAAEAAAAGP///wgAAAAYAAAADwAAAHN0cmluZ19hcnJheVswXQAEAAAAbmFtZQAAAAAAAAAABAAEAAQAAAAPAAAAc3RyaW5nX2FycmF5WzBdAAAAEgAYABQAEwASAAwAAAAIAAQAEgAAABQAAAA4AAAAOAAAAAAAAwE4AAAAAQAAAAQAAACY////CAAAAAwAAAACAAAAYXgAAAQAAABuYW1lAAAAAAAAAACO////AAACAAIAAABheAAAAAASABgAFAAAABMADAAAAAgABAASAAAAFAAAAEAAAABIAAAAAAAACkgAAAABAAAADAAAAAgADAAIAAQACAAAAAgAAAAMAAAAAAAAAAAAAAAEAAAAbmFtZQAAAAAAAAAAAAAGAAgABgAGAAAAAAADAAAAAAAAAAAAWAQAAEFSUk9XMQ==
|
10
pkg/services/live/pipeline/testdata/json_auto.json
vendored
Normal file
10
pkg/services/live/pipeline/testdata/json_auto.json
vendored
Normal file
@ -0,0 +1,10 @@
|
||||
{
|
||||
"ax": 1,
|
||||
"string_array": ["1", "2"],
|
||||
"int_array": [1, 2],
|
||||
"map_with_floats": {
|
||||
"key1": 2.0,
|
||||
"key2": 3.0
|
||||
},
|
||||
"bx": null
|
||||
}
|
16
pkg/services/live/pipeline/testdata/json_exact.golden.txt
vendored
Normal file
16
pkg/services/live/pipeline/testdata/json_exact.golden.txt
vendored
Normal file
@ -0,0 +1,16 @@
|
||||
🌟 This was machine generated. Do not edit. 🌟
|
||||
|
||||
Frame[0]
|
||||
Name:
|
||||
Dimensions: 3 Fields by 1 Rows
|
||||
+-------------------------------+------------------+----------------------------+
|
||||
| Name: time | Name: ax | Name: key1 |
|
||||
| Labels: | Labels: | Labels: label1=3, label2=3 |
|
||||
| Type: []time.Time | Type: []*float64 | Type: []*float64 |
|
||||
+-------------------------------+------------------+----------------------------+
|
||||
| 2021-01-01 12:12:12 +0000 UTC | 1 | 2 |
|
||||
+-------------------------------+------------------+----------------------------+
|
||||
|
||||
|
||||
====== TEST DATA RESPONSE (arrow base64) ======
|
||||
FRAME=QVJST1cxAAD/////WAIAABAAAAAAAAoADgAMAAsABAAKAAAAFAAAAAAAAAEDAAoADAAAAAgABAAKAAAACAAAAFAAAAACAAAAKAAAAAQAAAA0/v//CAAAAAwAAAAAAAAAAAAAAAUAAAByZWZJZAAAAFT+//8IAAAADAAAAAAAAAAAAAAABAAAAG5hbWUAAAAAAwAAAEABAACwAAAABAAAAGr///8UAAAAeAAAAHgAAAAAAAMBeAAAAAIAAAAsAAAABAAAAKj+//8IAAAAEAAAAAQAAABrZXkxAAAAAAQAAABuYW1lAAAAAMz+//8IAAAAJAAAABsAAAB7ImxhYmVsMSI6IjMiLCJsYWJlbDIiOiIzIn0ABgAAAGxhYmVscwAAAAAAANr+//8AAAIABAAAAGtleTEAABIAGAAUABMAEgAMAAAACAAEABIAAAAUAAAAXAAAAFwAAAAAAAMBXAAAAAIAAAAoAAAABAAAAFD///8IAAAADAAAAAIAAABheAAABAAAAG5hbWUAAAAAcP///wgAAAAMAAAAAgAAAHt9AAAGAAAAbGFiZWxzAAAAAAAAZv///wAAAgACAAAAYXgAAAAAEgAYABQAAAATAAwAAAAIAAQAEgAAABQAAABoAAAAcAAAAAAAAApwAAAAAgAAADQAAAAEAAAA3P///wgAAAAQAAAABAAAAHRpbWUAAAAABAAAAG5hbWUAAAAACAAMAAgABAAIAAAACAAAAAwAAAACAAAAe30AAAYAAABsYWJlbHMAAAAAAAAAAAYACAAGAAYAAAAAAAMABAAAAHRpbWUAAAAAAAAAAP/////oAAAAFAAAAAAAAAAMABYAFAATAAwABAAMAAAAGAAAAAAAAAAUAAAAAAAAAwMACgAYAAwACAAEAAoAAAAUAAAAeAAAAAEAAAAAAAAAAAAAAAYAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAIAAAAAAAAAAgAAAAAAAAAAAAAAAAAAAAIAAAAAAAAAAgAAAAAAAAAEAAAAAAAAAAAAAAAAAAAABAAAAAAAAAACAAAAAAAAAAAAAAAAwAAAAEAAAAAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAYvS+SGlYWAAAAAAAA8D8AAAAAAAAAQBAAAAAMABQAEgAMAAgABAAMAAAAEAAAACwAAAA4AAAAAAADAAEAAABoAgAAAAAAAPAAAAAAAAAAGAAAAAAAAAAAAAAAAAAAAAAACgAMAAAACAAEAAoAAAAIAAAAUAAAAAIAAAAoAAAABAAAADT+//8IAAAADAAAAAAAAAAAAAAABQAAAHJlZklkAAAAVP7//wgAAAAMAAAAAAAAAAAAAAAEAAAAbmFtZQAAAAADAAAAQAEAALAAAAAEAAAAav///xQAAAB4AAAAeAAAAAAAAwF4AAAAAgAAACwAAAAEAAAAqP7//wgAAAAQAAAABAAAAGtleTEAAAAABAAAAG5hbWUAAAAAzP7//wgAAAAkAAAAGwAAAHsibGFiZWwxIjoiMyIsImxhYmVsMiI6IjMifQAGAAAAbGFiZWxzAAAAAAAA2v7//wAAAgAEAAAAa2V5MQAAEgAYABQAEwASAAwAAAAIAAQAEgAAABQAAABcAAAAXAAAAAAAAwFcAAAAAgAAACgAAAAEAAAAUP///wgAAAAMAAAAAgAAAGF4AAAEAAAAbmFtZQAAAABw////CAAAAAwAAAACAAAAe30AAAYAAABsYWJlbHMAAAAAAABm////AAACAAIAAABheAAAAAASABgAFAAAABMADAAAAAgABAASAAAAFAAAAGgAAABwAAAAAAAACnAAAAACAAAANAAAAAQAAADc////CAAAABAAAAAEAAAAdGltZQAAAAAEAAAAbmFtZQAAAAAIAAwACAAEAAgAAAAIAAAADAAAAAIAAAB7fQAABgAAAGxhYmVscwAAAAAAAAAABg
AIAAYABgAAAAAAAwAEAAAAdGltZQAAAACAAgAAQVJST1cx
|
10
pkg/services/live/pipeline/testdata/json_exact.json
vendored
Normal file
10
pkg/services/live/pipeline/testdata/json_exact.json
vendored
Normal file
@ -0,0 +1,10 @@
|
||||
{
|
||||
"ax": 1,
|
||||
"string_array": ["1", "2"],
|
||||
"int_array": [1, 2],
|
||||
"map_with_floats": {
|
||||
"key1": 2.0,
|
||||
"key2": 3.0
|
||||
},
|
||||
"bx": null
|
||||
}
|
29
pkg/services/live/pipeline/tree/LICENSE
Normal file
29
pkg/services/live/pipeline/tree/LICENSE
Normal file
@ -0,0 +1,29 @@
|
||||
BSD 3-Clause License
|
||||
|
||||
Copyright (c) 2013, Julien Schmidt
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are met:
|
||||
|
||||
1. Redistributions of source code must retain the above copyright notice, this
|
||||
list of conditions and the following disclaimer.
|
||||
|
||||
2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
this list of conditions and the following disclaimer in the documentation
|
||||
and/or other materials provided with the distribution.
|
||||
|
||||
3. Neither the name of the copyright holder nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
||||
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
11
pkg/services/live/pipeline/tree/bytesconv.go
Normal file
11
pkg/services/live/pipeline/tree/bytesconv.go
Normal file
@ -0,0 +1,11 @@
|
||||
package tree
|
||||
|
||||
// StringToBytes converts a string to a byte slice.
//
// NOTE: despite the name inherited from upstream (gin/httprouter, where the
// implementation used package unsafe), this safe implementation DOES copy
// and therefore allocates. The previous doc comment claiming "without a
// memory allocation" was inaccurate.
func StringToBytes(s string) []byte {
	return []byte(s)
}

// BytesToString converts a byte slice to a string.
//
// NOTE: like StringToBytes, this safe implementation copies its input and
// allocates; the old "without a memory allocation" claim was inaccurate.
func BytesToString(b []byte) string {
	return string(b)
}
|
40
pkg/services/live/pipeline/tree/params.go
Normal file
40
pkg/services/live/pipeline/tree/params.go
Normal file
@ -0,0 +1,40 @@
|
||||
// Copyright 2013 Julien Schmidt. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style license that can be found
|
||||
// in the LICENSE file.
|
||||
package tree
|
||||
|
||||
import "context"
|
||||
|
||||
// Param is one extracted URL parameter: a key (the wildcard name) and the
// value captured from the matched path segment.
type Param struct {
	Key   string
	Value string
}

// Params is the ordered list of parameters extracted by the router; the
// first URL parameter is the first element, so reading by index is safe.
type Params []Param

// Get looks name up in the parameter list and returns the value of the
// first entry whose key matches it. The boolean result reports whether a
// match was found; on no match the value is the empty string.
func (ps Params) Get(name string) (string, bool) {
	for i := range ps {
		if ps[i].Key == name {
			return ps[i].Value, true
		}
	}
	return "", false
}
|
||||
|
||||
// paramsKey is an unexported type used as a context key so no other package
// can collide with it.
type paramsKey struct{}

// ParamsKey is the request context key under which URL Params are stored.
var ParamsKey = paramsKey{}

// ParamsFromContext pulls the URL parameters from a request context,
// or returns nil if none are present.
func ParamsFromContext(ctx context.Context) Params {
	p, _ := ctx.Value(ParamsKey).(Params)
	return p
}
|
9
pkg/services/live/pipeline/tree/readme.md
Normal file
9
pkg/services/live/pipeline/tree/readme.md
Normal file
@ -0,0 +1,9 @@
|
||||
This is a tree code from https://github.com/julienschmidt/httprouter with an important fixes made inside Gin web framework.
|
||||
|
||||
See:
|
||||
* https://github.com/julienschmidt/httprouter/pull/329
|
||||
* https://github.com/gin-gonic/gin/issues/2786
|
||||
|
||||
See also https://github.com/julienschmidt/httprouter/issues/235 – that's the reason why we can't use a custom branch patched with fixes.
|
||||
|
||||
Original LICENSE and copyright left unchanged here.
|
799
pkg/services/live/pipeline/tree/tree.go
Normal file
799
pkg/services/live/pipeline/tree/tree.go
Normal file
@ -0,0 +1,799 @@
|
||||
// Copyright 2013 Julien Schmidt. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style license that can be found
|
||||
// at https://github.com/julienschmidt/httprouter/blob/master/LICENSE
|
||||
|
||||
package tree
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"net/url"
|
||||
"strings"
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
var (
|
||||
strColon = []byte(":")
|
||||
strStar = []byte("*")
|
||||
)
|
||||
|
||||
// min returns the smaller of the two ints.
func min(a, b int) int {
	if b < a {
		return b
	}
	return a
}
|
||||
|
||||
// longestCommonPrefix returns the length (in bytes) of the longest prefix
// shared by a and b.
func longestCommonPrefix(a, b string) int {
	limit := len(a)
	if len(b) < limit {
		limit = len(b)
	}
	for i := 0; i < limit; i++ {
		if a[i] != b[i] {
			return i
		}
	}
	return limit
}
|
||||
|
||||
// addChild will add a child Node, keeping wildcards at the end
func (n *Node) addChild(child *Node) {
	if n.wildChild && len(n.children) > 0 {
		// Insert before the wildcard child so that it stays the last element
		// of the children slice (an invariant relied on by getValue).
		wildcardChild := n.children[len(n.children)-1]
		n.children = append(n.children[:len(n.children)-1], child, wildcardChild)
	} else {
		n.children = append(n.children, child)
	}
}
|
||||
|
||||
// countParams returns the number of wildcard segments in path, counting
// both ":param" and "*catchAll" markers.
func countParams(path string) uint16 {
	var n uint16
	s := StringToBytes(path)
	n += uint16(bytes.Count(s, strColon))
	n += uint16(bytes.Count(s, strStar))
	return n
}
|
||||
|
||||
// nodeType discriminates the kinds of nodes in the radix tree.
type nodeType uint8

const (
	root     nodeType = iota + 1 // the tree root
	param                        // named wildcard segment, e.g. ":metric"
	catchAll                     // trailing wildcard segment, e.g. "*rest"
)

// Handler is an opaque value stored at a leaf node of the tree.
type Handler interface{}

// New returns an empty routing tree root.
func New() *Node {
	return new(Node)
}

// Node is a node of the radix routing tree.
type Node struct {
	path      string   // the path segment owned by this node
	indices   string   // first byte of each non-wildcard child's path, parallel to children
	wildChild bool     // whether the last child is a wildcard (param/catchAll) node
	nType     nodeType
	priority  uint32   // number of registered handlers in this subtree; used to order children
	children  []*Node  // child nodes, at most 1 :param style Node at the end of the array
	handler   Handler
	fullPath  string   // full registered path for the handler at this node
}
|
||||
|
||||
// Increments priority of the given child and reorders if necessary.
// Returns the child's new position so the caller can keep following it.
func (n *Node) incrementChildPrio(pos int) int {
	cs := n.children
	cs[pos].priority++
	prio := cs[pos].priority

	// Adjust position (move to front)
	newPos := pos
	for ; newPos > 0 && cs[newPos-1].priority < prio; newPos-- {
		// Swap Node positions
		cs[newPos-1], cs[newPos] = cs[newPos], cs[newPos-1]
	}

	// Build new index char string — indices must stay parallel to children.
	if newPos != pos {
		n.indices = n.indices[:newPos] + // Unchanged prefix, might be empty
			n.indices[pos:pos+1] + // The index char we move
			n.indices[newPos:pos] + n.indices[pos+1:] // Rest without char at 'pos'
	}

	return newPos
}
|
||||
|
||||
// AddRoute registers a handler for the given path. Not concurrency-safe —
// callers must serialize tree mutation externally.
func (n *Node) AddRoute(path string, handlers Handler) {
	n.addRoute(path, handlers)
}
|
||||
|
||||
// addRoute adds a Node with the given handle to the path.
// Not concurrency-safe!
// Panics on conflicting wildcards or duplicate registration of a path.
func (n *Node) addRoute(path string, handlers Handler) {
	fullPath := path
	n.priority++

	// Empty tree
	if len(n.path) == 0 && len(n.children) == 0 {
		n.insertChild(path, fullPath, handlers)
		n.nType = root
		return
	}

	parentFullPathIndex := 0

walk:
	for {
		// Find the longest common prefix.
		// This also implies that the common prefix contains no ':' or '*'
		// since the existing key can't contain those chars.
		i := longestCommonPrefix(path, n.path)

		// Split edge: the new path diverges inside this node's segment, so
		// push this node's remainder down into a new child.
		if i < len(n.path) {
			child := Node{
				path:      n.path[i:],
				wildChild: n.wildChild,
				indices:   n.indices,
				children:  n.children,
				handler:   n.handler,
				priority:  n.priority - 1,
				fullPath:  n.fullPath,
			}

			n.children = []*Node{&child}
			// []byte for proper unicode char conversion, see #65
			n.indices = BytesToString([]byte{n.path[i]})
			n.path = path[:i]
			n.handler = nil
			n.wildChild = false
			n.fullPath = fullPath[:parentFullPathIndex+i]
		}

		// Make new Node a child of this Node
		if i < len(path) {
			path = path[i:]
			c := path[0]

			// '/' after param
			if n.nType == param && c == '/' && len(n.children) == 1 {
				parentFullPathIndex += len(n.path)
				n = n.children[0]
				n.priority++
				continue walk
			}

			// Check if a child with the next path byte exists
			for i, max := 0, len(n.indices); i < max; i++ {
				if c == n.indices[i] {
					parentFullPathIndex += len(n.path)
					i = n.incrementChildPrio(i)
					n = n.children[i]
					continue walk
				}
			}

			// Otherwise insert it
			if c != ':' && c != '*' && n.nType != catchAll {
				// []byte for proper unicode char conversion, see #65
				n.indices += BytesToString([]byte{c})
				child := &Node{
					fullPath: fullPath,
				}
				n.addChild(child)
				n.incrementChildPrio(len(n.indices) - 1)
				n = child
			} else if n.wildChild {
				// inserting a wildcard Node, need to check if it conflicts with the existing wildcard
				n = n.children[len(n.children)-1]
				n.priority++

				// Check if the wildcard matches
				if len(path) >= len(n.path) && n.path == path[:len(n.path)] &&
					// Adding a child to a catchAll is not possible
					n.nType != catchAll &&
					// Check for longer wildcard, e.g. :name and :names
					(len(n.path) >= len(path) || path[len(n.path)] == '/') {
					continue walk
				}

				// Wildcard conflict
				pathSeg := path
				if n.nType != catchAll {
					pathSeg = strings.SplitN(pathSeg, "/", 2)[0]
				}
				prefix := fullPath[:strings.Index(fullPath, pathSeg)] + n.path
				panic("'" + pathSeg +
					"' in new path '" + fullPath +
					"' conflicts with existing wildcard '" + n.path +
					"' in existing prefix '" + prefix +
					"'")
			}

			n.insertChild(path, fullPath, handlers)
			return
		}

		// Otherwise add handle to current Node
		if n.handler != nil {
			panic("handler are already registered for path '" + fullPath + "'")
		}
		n.handler = handlers
		n.fullPath = fullPath
		return
	}
}
|
||||
|
||||
// Search for a wildcard segment and check the name for invalid characters.
// Returns -1 as index, if no wildcard was found. "valid" is false when the
// wildcard name itself contains another ':' or '*'.
func findWildcard(path string) (wildcard string, i int, valid bool) {
	// Find start
	for start, c := range []byte(path) {
		// A wildcard starts with ':' (param) or '*' (catch-all)
		if c != ':' && c != '*' {
			continue
		}

		// Find end and check for invalid characters
		valid = true
		for end, c := range []byte(path[start+1:]) {
			switch c {
			case '/':
				return path[start : start+1+end], start, valid
			case ':', '*':
				valid = false
			}
		}
		return path[start:], start, valid
	}
	return "", -1, false
}
|
||||
|
||||
// insertChild inserts the remaining path below n, creating param/catchAll
// nodes for each wildcard segment, and stores the handler at the leaf.
// Panics on malformed wildcard usage.
func (n *Node) insertChild(path string, fullPath string, handlers Handler) {
	for {
		// Find prefix until first wildcard
		wildcard, i, valid := findWildcard(path)
		if i < 0 { // No wildcard found
			break
		}

		// The wildcard name must not contain ':' and '*'
		if !valid {
			panic("only one wildcard per path segment is allowed, has: '" +
				wildcard + "' in path '" + fullPath + "'")
		}

		// check if the wildcard has a name
		if len(wildcard) < 2 {
			panic("wildcards must be named with a non-empty name in path '" + fullPath + "'")
		}

		if wildcard[0] == ':' { // param
			if i > 0 {
				// Insert prefix before the current wildcard
				n.path = path[:i]
				path = path[i:]
			}

			child := &Node{
				nType:    param,
				path:     wildcard,
				fullPath: fullPath,
			}
			n.addChild(child)
			n.wildChild = true
			n = child
			n.priority++

			// if the path doesn't end with the wildcard, then there
			// will be another non-wildcard subpath starting with '/'
			if len(wildcard) < len(path) {
				path = path[len(wildcard):]

				child := &Node{
					priority: 1,
					fullPath: fullPath,
				}
				n.addChild(child)
				n = child
				continue
			}

			// Otherwise we're done. Insert the handle in the new leaf
			n.handler = handlers
			return
		}

		// catchAll
		if i+len(wildcard) != len(path) {
			panic("catch-all routes are only allowed at the end of the path in path '" + fullPath + "'")
		}

		if len(n.path) > 0 && n.path[len(n.path)-1] == '/' {
			panic("catch-all conflicts with existing handle for the path segment root in path '" + fullPath + "'")
		}

		// currently fixed width 1 for '/'
		i--
		if path[i] != '/' {
			panic("no / before catch-all in path '" + fullPath + "'")
		}

		n.path = path[:i]

		// First Node: catchAll Node with empty path
		child := &Node{
			wildChild: true,
			nType:     catchAll,
			fullPath:  fullPath,
		}

		n.addChild(child)
		n.indices = string('/')
		n = child
		n.priority++

		// second Node: Node holding the variable
		child = &Node{
			path:     path[i:],
			nType:    catchAll,
			handler:  handlers,
			priority: 1,
			fullPath: fullPath,
		}
		n.children = []*Node{child}

		return
	}

	// If no wildcard was found, simply insert the path and handle
	n.path = path
	n.handler = handlers
	n.fullPath = fullPath
}
|
||||
|
||||
// NodeValue holds return values of (*Node).getValue method
type NodeValue struct {
	Handler  Handler // nil when no route matched
	Params   *Params // extracted wildcard parameters, nil if none captured
	Tsr      bool    // trailing-slash-redirect recommendation
	FullPath string  // full registered pattern of the matched route
}

// GetValue looks path up in the tree. See getValue for details.
func (n *Node) GetValue(path string, params *Params, unescape bool) (value NodeValue) {
	return n.getValue(path, params, unescape)
}
|
||||
|
||||
// Returns the handle registered with the given path (key). The values of
// wildcards are saved to a map.
// If no handle can be found, a TSR (trailing slash redirect) recommendation is
// made if a handle exists with an extra (without the) trailing slash for the
// given path.
// nolint:gocyclo
func (n *Node) getValue(path string, params *Params, unescape bool) (value NodeValue) {
	var (
		skippedPath string
		latestNode  = n // Caching the latest Node
	)

walk: // Outer loop for walking the tree
	for {
		prefix := n.path
		if len(path) > len(prefix) {
			if path[:len(prefix)] == prefix {
				path = path[len(prefix):]

				// Try all the non-wildcard children first by matching the indices
				idxc := path[0]
				for i, c := range []byte(n.indices) {
					if c == idxc {
						// strings.HasPrefix(n.children[len(n.children)-1].path, ":") == n.wildChild
						if n.wildChild {
							// Remember this node so we can backtrack to its
							// wildcard child if the static branch dead-ends.
							skippedPath = prefix + path
							latestNode = &Node{
								path:      n.path,
								wildChild: n.wildChild,
								nType:     n.nType,
								priority:  n.priority,
								children:  n.children,
								handler:   n.handler,
								fullPath:  n.fullPath,
							}
						}

						n = n.children[i]
						continue walk
					}
				}
				// If the path at the end of the loop is not equal to '/' and the current Node has no child nodes
				// the current Node needs to be equal to the latest matching Node
				matched := path != "/" && !n.wildChild
				if matched {
					n = latestNode
				}

				// If there is no wildcard pattern, recommend a redirection
				if !n.wildChild {
					// Nothing found.
					// We can recommend to redirect to the same URL without a
					// trailing slash if a leaf exists for that path.
					value.Tsr = path == "/" && n.handler != nil
					return
				}

				// Handle wildcard child, which is always at the end of the array
				n = n.children[len(n.children)-1]

				switch n.nType {
				case param:
					// fix truncate the parameter
					// tree_test.go line: 204
					if matched {
						path = prefix + path
						// The saved path is used after the prefix route is intercepted by matching
						if n.indices == "/" {
							path = skippedPath[1:]
						}
					}

					// Find param end (either '/' or path end)
					end := 0
					for end < len(path) && path[end] != '/' {
						end++
					}

					// Save param value
					if params != nil && cap(*params) > 0 {
						if value.Params == nil {
							value.Params = params
						}
						// Expand slice within preallocated capacity
						i := len(*value.Params)
						*value.Params = (*value.Params)[:i+1]
						val := path[:end]
						if unescape {
							if v, err := url.QueryUnescape(val); err == nil {
								val = v
							}
						}
						(*value.Params)[i] = Param{
							Key:   n.path[1:],
							Value: val,
						}
					}

					// we need to go deeper!
					if end < len(path) {
						if len(n.children) > 0 {
							path = path[end:]
							n = n.children[0]
							continue walk
						}

						// ... but we can't
						value.Tsr = len(path) == end+1
						return
					}

					if value.Handler = n.handler; value.Handler != nil {
						value.FullPath = n.fullPath
						return
					}
					if len(n.children) == 1 {
						// No handle found. Check if a handle for this path + a
						// trailing slash exists for TSR recommendation
						n = n.children[0]
						value.Tsr = n.path == "/" && n.handler != nil
					}
					return

				case catchAll:
					// Save param value
					if params != nil {
						if value.Params == nil {
							value.Params = params
						}
						// Expand slice within preallocated capacity
						i := len(*value.Params)
						*value.Params = (*value.Params)[:i+1]
						val := path
						if unescape {
							if v, err := url.QueryUnescape(path); err == nil {
								val = v
							}
						}
						(*value.Params)[i] = Param{
							Key:   n.path[2:],
							Value: val,
						}
					}

					value.Handler = n.handler
					value.FullPath = n.fullPath
					return

				default:
					panic("invalid Node type")
				}
			}
		}

		if path == prefix {
			// If the current path does not equal '/' and the Node does not have a registered handle and the most recently matched Node has a child Node
			// the current Node needs to be equal to the latest matching Node
			if latestNode.wildChild && n.handler == nil && path != "/" {
				n = latestNode.children[len(latestNode.children)-1]
			}
			// We should have reached the Node containing the handle.
			// Check if this Node has a handle registered.
			if value.Handler = n.handler; value.Handler != nil {
				value.FullPath = n.fullPath
				return
			}

			// If there is no handle for this route, but this route has a
			// wildcard child, there must be a handle for this path with an
			// additional trailing slash
			if path == "/" && n.wildChild && n.nType != root {
				value.Tsr = true
				return
			}

			// No handle found. Check if a handle for this path + a
			// trailing slash exists for trailing slash recommendation
			for i, c := range []byte(n.indices) {
				if c == '/' {
					n = n.children[i]
					value.Tsr = (len(n.path) == 1 && n.handler != nil) ||
						(n.nType == catchAll && n.children[0].handler != nil)
					return
				}
			}

			return
		}

		if path != "/" && len(skippedPath) > 0 && strings.HasSuffix(skippedPath, path) {
			path = skippedPath
			// Reduce the number of cycles
			n, latestNode = latestNode, n
			// skippedPath cannot execute
			// example:
			// * /:cc/cc
			// call /a/cc expectations:match/200 Actual:match/200
			// call /a/dd expectations:unmatch/404 Actual: panic
			// call /addr/dd/aa expectations:unmatch/404 Actual: panic
			// skippedPath: It can only be executed if the secondary route is not found
			skippedPath = ""
			continue walk
		}

		// Nothing found. We can recommend to redirect to the same URL with an
		// extra trailing slash if a leaf exists for that path
		value.Tsr = path == "/" ||
			(len(prefix) == len(path)+1 && n.handler != nil)
		return
	}
}
|
||||
|
||||
// Makes a case-insensitive lookup of the given path and tries to find a handler.
|
||||
// It can optionally also fix trailing slashes.
|
||||
// It returns the case-corrected path and a bool indicating whether the lookup
|
||||
// was successful.
|
||||
func (n *Node) findCaseInsensitivePath(path string, fixTrailingSlash bool) ([]byte, bool) {
|
||||
const stackBufSize = 128
|
||||
|
||||
// Use a static sized buffer on the stack in the common case.
|
||||
// If the path is too long, allocate a buffer on the heap instead.
|
||||
buf := make([]byte, 0, stackBufSize)
|
||||
if length := len(path) + 1; length > stackBufSize {
|
||||
buf = make([]byte, 0, length)
|
||||
}
|
||||
|
||||
ciPath := n.findCaseInsensitivePathRec(
|
||||
path,
|
||||
buf, // Preallocate enough memory for new path
|
||||
[4]byte{}, // Empty rune buffer
|
||||
fixTrailingSlash,
|
||||
)
|
||||
|
||||
return ciPath, ciPath != nil
|
||||
}
|
||||
|
||||
// shiftNRuneBytes returns a copy of rb shifted left by n bytes, with the
// vacated trailing positions zero-filled. Any shift outside the range
// [0, 3] — including negative values — yields an all-zero buffer, since a
// UTF-8 rune occupies at most 4 bytes.
func shiftNRuneBytes(rb [4]byte, n int) [4]byte {
	switch {
	case n == 0:
		// Nothing consumed yet; return the buffer unchanged.
		return rb
	case n > 0 && n < 4:
		// Slide the remaining bytes to the front; out starts zeroed,
		// so the tail is zero-filled automatically.
		var out [4]byte
		copy(out[:], rb[n:])
		return out
	default:
		// Whole rune consumed (or invalid shift): empty buffer.
		return [4]byte{}
	}
}
|
||||
|
||||
// findCaseInsensitivePathRec is the recursive worker behind
// findCaseInsensitivePath. It walks the tree matching path case-insensitively,
// accumulating the case-corrected result in ciPath. rb carries the encoded
// bytes of the rune currently being matched across child-index comparisons
// (and across recursion levels), so multi-byte runes are matched one byte at
// a time against Node.indices. Returns the corrected path, or nil when no
// match exists (after optionally trying to repair a trailing slash).
// nolint:gocyclo
func (n *Node) findCaseInsensitivePathRec(path string, ciPath []byte, rb [4]byte, fixTrailingSlash bool) []byte {
	npLen := len(n.path)

walk: // Outer loop for walking the tree
	for len(path) >= npLen && (npLen == 0 || strings.EqualFold(path[1:npLen], n.path[1:])) {
		// Add common prefix to result
		oldPath := path
		path = path[npLen:]
		ciPath = append(ciPath, n.path...)

		if len(path) == 0 {
			// We should have reached the Node containing the handle.
			// Check if this Node has a handle registered.
			if n.handler != nil {
				return ciPath
			}

			// No handle found.
			// Try to fix the path by adding a trailing slash
			if fixTrailingSlash {
				for i, c := range []byte(n.indices) {
					if c == '/' {
						n = n.children[i]
						if (len(n.path) == 1 && n.handler != nil) ||
							(n.nType == catchAll && n.children[0].handler != nil) {
							return append(ciPath, '/')
						}
						return nil
					}
				}
			}
			return nil
		}

		// If this Node does not have a wildcard (param or catchAll) child,
		// we can just look up the next child Node and continue to walk down
		// the tree
		if !n.wildChild {
			// Skip rune bytes already processed
			rb = shiftNRuneBytes(rb, npLen)

			if rb[0] != 0 {
				// Old rune not finished: keep matching its remaining
				// bytes against the child indices.
				idxc := rb[0]
				for i, c := range []byte(n.indices) {
					if c == idxc {
						// continue with child Node
						n = n.children[i]
						npLen = len(n.path)
						continue walk
					}
				}
			} else {
				// Process a new rune
				var rv rune

				// Find rune start.
				// Runes are up to 4 byte long,
				// -4 would definitely be another rune.
				var off int
				for max := min(npLen, 3); off < max; off++ {
					if i := npLen - off; utf8.RuneStart(oldPath[i]) {
						// read rune from cached path
						rv, _ = utf8.DecodeRuneInString(oldPath[i:])
						break
					}
				}

				// Calculate lowercase bytes of current rune
				lo := unicode.ToLower(rv)
				utf8.EncodeRune(rb[:], lo)

				// Skip already processed bytes
				rb = shiftNRuneBytes(rb, off)

				idxc := rb[0]
				for i, c := range []byte(n.indices) {
					// Lowercase matches
					if c == idxc {
						// must use a recursive approach since both the
						// uppercase byte and the lowercase byte might exist
						// as an index
						if out := n.children[i].findCaseInsensitivePathRec(
							path, ciPath, rb, fixTrailingSlash,
						); out != nil {
							return out
						}
						break
					}
				}

				// If we found no match, the same for the uppercase rune,
				// if it differs
				if up := unicode.ToUpper(rv); up != lo {
					utf8.EncodeRune(rb[:], up)
					rb = shiftNRuneBytes(rb, off)

					idxc := rb[0]
					for i, c := range []byte(n.indices) {
						// Uppercase matches
						if c == idxc {
							// Continue with child Node
							n = n.children[i]
							npLen = len(n.path)
							continue walk
						}
					}
				}
			}

			// Nothing found. We can recommend to redirect to the same URL
			// without a trailing slash if a leaf exists for that path
			if fixTrailingSlash && path == "/" && n.handler != nil {
				return ciPath
			}
			return nil
		}

		// Wildcard child: it is stored as the (only considered) first child.
		n = n.children[0]
		switch n.nType {
		case param:
			// Find param end (either '/' or path end)
			end := 0
			for end < len(path) && path[end] != '/' {
				end++
			}

			// Add param value to case insensitive path
			// (param values are matched verbatim, not case-folded).
			ciPath = append(ciPath, path[:end]...)

			// We need to go deeper!
			if end < len(path) {
				if len(n.children) > 0 {
					// Continue with child Node
					n = n.children[0]
					npLen = len(n.path)
					path = path[end:]
					continue
				}

				// ... but we can't
				if fixTrailingSlash && len(path) == end+1 {
					return ciPath
				}
				return nil
			}

			if n.handler != nil {
				return ciPath
			}

			if fixTrailingSlash && len(n.children) == 1 {
				// No handle found. Check if a handle for this path + a
				// trailing slash exists
				n = n.children[0]
				if n.path == "/" && n.handler != nil {
					return append(ciPath, '/')
				}
			}

			return nil

		case catchAll:
			// Catch-all consumes the rest of the path verbatim.
			return append(ciPath, path...)

		default:
			panic("invalid Node type")
		}
	}

	// Nothing found.
	// Try to fix the path by adding / removing a trailing slash
	if fixTrailingSlash {
		if path == "/" {
			return ciPath
		}
		if len(path)+1 == npLen && n.path[len(path)] == '/' &&
			strings.EqualFold(path[1:], n.path[1:len(path)]) && n.handler != nil {
			return append(ciPath, n.path...)
		}
	}
	return nil
}
|
891
pkg/services/live/pipeline/tree/tree_test.go
Normal file
891
pkg/services/live/pipeline/tree/tree_test.go
Normal file
@ -0,0 +1,891 @@
|
||||
// Copyright 2013 Julien Schmidt. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style license that can be found
|
||||
// at https://github.com/julienschmidt/httprouter/blob/master/LICENSE
|
||||
|
||||
package tree
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// fakeHandlerValue records which fake handler ran last.
// Used as a workaround since we can't compare functions or their addresses.
var fakeHandlerValue string

// fakeHandler returns a Handler that, when invoked, stores val in
// fakeHandlerValue so tests can identify which route was dispatched.
func fakeHandler(val string) Handler {
	return func(http.ResponseWriter, *http.Request, Params) {
		fakeHandlerValue = val
	}
}
|
||||
|
||||
// testRequests describes a set of lookup expectations for checkRequests.
type testRequests []struct {
	path       string // request path to look up
	nilHandler bool   // true if the lookup must NOT find a handler
	route      string // registered route expected to handle the path
	ps         Params // parameters expected to be captured
}
|
||||
|
||||
func getParams() *Params {
|
||||
ps := make(Params, 0, 20)
|
||||
return &ps
|
||||
}
|
||||
|
||||
func checkRequests(t *testing.T, tree *Node, requests testRequests, unescapes ...bool) {
|
||||
unescape := false
|
||||
if len(unescapes) >= 1 {
|
||||
unescape = unescapes[0]
|
||||
}
|
||||
|
||||
for _, request := range requests {
|
||||
value := tree.getValue(request.path, getParams(), unescape)
|
||||
|
||||
if value.Handler == nil {
|
||||
if !request.nilHandler {
|
||||
t.Errorf("handle mismatch for route '%s': Expected non-nil handle", request.path)
|
||||
}
|
||||
} else if request.nilHandler {
|
||||
t.Errorf("handle mismatch for route '%s': Expected nil handle", request.path)
|
||||
} else {
|
||||
handler, ok := value.Handler.(func(http.ResponseWriter, *http.Request, Params))
|
||||
if !ok {
|
||||
t.Errorf("invalid handler type for route '%s': %T", request.path, value.Handler)
|
||||
}
|
||||
handler(nil, nil, nil)
|
||||
if fakeHandlerValue != request.route {
|
||||
t.Errorf("handle mismatch for route '%s': Wrong handle (%s != %s)", request.path, fakeHandlerValue, request.route)
|
||||
}
|
||||
}
|
||||
|
||||
if value.Params != nil {
|
||||
if !reflect.DeepEqual(*value.Params, request.ps) {
|
||||
t.Errorf("Params mismatch for route '%s'", request.path)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func checkPriorities(t *testing.T, n *Node) uint32 {
|
||||
var prio uint32
|
||||
for i := range n.children {
|
||||
prio += checkPriorities(t, n.children[i])
|
||||
}
|
||||
|
||||
if n.handler != nil {
|
||||
prio++
|
||||
}
|
||||
|
||||
if n.priority != prio {
|
||||
t.Errorf(
|
||||
"priority mismatch for Node '%s': is %d, should be %d",
|
||||
n.path, n.priority, prio,
|
||||
)
|
||||
}
|
||||
|
||||
return prio
|
||||
}
|
||||
|
||||
func TestCountParams(t *testing.T) {
|
||||
if countParams("/path/:param1/static/*catch-all") != 2 {
|
||||
t.Fail()
|
||||
}
|
||||
if countParams(strings.Repeat("/:param", 256)) != 256 {
|
||||
t.Fail()
|
||||
}
|
||||
}
|
||||
|
||||
// TestTreeAddAndGet exercises static-route insertion and lookup, including
// multi-byte (UTF-8) paths and near-miss lookups that must not match.
func TestTreeAddAndGet(t *testing.T) {
	tree := &Node{}

	routes := [...]string{
		"/hi",
		"/contact",
		"/co",
		"/c",
		"/a",
		"/ab",
		"/doc/",
		"/doc/go_faq.html",
		"/doc/go1.html",
		"/α",
		"/β",
	}
	for _, route := range routes {
		tree.addRoute(route, fakeHandler(route))
	}

	checkRequests(t, tree, testRequests{
		{"/a", false, "/a", nil},
		{"/", true, "", nil},
		{"/hi", false, "/hi", nil},
		{"/contact", false, "/contact", nil},
		{"/co", false, "/co", nil},
		{"/con", true, "", nil},  // key mismatch
		{"/cona", true, "", nil}, // key mismatch
		{"/no", true, "", nil},   // no matching child
		{"/ab", false, "/ab", nil},
		{"/α", false, "/α", nil},
		{"/β", false, "/β", nil},
	})

	checkPriorities(t, tree)
}
|
||||
|
||||
// TestTreeWildcard exercises mixed static, param (:x) and catch-all (*x)
// routes, including the overlapping-wildcard cases from the gin-gonic
// backtracking fix (see the inline PR reference below).
func TestTreeWildcard(t *testing.T) {
	tree := &Node{}

	routes := [...]string{
		"/",
		"/cmd/:tool/",
		"/cmd/:tool/:sub",
		"/cmd/whoami",
		"/cmd/whoami/root",
		"/cmd/whoami/root/",
		"/src/*filepath",
		"/search/",
		"/search/:query",
		"/search/gin-gonic",
		"/search/google",
		"/user_:name",
		"/user_:name/about",
		"/files/:dir/*filepath",
		"/doc/",
		"/doc/go_faq.html",
		"/doc/go1.html",
		"/info/:user/public",
		"/info/:user/project/:project",
		"/info/:user/project/golang",
		"/aa/*xx",
		"/ab/*xx",
		"/:cc",
		"/:cc/cc",
		"/:cc/:dd/ee",
		"/:cc/:dd/:ee/ff",
		"/:cc/:dd/:ee/:ff/gg",
		"/:cc/:dd/:ee/:ff/:gg/hh",
		"/get/test/abc/",
		"/get/:param/abc/",
		"/something/:paramname/thirdthing",
		"/something/secondthing/test",
		"/get/abc",
		"/get/:param",
		"/get/abc/123abc",
		"/get/abc/:param",
		"/get/abc/123abc/xxx8",
		"/get/abc/123abc/:param",
		"/get/abc/123abc/xxx8/1234",
		"/get/abc/123abc/xxx8/:param",
		"/get/abc/123abc/xxx8/1234/ffas",
		"/get/abc/123abc/xxx8/1234/:param",
		"/get/abc/123abc/xxx8/1234/kkdd/12c",
		"/get/abc/123abc/xxx8/1234/kkdd/:param",
		"/get/abc/:param/test",
		"/get/abc/123abd/:param",
		"/get/abc/123abddd/:param",
		"/get/abc/123/:param",
		"/get/abc/123abg/:param",
		"/get/abc/123abf/:param",
		"/get/abc/123abfff/:param",
	}
	for _, route := range routes {
		tree.addRoute(route, fakeHandler(route))
	}

	checkRequests(t, tree, testRequests{
		{"/", false, "/", nil},
		{"/cmd/test", true, "/cmd/:tool/", Params{Param{"tool", "test"}}},
		{"/cmd/test/", false, "/cmd/:tool/", Params{Param{"tool", "test"}}},
		{"/cmd/test/3", false, "/cmd/:tool/:sub", Params{Param{Key: "tool", Value: "test"}, Param{Key: "sub", Value: "3"}}},
		{"/cmd/who", true, "/cmd/:tool/", Params{Param{"tool", "who"}}},
		{"/cmd/who/", false, "/cmd/:tool/", Params{Param{"tool", "who"}}},
		{"/cmd/whoami", false, "/cmd/whoami", nil},
		{"/cmd/whoami/", true, "/cmd/whoami", nil},
		{"/cmd/whoami/r", false, "/cmd/:tool/:sub", Params{Param{Key: "tool", Value: "whoami"}, Param{Key: "sub", Value: "r"}}},
		{"/cmd/whoami/r/", true, "/cmd/:tool/:sub", Params{Param{Key: "tool", Value: "whoami"}, Param{Key: "sub", Value: "r"}}},
		{"/cmd/whoami/root", false, "/cmd/whoami/root", nil},
		{"/cmd/whoami/root/", false, "/cmd/whoami/root/", nil},
		{"/src/", false, "/src/*filepath", Params{Param{Key: "filepath", Value: "/"}}},
		{"/src/some/file.png", false, "/src/*filepath", Params{Param{Key: "filepath", Value: "/some/file.png"}}},
		{"/search/", false, "/search/", nil},
		{"/search/someth!ng+in+ünìcodé", false, "/search/:query", Params{Param{Key: "query", Value: "someth!ng+in+ünìcodé"}}},
		{"/search/someth!ng+in+ünìcodé/", true, "", Params{Param{Key: "query", Value: "someth!ng+in+ünìcodé"}}},
		{"/search/gin", false, "/search/:query", Params{Param{"query", "gin"}}},
		{"/search/gin-gonic", false, "/search/gin-gonic", nil},
		{"/search/google", false, "/search/google", nil},
		{"/user_gopher", false, "/user_:name", Params{Param{Key: "name", Value: "gopher"}}},
		{"/user_gopher/about", false, "/user_:name/about", Params{Param{Key: "name", Value: "gopher"}}},
		{"/files/js/inc/framework.js", false, "/files/:dir/*filepath", Params{Param{Key: "dir", Value: "js"}, Param{Key: "filepath", Value: "/inc/framework.js"}}},
		{"/info/gordon/public", false, "/info/:user/public", Params{Param{Key: "user", Value: "gordon"}}},
		{"/info/gordon/project/go", false, "/info/:user/project/:project", Params{Param{Key: "user", Value: "gordon"}, Param{Key: "project", Value: "go"}}},
		{"/info/gordon/project/golang", false, "/info/:user/project/golang", Params{Param{Key: "user", Value: "gordon"}}},
		{"/aa/aa", false, "/aa/*xx", Params{Param{Key: "xx", Value: "/aa"}}},
		{"/ab/ab", false, "/ab/*xx", Params{Param{Key: "xx", Value: "/ab"}}},
		{"/a", false, "/:cc", Params{Param{Key: "cc", Value: "a"}}},
		// * Error with argument being intercepted
		// new PR handle (/all /all/cc /a/cc)
		// fix PR: https://github.com/gin-gonic/gin/pull/2796
		{"/all", false, "/:cc", Params{Param{Key: "cc", Value: "all"}}},
		{"/d", false, "/:cc", Params{Param{Key: "cc", Value: "d"}}},
		{"/ad", false, "/:cc", Params{Param{Key: "cc", Value: "ad"}}},
		{"/dd", false, "/:cc", Params{Param{Key: "cc", Value: "dd"}}},
		{"/dddaa", false, "/:cc", Params{Param{Key: "cc", Value: "dddaa"}}},
		{"/aa", false, "/:cc", Params{Param{Key: "cc", Value: "aa"}}},
		{"/aaa", false, "/:cc", Params{Param{Key: "cc", Value: "aaa"}}},
		{"/aaa/cc", false, "/:cc/cc", Params{Param{Key: "cc", Value: "aaa"}}},
		{"/ab", false, "/:cc", Params{Param{Key: "cc", Value: "ab"}}},
		{"/abb", false, "/:cc", Params{Param{Key: "cc", Value: "abb"}}},
		{"/abb/cc", false, "/:cc/cc", Params{Param{Key: "cc", Value: "abb"}}},
		{"/allxxxx", false, "/:cc", Params{Param{Key: "cc", Value: "allxxxx"}}},
		{"/alldd", false, "/:cc", Params{Param{Key: "cc", Value: "alldd"}}},
		{"/all/cc", false, "/:cc/cc", Params{Param{Key: "cc", Value: "all"}}},
		{"/a/cc", false, "/:cc/cc", Params{Param{Key: "cc", Value: "a"}}},
		{"/cc/cc", false, "/:cc/cc", Params{Param{Key: "cc", Value: "cc"}}},
		{"/ccc/cc", false, "/:cc/cc", Params{Param{Key: "cc", Value: "ccc"}}},
		{"/deedwjfs/cc", false, "/:cc/cc", Params{Param{Key: "cc", Value: "deedwjfs"}}},
		{"/acllcc/cc", false, "/:cc/cc", Params{Param{Key: "cc", Value: "acllcc"}}},
		{"/get/test/abc/", false, "/get/test/abc/", nil},
		{"/get/te/abc/", false, "/get/:param/abc/", Params{Param{Key: "param", Value: "te"}}},
		{"/get/testaa/abc/", false, "/get/:param/abc/", Params{Param{Key: "param", Value: "testaa"}}},
		{"/get/xx/abc/", false, "/get/:param/abc/", Params{Param{Key: "param", Value: "xx"}}},
		{"/get/tt/abc/", false, "/get/:param/abc/", Params{Param{Key: "param", Value: "tt"}}},
		{"/get/a/abc/", false, "/get/:param/abc/", Params{Param{Key: "param", Value: "a"}}},
		{"/get/t/abc/", false, "/get/:param/abc/", Params{Param{Key: "param", Value: "t"}}},
		{"/get/aa/abc/", false, "/get/:param/abc/", Params{Param{Key: "param", Value: "aa"}}},
		{"/get/abas/abc/", false, "/get/:param/abc/", Params{Param{Key: "param", Value: "abas"}}},
		{"/something/secondthing/test", false, "/something/secondthing/test", nil},
		{"/something/abcdad/thirdthing", false, "/something/:paramname/thirdthing", Params{Param{Key: "paramname", Value: "abcdad"}}},
		{"/something/secondthingaaaa/thirdthing", false, "/something/:paramname/thirdthing", Params{Param{Key: "paramname", Value: "secondthingaaaa"}}},
		{"/something/se/thirdthing", false, "/something/:paramname/thirdthing", Params{Param{Key: "paramname", Value: "se"}}},
		{"/something/s/thirdthing", false, "/something/:paramname/thirdthing", Params{Param{Key: "paramname", Value: "s"}}},
		{"/c/d/ee", false, "/:cc/:dd/ee", Params{Param{Key: "cc", Value: "c"}, Param{Key: "dd", Value: "d"}}},
		{"/c/d/e/ff", false, "/:cc/:dd/:ee/ff", Params{Param{Key: "cc", Value: "c"}, Param{Key: "dd", Value: "d"}, Param{Key: "ee", Value: "e"}}},
		{"/c/d/e/f/gg", false, "/:cc/:dd/:ee/:ff/gg", Params{Param{Key: "cc", Value: "c"}, Param{Key: "dd", Value: "d"}, Param{Key: "ee", Value: "e"}, Param{Key: "ff", Value: "f"}}},
		{"/c/d/e/f/g/hh", false, "/:cc/:dd/:ee/:ff/:gg/hh", Params{Param{Key: "cc", Value: "c"}, Param{Key: "dd", Value: "d"}, Param{Key: "ee", Value: "e"}, Param{Key: "ff", Value: "f"}, Param{Key: "gg", Value: "g"}}},
		{"/cc/dd/ee/ff/gg/hh", false, "/:cc/:dd/:ee/:ff/:gg/hh", Params{Param{Key: "cc", Value: "cc"}, Param{Key: "dd", Value: "dd"}, Param{Key: "ee", Value: "ee"}, Param{Key: "ff", Value: "ff"}, Param{Key: "gg", Value: "gg"}}},
		{"/get/abc", false, "/get/abc", nil},
		{"/get/a", false, "/get/:param", Params{Param{Key: "param", Value: "a"}}},
		{"/get/abz", false, "/get/:param", Params{Param{Key: "param", Value: "abz"}}},
		{"/get/12a", false, "/get/:param", Params{Param{Key: "param", Value: "12a"}}},
		{"/get/abcd", false, "/get/:param", Params{Param{Key: "param", Value: "abcd"}}},
		{"/get/abc/123abc", false, "/get/abc/123abc", nil},
		{"/get/abc/12", false, "/get/abc/:param", Params{Param{Key: "param", Value: "12"}}},
		{"/get/abc/123ab", false, "/get/abc/:param", Params{Param{Key: "param", Value: "123ab"}}},
		{"/get/abc/xyz", false, "/get/abc/:param", Params{Param{Key: "param", Value: "xyz"}}},
		{"/get/abc/123abcddxx", false, "/get/abc/:param", Params{Param{Key: "param", Value: "123abcddxx"}}},
		{"/get/abc/123abc/xxx8", false, "/get/abc/123abc/xxx8", nil},
		{"/get/abc/123abc/x", false, "/get/abc/123abc/:param", Params{Param{Key: "param", Value: "x"}}},
		{"/get/abc/123abc/xxx", false, "/get/abc/123abc/:param", Params{Param{Key: "param", Value: "xxx"}}},
		{"/get/abc/123abc/abc", false, "/get/abc/123abc/:param", Params{Param{Key: "param", Value: "abc"}}},
		{"/get/abc/123abc/xxx8xxas", false, "/get/abc/123abc/:param", Params{Param{Key: "param", Value: "xxx8xxas"}}},
		{"/get/abc/123abc/xxx8/1234", false, "/get/abc/123abc/xxx8/1234", nil},
		{"/get/abc/123abc/xxx8/1", false, "/get/abc/123abc/xxx8/:param", Params{Param{Key: "param", Value: "1"}}},
		{"/get/abc/123abc/xxx8/123", false, "/get/abc/123abc/xxx8/:param", Params{Param{Key: "param", Value: "123"}}},
		{"/get/abc/123abc/xxx8/78k", false, "/get/abc/123abc/xxx8/:param", Params{Param{Key: "param", Value: "78k"}}},
		{"/get/abc/123abc/xxx8/1234xxxd", false, "/get/abc/123abc/xxx8/:param", Params{Param{Key: "param", Value: "1234xxxd"}}},
		{"/get/abc/123abc/xxx8/1234/ffas", false, "/get/abc/123abc/xxx8/1234/ffas", nil},
		{"/get/abc/123abc/xxx8/1234/f", false, "/get/abc/123abc/xxx8/1234/:param", Params{Param{Key: "param", Value: "f"}}},
		{"/get/abc/123abc/xxx8/1234/ffa", false, "/get/abc/123abc/xxx8/1234/:param", Params{Param{Key: "param", Value: "ffa"}}},
		{"/get/abc/123abc/xxx8/1234/kka", false, "/get/abc/123abc/xxx8/1234/:param", Params{Param{Key: "param", Value: "kka"}}},
		{"/get/abc/123abc/xxx8/1234/ffas321", false, "/get/abc/123abc/xxx8/1234/:param", Params{Param{Key: "param", Value: "ffas321"}}},
		{"/get/abc/123abc/xxx8/1234/kkdd/12c", false, "/get/abc/123abc/xxx8/1234/kkdd/12c", nil},
		{"/get/abc/123abc/xxx8/1234/kkdd/1", false, "/get/abc/123abc/xxx8/1234/kkdd/:param", Params{Param{Key: "param", Value: "1"}}},
		{"/get/abc/123abc/xxx8/1234/kkdd/12", false, "/get/abc/123abc/xxx8/1234/kkdd/:param", Params{Param{Key: "param", Value: "12"}}},
		{"/get/abc/123abc/xxx8/1234/kkdd/12b", false, "/get/abc/123abc/xxx8/1234/kkdd/:param", Params{Param{Key: "param", Value: "12b"}}},
		{"/get/abc/123abc/xxx8/1234/kkdd/34", false, "/get/abc/123abc/xxx8/1234/kkdd/:param", Params{Param{Key: "param", Value: "34"}}},
		{"/get/abc/123abc/xxx8/1234/kkdd/12c2e3", false, "/get/abc/123abc/xxx8/1234/kkdd/:param", Params{Param{Key: "param", Value: "12c2e3"}}},
		{"/get/abc/12/test", false, "/get/abc/:param/test", Params{Param{Key: "param", Value: "12"}}},
		{"/get/abc/123abdd/test", false, "/get/abc/:param/test", Params{Param{Key: "param", Value: "123abdd"}}},
		{"/get/abc/123abdddf/test", false, "/get/abc/:param/test", Params{Param{Key: "param", Value: "123abdddf"}}},
		{"/get/abc/123ab/test", false, "/get/abc/:param/test", Params{Param{Key: "param", Value: "123ab"}}},
		{"/get/abc/123abgg/test", false, "/get/abc/:param/test", Params{Param{Key: "param", Value: "123abgg"}}},
		{"/get/abc/123abff/test", false, "/get/abc/:param/test", Params{Param{Key: "param", Value: "123abff"}}},
		{"/get/abc/123abffff/test", false, "/get/abc/:param/test", Params{Param{Key: "param", Value: "123abffff"}}},
		{"/get/abc/123abd/test", false, "/get/abc/123abd/:param", Params{Param{Key: "param", Value: "test"}}},
		{"/get/abc/123abddd/test", false, "/get/abc/123abddd/:param", Params{Param{Key: "param", Value: "test"}}},
		{"/get/abc/123/test22", false, "/get/abc/123/:param", Params{Param{Key: "param", Value: "test22"}}},
		{"/get/abc/123abg/test", false, "/get/abc/123abg/:param", Params{Param{Key: "param", Value: "test"}}},
		{"/get/abc/123abf/testss", false, "/get/abc/123abf/:param", Params{Param{Key: "param", Value: "testss"}}},
		{"/get/abc/123abfff/te", false, "/get/abc/123abfff/:param", Params{Param{Key: "param", Value: "te"}}},
	})

	checkPriorities(t, tree)
}
|
||||
|
||||
// TestUnescapeParameters verifies lookups with the unescape flag enabled:
// captured param values are URL-unescaped when possible, and left verbatim
// when the escape sequence is invalid (e.g. bare '%' runs).
func TestUnescapeParameters(t *testing.T) {
	tree := &Node{}

	routes := [...]string{
		"/",
		"/cmd/:tool/:sub",
		"/cmd/:tool/",
		"/src/*filepath",
		"/search/:query",
		"/files/:dir/*filepath",
		"/info/:user/project/:project",
		"/info/:user",
	}
	for _, route := range routes {
		tree.addRoute(route, fakeHandler(route))
	}

	checkRequests(t, tree, testRequests{
		{"/", false, "/", nil},
		{"/cmd/test/", false, "/cmd/:tool/", Params{Param{Key: "tool", Value: "test"}}},
		{"/cmd/test", true, "", Params{Param{Key: "tool", Value: "test"}}},
		{"/src/some/file.png", false, "/src/*filepath", Params{Param{Key: "filepath", Value: "/some/file.png"}}},
		{"/src/some/file+test.png", false, "/src/*filepath", Params{Param{Key: "filepath", Value: "/some/file test.png"}}},
		{"/src/some/file++++%%%%test.png", false, "/src/*filepath", Params{Param{Key: "filepath", Value: "/some/file++++%%%%test.png"}}},
		{"/src/some/file%2Ftest.png", false, "/src/*filepath", Params{Param{Key: "filepath", Value: "/some/file/test.png"}}},
		{"/search/someth!ng+in+ünìcodé", false, "/search/:query", Params{Param{Key: "query", Value: "someth!ng in ünìcodé"}}},
		{"/info/gordon/project/go", false, "/info/:user/project/:project", Params{Param{Key: "user", Value: "gordon"}, Param{Key: "project", Value: "go"}}},
		{"/info/slash%2Fgordon", false, "/info/:user", Params{Param{Key: "user", Value: "slash/gordon"}}},
		{"/info/slash%2Fgordon/project/Project%20%231", false, "/info/:user/project/:project", Params{Param{Key: "user", Value: "slash/gordon"}, Param{Key: "project", Value: "Project #1"}}},
		{"/info/slash%%%%", false, "/info/:user", Params{Param{Key: "user", Value: "slash%%%%"}}},
		{"/info/slash%%%%2Fgordon/project/Project%%%%20%231", false, "/info/:user/project/:project", Params{Param{Key: "user", Value: "slash%%%%2Fgordon"}, Param{Key: "project", Value: "Project%%%%20%231"}}},
	}, true)

	checkPriorities(t, tree)
}
|
||||
|
||||
// catchPanic runs testFunc and returns the value it panicked with, or nil
// if it completed normally.
func catchPanic(testFunc func()) (recv interface{}) {
	defer func() { recv = recover() }()
	testFunc()
	return recv
}
|
||||
|
||||
// testRoute pairs a route pattern with whether inserting it (after the
// routes preceding it in the table) is expected to panic due to a conflict.
type testRoute struct {
	path     string
	conflict bool
}
|
||||
|
||||
func testRoutes(t *testing.T, routes []testRoute) {
|
||||
tree := &Node{}
|
||||
|
||||
for _, route := range routes {
|
||||
recv := catchPanic(func() {
|
||||
tree.addRoute(route.path, nil)
|
||||
})
|
||||
|
||||
if route.conflict {
|
||||
if recv == nil {
|
||||
t.Errorf("no panic for conflicting route '%s'", route.path)
|
||||
}
|
||||
} else if recv != nil {
|
||||
t.Errorf("unexpected panic for route '%s': %v", route.path, recv)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestTreeWildcardConflict verifies that wildcard routes conflicting with
// existing static routes or differently named wildcards are rejected, while
// compatible mixes (e.g. "/id:id" vs "/id/:id") are accepted.
func TestTreeWildcardConflict(t *testing.T) {
	routes := []testRoute{
		{"/cmd/:tool/:sub", false},
		{"/cmd/vet", false},
		{"/foo/bar", false},
		{"/foo/:name", false},
		{"/foo/:names", true},
		{"/cmd/*path", true},
		{"/cmd/:badvar", true},
		{"/cmd/:tool/names", false},
		{"/cmd/:tool/:badsub/details", true},
		{"/src/*filepath", false},
		{"/src/:file", true},
		{"/src/static.json", true},
		{"/src/*filepathx", true},
		{"/src/", true},
		{"/src/foo/bar", true},
		{"/src1/", false},
		{"/src1/*filepath", true},
		{"/src2*filepath", true},
		{"/src2/*filepath", false},
		{"/search/:query", false},
		{"/search/valid", false},
		{"/user_:name", false},
		{"/user_x", false},
		{"/user_:name", false},
		{"/id:id", false},
		{"/id/:id", false},
	}
	testRoutes(t, routes)
}
|
||||
|
||||
// TestCatchAllAfterSlash verifies that a catch-all wildcard is rejected when
// it does not immediately follow a '/'.
func TestCatchAllAfterSlash(t *testing.T) {
	routes := []testRoute{
		{"/non-leading-*catchall", true},
	}
	testRoutes(t, routes)
}
|
||||
|
||||
// TestTreeChildConflict verifies conflicts between wildcard children and
// pre-existing children of the same node (e.g. a catch-all added where a
// static child already exists).
func TestTreeChildConflict(t *testing.T) {
	routes := []testRoute{
		{"/cmd/vet", false},
		{"/cmd/:tool", false},
		{"/cmd/:tool/:sub", false},
		{"/cmd/:tool/misc", false},
		{"/cmd/:tool/:othersub", true},
		{"/src/AUTHORS", false},
		{"/src/*filepath", true},
		{"/user_x", false},
		{"/user_:name", false},
		{"/id/:id", false},
		{"/id:id", false},
		{"/:id", false},
		{"/*filepath", true},
	}
	testRoutes(t, routes)
}
|
||||
|
||||
// TestTreeDuplicatePath verifies that registering the same route twice
// panics, and that the first registration keeps working afterwards.
func TestTreeDuplicatePath(t *testing.T) {
	tree := &Node{}

	routes := [...]string{
		"/",
		"/doc/",
		"/src/*filepath",
		"/search/:query",
		"/user_:name",
	}
	for _, route := range routes {
		recv := catchPanic(func() {
			tree.addRoute(route, fakeHandler(route))
		})
		if recv != nil {
			t.Fatalf("panic inserting route '%s': %v", route, recv)
		}

		// Add again
		recv = catchPanic(func() {
			tree.addRoute(route, nil)
		})
		if recv == nil {
			t.Fatalf("no panic while inserting duplicate route '%s", route)
		}
	}

	//printChildren(tree, "")

	checkRequests(t, tree, testRequests{
		{"/", false, "/", nil},
		{"/doc/", false, "/doc/", nil},
		{"/src/some/file.png", false, "/src/*filepath", Params{Param{"filepath", "/some/file.png"}}},
		{"/search/someth!ng+in+ünìcodé", false, "/search/:query", Params{Param{"query", "someth!ng+in+ünìcodé"}}},
		{"/user_gopher", false, "/user_:name", Params{Param{"name", "gopher"}}},
	})
}
|
||||
|
||||
// TestEmptyWildcardName verifies that wildcard segments without a name
// (bare ':' or '*') are rejected with a panic on insertion.
func TestEmptyWildcardName(t *testing.T) {
	tree := &Node{}

	routes := [...]string{
		"/user:",
		"/user:/",
		"/cmd/:/",
		"/src/*",
	}
	for _, route := range routes {
		recv := catchPanic(func() {
			tree.addRoute(route, nil)
		})
		if recv == nil {
			t.Fatalf("no panic while inserting route with empty wildcard name '%s", route)
		}
	}
}
|
||||
|
||||
// TestTreeCatchAllConflict verifies that a catch-all must be the final
// segment of a route: anything after "*name" is rejected.
func TestTreeCatchAllConflict(t *testing.T) {
	routes := []testRoute{
		{"/src/*filepath/x", true},
		{"/src2/", false},
		{"/src2/*filepath/x", true},
		{"/src3/*filepath", false},
		{"/src3/*filepath/x", true},
	}
	testRoutes(t, routes)
}
|
||||
|
||||
// TestTreeCatchAllConflictRoot verifies that a root-level catch-all
// conflicts with an already registered "/" route.
func TestTreeCatchAllConflictRoot(t *testing.T) {
	routes := []testRoute{
		{"/", false},
		{"/*filepath", true},
	}
	testRoutes(t, routes)
}
|
||||
|
||||
func TestTreeCatchMaxParams(t *testing.T) {
|
||||
tree := &Node{}
|
||||
var route = "/cmd/*filepath"
|
||||
tree.addRoute(route, fakeHandler(route))
|
||||
}
|
||||
|
||||
// TestTreeDoubleWildcard verifies that two wildcards within one path segment
// are rejected with the expected panic message.
func TestTreeDoubleWildcard(t *testing.T) {
	const panicMsg = "only one wildcard per path segment is allowed"

	routes := [...]string{
		"/:foo:bar",
		"/:foo:bar/",
		"/:foo*bar",
	}

	for _, route := range routes {
		tree := &Node{}
		recv := catchPanic(func() {
			tree.addRoute(route, nil)
		})

		if rs, ok := recv.(string); !ok || !strings.HasPrefix(rs, panicMsg) {
			t.Fatalf(`"Expected panic "%s" for route '%s', got "%v"`, panicMsg, route, recv)
		}
	}
}
|
||||
|
||||
/*func TestTreeDuplicateWildcard(t *testing.T) {
|
||||
tree := &Node{}
|
||||
routes := [...]string{
|
||||
"/:id/:name/:id",
|
||||
}
|
||||
for _, route := range routes {
|
||||
...
|
||||
}
|
||||
}*/
|
||||
|
||||
func TestTreeTrailingSlashRedirect(t *testing.T) {
|
||||
tree := &Node{}
|
||||
|
||||
routes := [...]string{
|
||||
"/hi",
|
||||
"/b/",
|
||||
"/search/:query",
|
||||
"/cmd/:tool/",
|
||||
"/src/*filepath",
|
||||
"/x",
|
||||
"/x/y",
|
||||
"/y/",
|
||||
"/y/z",
|
||||
"/0/:id",
|
||||
"/0/:id/1",
|
||||
"/1/:id/",
|
||||
"/1/:id/2",
|
||||
"/aa",
|
||||
"/a/",
|
||||
"/admin",
|
||||
"/admin/:category",
|
||||
"/admin/:category/:page",
|
||||
"/doc",
|
||||
"/doc/go_faq.html",
|
||||
"/doc/go1.html",
|
||||
"/no/a",
|
||||
"/no/b",
|
||||
"/api/hello/:name",
|
||||
}
|
||||
for _, route := range routes {
|
||||
recv := catchPanic(func() {
|
||||
tree.addRoute(route, fakeHandler(route))
|
||||
})
|
||||
if recv != nil {
|
||||
t.Fatalf("panic inserting route '%s': %v", route, recv)
|
||||
}
|
||||
}
|
||||
|
||||
tsrRoutes := [...]string{
|
||||
"/hi/",
|
||||
"/b",
|
||||
"/search/gopher/",
|
||||
"/cmd/vet",
|
||||
"/src",
|
||||
"/x/",
|
||||
"/y",
|
||||
"/0/go/",
|
||||
"/1/go",
|
||||
"/a",
|
||||
"/admin/",
|
||||
"/admin/config/",
|
||||
"/admin/config/permissions/",
|
||||
"/doc/",
|
||||
}
|
||||
for _, route := range tsrRoutes {
|
||||
value := tree.getValue(route, nil, false)
|
||||
if value.Handler != nil {
|
||||
t.Fatalf("non-nil handler for TSR route '%s", route)
|
||||
} else if !value.Tsr {
|
||||
t.Errorf("expected TSR recommendation for route '%s'", route)
|
||||
}
|
||||
}
|
||||
|
||||
noTsrRoutes := [...]string{
|
||||
"/",
|
||||
"/no",
|
||||
"/no/",
|
||||
"/_",
|
||||
"/_/",
|
||||
"/api/world/abc",
|
||||
}
|
||||
for _, route := range noTsrRoutes {
|
||||
value := tree.getValue(route, nil, false)
|
||||
if value.Handler != nil {
|
||||
t.Fatalf("non-nil handler for No-TSR route '%s", route)
|
||||
} else if value.Tsr {
|
||||
t.Errorf("expected no TSR recommendation for route '%s'", route)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestTreeRootTrailingSlashRedirect(t *testing.T) {
|
||||
tree := &Node{}
|
||||
|
||||
recv := catchPanic(func() {
|
||||
tree.addRoute("/:test", fakeHandler("/:test"))
|
||||
})
|
||||
if recv != nil {
|
||||
t.Fatalf("panic inserting test route: %v", recv)
|
||||
}
|
||||
|
||||
value := tree.getValue("/", nil, false)
|
||||
if value.Handler != nil {
|
||||
t.Fatalf("non-nil handler")
|
||||
} else if value.Tsr {
|
||||
t.Errorf("expected no TSR recommendation")
|
||||
}
|
||||
}
|
||||
|
||||
func TestTreeFindCaseInsensitivePath(t *testing.T) {
|
||||
tree := &Node{}
|
||||
|
||||
longPath := "/l" + strings.Repeat("o", 128) + "ng"
|
||||
lOngPath := "/l" + strings.Repeat("O", 128) + "ng/"
|
||||
|
||||
routes := [...]string{
|
||||
"/hi",
|
||||
"/b/",
|
||||
"/ABC/",
|
||||
"/search/:query",
|
||||
"/cmd/:tool/",
|
||||
"/src/*filepath",
|
||||
"/x",
|
||||
"/x/y",
|
||||
"/y/",
|
||||
"/y/z",
|
||||
"/0/:id",
|
||||
"/0/:id/1",
|
||||
"/1/:id/",
|
||||
"/1/:id/2",
|
||||
"/aa",
|
||||
"/a/",
|
||||
"/doc",
|
||||
"/doc/go_faq.html",
|
||||
"/doc/go1.html",
|
||||
"/doc/go/away",
|
||||
"/no/a",
|
||||
"/no/b",
|
||||
"/Π",
|
||||
"/u/apfêl/",
|
||||
"/u/äpfêl/",
|
||||
"/u/öpfêl",
|
||||
"/v/Äpfêl/",
|
||||
"/v/Öpfêl",
|
||||
"/w/♬", // 3 byte
|
||||
"/w/♭/", // 3 byte, last byte differs
|
||||
"/w/𠜎", // 4 byte
|
||||
"/w/𠜏/", // 4 byte
|
||||
longPath,
|
||||
}
|
||||
|
||||
for _, route := range routes {
|
||||
recv := catchPanic(func() {
|
||||
tree.addRoute(route, fakeHandler(route))
|
||||
})
|
||||
if recv != nil {
|
||||
t.Fatalf("panic inserting route '%s': %v", route, recv)
|
||||
}
|
||||
}
|
||||
|
||||
// Check out == in for all registered routes
|
||||
// With fixTrailingSlash = true
|
||||
for _, route := range routes {
|
||||
out, found := tree.findCaseInsensitivePath(route, true)
|
||||
if !found {
|
||||
t.Errorf("Route '%s' not found!", route)
|
||||
} else if string(out) != route {
|
||||
t.Errorf("Wrong result for route '%s': %s", route, string(out))
|
||||
}
|
||||
}
|
||||
// With fixTrailingSlash = false
|
||||
for _, route := range routes {
|
||||
out, found := tree.findCaseInsensitivePath(route, false)
|
||||
if !found {
|
||||
t.Errorf("Route '%s' not found!", route)
|
||||
} else if string(out) != route {
|
||||
t.Errorf("Wrong result for route '%s': %s", route, string(out))
|
||||
}
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
in string
|
||||
out string
|
||||
found bool
|
||||
slash bool
|
||||
}{
|
||||
{"/HI", "/hi", true, false},
|
||||
{"/HI/", "/hi", true, true},
|
||||
{"/B", "/b/", true, true},
|
||||
{"/B/", "/b/", true, false},
|
||||
{"/abc", "/ABC/", true, true},
|
||||
{"/abc/", "/ABC/", true, false},
|
||||
{"/aBc", "/ABC/", true, true},
|
||||
{"/aBc/", "/ABC/", true, false},
|
||||
{"/abC", "/ABC/", true, true},
|
||||
{"/abC/", "/ABC/", true, false},
|
||||
{"/SEARCH/QUERY", "/search/QUERY", true, false},
|
||||
{"/SEARCH/QUERY/", "/search/QUERY", true, true},
|
||||
{"/CMD/TOOL/", "/cmd/TOOL/", true, false},
|
||||
{"/CMD/TOOL", "/cmd/TOOL/", true, true},
|
||||
{"/SRC/FILE/PATH", "/src/FILE/PATH", true, false},
|
||||
{"/x/Y", "/x/y", true, false},
|
||||
{"/x/Y/", "/x/y", true, true},
|
||||
{"/X/y", "/x/y", true, false},
|
||||
{"/X/y/", "/x/y", true, true},
|
||||
{"/X/Y", "/x/y", true, false},
|
||||
{"/X/Y/", "/x/y", true, true},
|
||||
{"/Y/", "/y/", true, false},
|
||||
{"/Y", "/y/", true, true},
|
||||
{"/Y/z", "/y/z", true, false},
|
||||
{"/Y/z/", "/y/z", true, true},
|
||||
{"/Y/Z", "/y/z", true, false},
|
||||
{"/Y/Z/", "/y/z", true, true},
|
||||
{"/y/Z", "/y/z", true, false},
|
||||
{"/y/Z/", "/y/z", true, true},
|
||||
{"/Aa", "/aa", true, false},
|
||||
{"/Aa/", "/aa", true, true},
|
||||
{"/AA", "/aa", true, false},
|
||||
{"/AA/", "/aa", true, true},
|
||||
{"/aA", "/aa", true, false},
|
||||
{"/aA/", "/aa", true, true},
|
||||
{"/A/", "/a/", true, false},
|
||||
{"/A", "/a/", true, true},
|
||||
{"/DOC", "/doc", true, false},
|
||||
{"/DOC/", "/doc", true, true},
|
||||
{"/NO", "", false, true},
|
||||
{"/DOC/GO", "", false, true},
|
||||
{"/π", "/Π", true, false},
|
||||
{"/π/", "/Π", true, true},
|
||||
{"/u/ÄPFÊL/", "/u/äpfêl/", true, false},
|
||||
{"/u/ÄPFÊL", "/u/äpfêl/", true, true},
|
||||
{"/u/ÖPFÊL/", "/u/öpfêl", true, true},
|
||||
{"/u/ÖPFÊL", "/u/öpfêl", true, false},
|
||||
{"/v/äpfêL/", "/v/Äpfêl/", true, false},
|
||||
{"/v/äpfêL", "/v/Äpfêl/", true, true},
|
||||
{"/v/öpfêL/", "/v/Öpfêl", true, true},
|
||||
{"/v/öpfêL", "/v/Öpfêl", true, false},
|
||||
{"/w/♬/", "/w/♬", true, true},
|
||||
{"/w/♭", "/w/♭/", true, true},
|
||||
{"/w/𠜎/", "/w/𠜎", true, true},
|
||||
{"/w/𠜏", "/w/𠜏/", true, true},
|
||||
{lOngPath, longPath, true, true},
|
||||
}
|
||||
// With fixTrailingSlash = true
|
||||
for _, test := range tests {
|
||||
out, found := tree.findCaseInsensitivePath(test.in, true)
|
||||
if found != test.found || (found && (string(out) != test.out)) {
|
||||
t.Errorf("Wrong result for '%s': got %s, %t; want %s, %t",
|
||||
test.in, string(out), found, test.out, test.found)
|
||||
return
|
||||
}
|
||||
}
|
||||
// With fixTrailingSlash = false
|
||||
for _, test := range tests {
|
||||
out, found := tree.findCaseInsensitivePath(test.in, false)
|
||||
if test.slash {
|
||||
if found { // test needs a trailingSlash fix. It must not be found!
|
||||
t.Errorf("Found without fixTrailingSlash: %s; got %s", test.in, string(out))
|
||||
}
|
||||
} else {
|
||||
if found != test.found || (found && (string(out) != test.out)) {
|
||||
t.Errorf("Wrong result for '%s': got %s, %t; want %s, %t",
|
||||
test.in, string(out), found, test.out, test.found)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestTreeInvalidNodeType(t *testing.T) {
|
||||
const panicMsg = "invalid Node type"
|
||||
|
||||
tree := &Node{}
|
||||
tree.addRoute("/", fakeHandler("/"))
|
||||
tree.addRoute("/:page", fakeHandler("/:page"))
|
||||
|
||||
// set invalid Node type
|
||||
tree.children[0].nType = 42
|
||||
|
||||
// normal lookup
|
||||
recv := catchPanic(func() {
|
||||
tree.getValue("/test", nil, false)
|
||||
})
|
||||
if rs, ok := recv.(string); !ok || rs != panicMsg {
|
||||
t.Fatalf("Expected panic '"+panicMsg+"', got '%v'", recv)
|
||||
}
|
||||
|
||||
// case-insensitive lookup
|
||||
recv = catchPanic(func() {
|
||||
tree.findCaseInsensitivePath("/test", true)
|
||||
})
|
||||
if rs, ok := recv.(string); !ok || rs != panicMsg {
|
||||
t.Fatalf("Expected panic '"+panicMsg+"', got '%v'", recv)
|
||||
}
|
||||
}
|
||||
|
||||
func TestTreeInvalidParamsType(t *testing.T) {
|
||||
tree := &Node{}
|
||||
tree.wildChild = true
|
||||
tree.children = append(tree.children, &Node{})
|
||||
tree.children[0].nType = 2
|
||||
|
||||
// set invalid Params type
|
||||
params := make(Params, 0)
|
||||
|
||||
// try to trigger slice bounds out of range with capacity 0
|
||||
tree.getValue("/test", ¶ms, false)
|
||||
}
|
||||
|
||||
func TestTreeWildcardConflictEx(t *testing.T) {
|
||||
conflicts := [...]struct {
|
||||
route string
|
||||
segPath string
|
||||
existPath string
|
||||
existSegPath string
|
||||
}{
|
||||
{"/who/are/foo", "/foo", `/who/are/\*you`, `/\*you`},
|
||||
{"/who/are/foo/", "/foo/", `/who/are/\*you`, `/\*you`},
|
||||
{"/who/are/foo/bar", "/foo/bar", `/who/are/\*you`, `/\*you`},
|
||||
{"/con:nection", ":nection", `/con:tact`, `:tact`},
|
||||
}
|
||||
|
||||
for _, conflict := range conflicts {
|
||||
// I have to re-create a 'tree', because the 'tree' will be
|
||||
// in an inconsistent state when the loop recovers from the
|
||||
// panic which threw by 'addRoute' function.
|
||||
tree := &Node{}
|
||||
routes := [...]string{
|
||||
"/con:tact",
|
||||
"/who/are/*you",
|
||||
"/who/foo/hello",
|
||||
}
|
||||
|
||||
for _, route := range routes {
|
||||
tree.addRoute(route, fakeHandler(route))
|
||||
}
|
||||
|
||||
recv := catchPanic(func() {
|
||||
tree.addRoute(conflict.route, fakeHandler(conflict.route))
|
||||
})
|
||||
|
||||
if !regexp.MustCompile(fmt.Sprintf("'%s' in new path .* conflicts with existing wildcard '%s' in existing prefix '%s'", conflict.segPath, conflict.existSegPath, conflict.existPath)).MatchString(fmt.Sprint(recv)) {
|
||||
t.Fatalf("invalid wildcard conflict error (%v)", recv)
|
||||
}
|
||||
}
|
||||
}
|
@ -12,6 +12,8 @@ import (
|
||||
"github.com/grafana/grafana/pkg/services/live/convert"
|
||||
"github.com/grafana/grafana/pkg/services/live/pushurl"
|
||||
"github.com/grafana/grafana/pkg/setting"
|
||||
|
||||
liveDto "github.com/grafana/grafana-plugin-sdk-go/live"
|
||||
)
|
||||
|
||||
var (
|
||||
@ -45,7 +47,7 @@ func (g *Gateway) Run(ctx context.Context) error {
|
||||
func (g *Gateway) Handle(ctx *models.ReqContext) {
|
||||
streamID := ctx.Params(":streamId")
|
||||
|
||||
stream, err := g.GrafanaLive.ManagedStreamRunner.GetOrCreateStream(ctx.SignedInUser.OrgId, streamID)
|
||||
stream, err := g.GrafanaLive.ManagedStreamRunner.GetOrCreateStream(ctx.SignedInUser.OrgId, liveDto.ScopeStream, streamID)
|
||||
if err != nil {
|
||||
logger.Error("Error getting stream", "error", err)
|
||||
ctx.Resp.WriteHeader(http.StatusInternalServerError)
|
||||
@ -92,3 +94,39 @@ func (g *Gateway) Handle(ctx *models.ReqContext) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (g *Gateway) HandlePath(ctx *models.ReqContext) {
|
||||
streamID := ctx.Params(":streamId")
|
||||
path := ctx.Params(":path")
|
||||
|
||||
body, err := io.ReadAll(ctx.Req.Body)
|
||||
if err != nil {
|
||||
logger.Error("Error reading body", "error", err)
|
||||
ctx.Resp.WriteHeader(http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
logger.Debug("Live channel push request",
|
||||
"protocol", "http",
|
||||
"streamId", streamID,
|
||||
"path", path,
|
||||
"bodyLength", len(body),
|
||||
)
|
||||
|
||||
channelID := "stream/" + streamID + "/" + path
|
||||
|
||||
ruleFound, err := g.GrafanaLive.Pipeline.ProcessInput(ctx.Req.Context(), ctx.OrgId, channelID, body)
|
||||
if err != nil {
|
||||
logger.Error("Pipeline input processing error", "error", err, "body", string(body))
|
||||
if errors.Is(err, liveDto.ErrInvalidChannelID) {
|
||||
ctx.Resp.WriteHeader(http.StatusBadRequest)
|
||||
} else {
|
||||
ctx.Resp.WriteHeader(http.StatusInternalServerError)
|
||||
}
|
||||
return
|
||||
}
|
||||
if !ruleFound {
|
||||
logger.Error("No conversion rule for a channel", "error", err, "channel", channelID)
|
||||
ctx.Resp.WriteHeader(http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
@ -14,6 +14,7 @@ import (
|
||||
"github.com/grafana/grafana/pkg/services/live/pushurl"
|
||||
|
||||
"github.com/gorilla/websocket"
|
||||
liveDto "github.com/grafana/grafana-plugin-sdk-go/live"
|
||||
)
|
||||
|
||||
var (
|
||||
@ -165,7 +166,7 @@ func (s *Handler) ServeHTTP(rw http.ResponseWriter, r *http.Request) {
|
||||
break
|
||||
}
|
||||
|
||||
stream, err := s.managedStreamRunner.GetOrCreateStream(user.OrgId, streamID)
|
||||
stream, err := s.managedStreamRunner.GetOrCreateStream(user.OrgId, liveDto.ScopeStream, streamID)
|
||||
if err != nil {
|
||||
logger.Error("Error getting stream", "error", err)
|
||||
continue
|
||||
|
139
pkg/services/live/remotewrite/convert.go
Normal file
139
pkg/services/live/remotewrite/convert.go
Normal file
@ -0,0 +1,139 @@
|
||||
package remotewrite
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"unicode"
|
||||
)
|
||||
|
||||
type table struct {
|
||||
First *unicode.RangeTable
|
||||
Rest *unicode.RangeTable
|
||||
}
|
||||
|
||||
var metricNameTable = table{
|
||||
First: &unicode.RangeTable{
|
||||
R16: []unicode.Range16{
|
||||
{0x003A, 0x003A, 1}, // :
|
||||
{0x0041, 0x005A, 1}, // A-Z
|
||||
{0x005F, 0x005F, 1}, // _
|
||||
{0x0061, 0x007A, 1}, // a-z
|
||||
},
|
||||
LatinOffset: 4,
|
||||
},
|
||||
Rest: &unicode.RangeTable{
|
||||
R16: []unicode.Range16{
|
||||
{0x0030, 0x003A, 1}, // 0-:
|
||||
{0x0041, 0x005A, 1}, // A-Z
|
||||
{0x005F, 0x005F, 1}, // _
|
||||
{0x0061, 0x007A, 1}, // a-z
|
||||
},
|
||||
LatinOffset: 4,
|
||||
},
|
||||
}
|
||||
|
||||
var labelNameTable = table{
|
||||
First: &unicode.RangeTable{
|
||||
R16: []unicode.Range16{
|
||||
{0x0041, 0x005A, 1}, // A-Z
|
||||
{0x005F, 0x005F, 1}, // _
|
||||
{0x0061, 0x007A, 1}, // a-z
|
||||
},
|
||||
LatinOffset: 3,
|
||||
},
|
||||
Rest: &unicode.RangeTable{
|
||||
R16: []unicode.Range16{
|
||||
{0x0030, 0x0039, 1}, // 0-9
|
||||
{0x0041, 0x005A, 1}, // A-Z
|
||||
{0x005F, 0x005F, 1}, // _
|
||||
{0x0061, 0x007A, 1}, // a-z
|
||||
},
|
||||
LatinOffset: 4,
|
||||
},
|
||||
}
|
||||
|
||||
func isValid(name string, table table) bool {
|
||||
if name == "" {
|
||||
return false
|
||||
}
|
||||
|
||||
for i, r := range name {
|
||||
switch {
|
||||
case i == 0:
|
||||
if !unicode.In(r, table.First) {
|
||||
return false
|
||||
}
|
||||
default:
|
||||
if !unicode.In(r, table.Rest) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// Sanitize checks if the name is valid according to the table. If not, it
|
||||
// attempts to replaces invalid runes with an underscore to create a valid
|
||||
// name.
|
||||
func sanitize(name string, table table) (string, bool) {
|
||||
if isValid(name, table) {
|
||||
return name, true
|
||||
}
|
||||
|
||||
var b strings.Builder
|
||||
|
||||
for i, r := range name {
|
||||
switch {
|
||||
case i == 0:
|
||||
if unicode.In(r, table.First) {
|
||||
b.WriteRune(r)
|
||||
}
|
||||
default:
|
||||
if unicode.In(r, table.Rest) {
|
||||
b.WriteRune(r)
|
||||
} else {
|
||||
b.WriteString("_")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
name = strings.Trim(b.String(), "_")
|
||||
if name == "" {
|
||||
return "", false
|
||||
}
|
||||
|
||||
return name, true
|
||||
}
|
||||
|
||||
// sanitizeMetricName checks if the name is a valid Prometheus metric name. If
|
||||
// not, it attempts to replaces invalid runes with an underscore to create a
|
||||
// valid name.
|
||||
func sanitizeMetricName(name string) (string, bool) {
|
||||
return sanitize(name, metricNameTable)
|
||||
}
|
||||
|
||||
// sanitizeLabelName checks if the name is a valid Prometheus label name. If
|
||||
// not, it attempts to replaces invalid runes with an underscore to create a
|
||||
// valid name.
|
||||
func sanitizeLabelName(name string) (string, bool) {
|
||||
return sanitize(name, labelNameTable)
|
||||
}
|
||||
|
||||
// sampleValue converts a field value into a value suitable for a simple sample value.
|
||||
func sampleValue(value interface{}) (float64, bool) {
|
||||
switch v := value.(type) {
|
||||
case float64:
|
||||
return v, true
|
||||
case int64:
|
||||
return float64(v), true
|
||||
case uint64:
|
||||
return float64(v), true
|
||||
case bool:
|
||||
if v {
|
||||
return 1.0, true
|
||||
}
|
||||
return 0.0, true
|
||||
default:
|
||||
return 0, false
|
||||
}
|
||||
}
|
243
pkg/services/live/remotewrite/remotewrite.go
Normal file
243
pkg/services/live/remotewrite/remotewrite.go
Normal file
@ -0,0 +1,243 @@
|
||||
package remotewrite
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"hash/fnv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/gogo/protobuf/proto"
|
||||
"github.com/golang/snappy"
|
||||
"github.com/grafana/grafana-plugin-sdk-go/data"
|
||||
"github.com/prometheus/prometheus/prompb"
|
||||
)
|
||||
|
||||
type metricKey uint64
|
||||
|
||||
// Serialize frames to Prometheus remote write format.
|
||||
func Serialize(frames ...*data.Frame) ([]byte, error) {
|
||||
return TimeSeriesToBytes(TimeSeriesFromFrames(frames...))
|
||||
}
|
||||
|
||||
// SerializeLabelsColumn frames to Prometheus remote write format.
|
||||
func SerializeLabelsColumn(frames ...*data.Frame) ([]byte, error) {
|
||||
return TimeSeriesToBytes(TimeSeriesFromFramesLabelsColumn(frames...))
|
||||
}
|
||||
|
||||
// TimeSeriesFromFrames converts frames to slice of Prometheus TimeSeries.
|
||||
func TimeSeriesFromFrames(frames ...*data.Frame) []prompb.TimeSeries {
|
||||
var entries = make(map[metricKey]prompb.TimeSeries)
|
||||
var keys []metricKey // sorted keys.
|
||||
|
||||
for _, frame := range frames {
|
||||
timeFieldIndex, ok := timeFieldIndex(frame)
|
||||
if !ok {
|
||||
// Skipping frames without time field.
|
||||
continue
|
||||
}
|
||||
for _, field := range frame.Fields {
|
||||
if !field.Type().Numeric() {
|
||||
continue
|
||||
}
|
||||
metricName := makeMetricName(frame, field)
|
||||
metricName, ok := sanitizeMetricName(metricName)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
var samples []prompb.Sample
|
||||
|
||||
labels := createLabels(field.Labels)
|
||||
key := makeMetricKey(metricName, labels)
|
||||
|
||||
for i := 0; i < field.Len(); i++ {
|
||||
val, ok := field.ConcreteAt(i)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
value, ok := sampleValue(val)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
tm, ok := frame.Fields[timeFieldIndex].ConcreteAt(i)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
sample := prompb.Sample{
|
||||
// Timestamp is int milliseconds for remote write.
|
||||
Timestamp: toSampleTime(tm.(time.Time)),
|
||||
Value: value,
|
||||
}
|
||||
samples = append(samples, sample)
|
||||
}
|
||||
|
||||
labelsCopy := make([]prompb.Label, len(labels), len(labels)+1)
|
||||
copy(labelsCopy, labels)
|
||||
labelsCopy = append(labelsCopy, prompb.Label{
|
||||
Name: "__name__",
|
||||
Value: metricName,
|
||||
})
|
||||
promTimeSeries := prompb.TimeSeries{Labels: labelsCopy, Samples: samples}
|
||||
entries[key] = promTimeSeries
|
||||
keys = append(keys, key)
|
||||
}
|
||||
}
|
||||
|
||||
var promTimeSeriesBatch = make([]prompb.TimeSeries, 0, len(entries))
|
||||
for _, key := range keys {
|
||||
promTimeSeriesBatch = append(promTimeSeriesBatch, entries[key])
|
||||
}
|
||||
|
||||
return promTimeSeriesBatch
|
||||
}
|
||||
|
||||
// TimeSeriesFromFramesLabelsColumn converts frames to slice of Prometheus TimeSeries.
|
||||
func TimeSeriesFromFramesLabelsColumn(frames ...*data.Frame) []prompb.TimeSeries {
|
||||
var entries = make(map[metricKey]prompb.TimeSeries)
|
||||
var keys []metricKey // sorted keys.
|
||||
|
||||
for _, frame := range frames {
|
||||
timeFieldIndex, ok := timeFieldIndex(frame)
|
||||
if !ok {
|
||||
// Skipping frames without time field.
|
||||
continue
|
||||
}
|
||||
|
||||
// Labels column frames have first column called "labels".
|
||||
isLabelsColumnFrame := frame.Fields[0].Type() == data.FieldTypeString && frame.Fields[0].Name == "labels"
|
||||
|
||||
var labels [][]prompb.Label
|
||||
|
||||
if isLabelsColumnFrame {
|
||||
labelsField := frame.Fields[0]
|
||||
labels = make([][]prompb.Label, labelsField.Len())
|
||||
for i := 0; i < labelsField.Len(); i++ {
|
||||
val, ok := labelsField.ConcreteAt(i)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
parts := strings.Split(val.(string), ", ")
|
||||
promLabels := make([]prompb.Label, 0)
|
||||
for _, part := range parts {
|
||||
labelParts := strings.SplitN(part, "=", 2)
|
||||
if len(labelParts) != 2 {
|
||||
continue
|
||||
}
|
||||
promLabels = append(promLabels, prompb.Label{Name: labelParts[0], Value: labelParts[1]})
|
||||
}
|
||||
labels[i] = promLabels
|
||||
}
|
||||
}
|
||||
|
||||
for _, field := range frame.Fields {
|
||||
if !field.Type().Numeric() {
|
||||
continue
|
||||
}
|
||||
metricName := makeMetricName(frame, field)
|
||||
metricName, ok := sanitizeMetricName(metricName)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
for i := 0; i < field.Len(); i++ {
|
||||
var labelsCopy []prompb.Label
|
||||
if isLabelsColumnFrame && labels != nil {
|
||||
labelsCopy = make([]prompb.Label, len(labels[i]), len(labels[i])+1)
|
||||
copy(labelsCopy, labels[i])
|
||||
} else {
|
||||
labelsCopy = make([]prompb.Label, 0, len(field.Labels)+1)
|
||||
for k, v := range field.Labels {
|
||||
labelsCopy = append(labelsCopy, prompb.Label{Name: k, Value: v})
|
||||
}
|
||||
}
|
||||
|
||||
val, ok := field.ConcreteAt(i)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
value, ok := sampleValue(val)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
tm, ok := frame.Fields[timeFieldIndex].ConcreteAt(i)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
sample := prompb.Sample{
|
||||
// Timestamp is int milliseconds for remote write.
|
||||
Timestamp: toSampleTime(tm.(time.Time)),
|
||||
Value: value,
|
||||
}
|
||||
|
||||
labelsCopy = append(labelsCopy, prompb.Label{
|
||||
Name: "__name__",
|
||||
Value: metricName,
|
||||
})
|
||||
key := makeMetricKey(metricName, labelsCopy)
|
||||
|
||||
promTimeSeries := prompb.TimeSeries{Labels: labelsCopy, Samples: []prompb.Sample{sample}}
|
||||
entries[key] = promTimeSeries
|
||||
keys = append(keys, key)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var promTimeSeriesBatch = make([]prompb.TimeSeries, 0, len(entries))
|
||||
for _, key := range keys {
|
||||
promTimeSeriesBatch = append(promTimeSeriesBatch, entries[key])
|
||||
}
|
||||
|
||||
return promTimeSeriesBatch
|
||||
}
|
||||
|
||||
func timeFieldIndex(frame *data.Frame) (int, bool) {
|
||||
timeFieldIndex := -1
|
||||
for i, field := range frame.Fields {
|
||||
if field.Type().Time() {
|
||||
timeFieldIndex = i
|
||||
break
|
||||
}
|
||||
}
|
||||
return timeFieldIndex, timeFieldIndex > -1
|
||||
}
|
||||
|
||||
func makeMetricName(frame *data.Frame, field *data.Field) string {
|
||||
return frame.Name + "_" + field.Name
|
||||
}
|
||||
|
||||
func toSampleTime(tm time.Time) int64 {
|
||||
return tm.UnixNano() / int64(time.Millisecond)
|
||||
}
|
||||
|
||||
// TimeSeriesToBytes converts Prometheus TimeSeries to snappy compressed byte slice.
|
||||
func TimeSeriesToBytes(ts []prompb.TimeSeries) ([]byte, error) {
|
||||
writeRequestData, err := proto.Marshal(&prompb.WriteRequest{Timeseries: ts})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to marshal protobuf: %v", err)
|
||||
}
|
||||
return snappy.Encode(nil, writeRequestData), nil
|
||||
}
|
||||
|
||||
func makeMetricKey(name string, labels []prompb.Label) metricKey {
|
||||
h := fnv.New64a()
|
||||
_, _ = h.Write([]byte(name))
|
||||
for _, label := range labels {
|
||||
_, _ = h.Write([]byte(label.Name))
|
||||
_, _ = h.Write([]byte("\x00"))
|
||||
_, _ = h.Write([]byte(label.Value))
|
||||
_, _ = h.Write([]byte("\x00"))
|
||||
}
|
||||
return metricKey(h.Sum64())
|
||||
}
|
||||
|
||||
func createLabels(fieldLabels map[string]string) []prompb.Label {
|
||||
labels := make([]prompb.Label, 0, len(fieldLabels))
|
||||
for k, v := range fieldLabels {
|
||||
sanitizedName, ok := sanitizeLabelName(k)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
labels = append(labels, prompb.Label{Name: sanitizedName, Value: v})
|
||||
}
|
||||
return labels
|
||||
}
|
@ -16,9 +16,11 @@ import (
|
||||
type testStreamHandler struct {
|
||||
logger log.Logger
|
||||
frame *data.Frame
|
||||
// If Live Pipeline enabled we are sending the whole frame to have a chance to process stream with rules.
|
||||
livePipelineEnabled bool
|
||||
}
|
||||
|
||||
func newTestStreamHandler(logger log.Logger) *testStreamHandler {
|
||||
func newTestStreamHandler(logger log.Logger, livePipelineEnabled bool) *testStreamHandler {
|
||||
frame := data.NewFrame("testdata",
|
||||
data.NewField("Time", nil, make([]time.Time, 1)),
|
||||
data.NewField("Value", nil, make([]float64, 1)),
|
||||
@ -26,8 +28,9 @@ func newTestStreamHandler(logger log.Logger) *testStreamHandler {
|
||||
data.NewField("Max", nil, make([]float64, 1)),
|
||||
)
|
||||
return &testStreamHandler{
|
||||
frame: frame,
|
||||
logger: logger,
|
||||
frame: frame,
|
||||
logger: logger,
|
||||
livePipelineEnabled: livePipelineEnabled,
|
||||
}
|
||||
}
|
||||
|
||||
@ -117,9 +120,14 @@ func (p *testStreamHandler) runTestStream(ctx context.Context, path string, conf
|
||||
continue
|
||||
}
|
||||
|
||||
mode := data.IncludeDataOnly
|
||||
if p.livePipelineEnabled {
|
||||
mode = data.IncludeAll
|
||||
}
|
||||
|
||||
if flight != nil {
|
||||
flight.set(0, conf.Flight.getNextPoint(t))
|
||||
if err := sender.SendFrame(flight.frame, data.IncludeDataOnly); err != nil {
|
||||
if err := sender.SendFrame(flight.frame, mode); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
@ -130,7 +138,7 @@ func (p *testStreamHandler) runTestStream(ctx context.Context, path string, conf
|
||||
p.frame.Fields[1].Set(0, walker) // Value
|
||||
p.frame.Fields[2].Set(0, walker-((rand.Float64()*spread)+0.01)) // Min
|
||||
p.frame.Fields[3].Set(0, walker+((rand.Float64()*spread)+0.01)) // Max
|
||||
if err := sender.SendFrame(p.frame, data.IncludeDataOnly); err != nil {
|
||||
if err := sender.SendFrame(p.frame, mode); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
@ -18,7 +18,7 @@ func ProvideService(cfg *setting.Cfg, manager backendplugin.Manager) (*TestDataP
|
||||
factory := coreplugin.New(backend.ServeOpts{
|
||||
QueryDataHandler: p.queryMux,
|
||||
CallResourceHandler: httpadapter.New(resourceMux),
|
||||
StreamHandler: newTestStreamHandler(p.logger),
|
||||
StreamHandler: newTestStreamHandler(p.logger, cfg.FeatureToggles["live-pipeline"]),
|
||||
})
|
||||
err := manager.Register("testdata", factory)
|
||||
if err != nil {
|
||||
|
Loading…
Reference in New Issue
Block a user