CloudWatch: remove unused "live" log code (#44185)
Commit 2c3bed1f4b (parent 74193b3c1b)
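In short, this change deletes the CloudWatch LogsService and the WebSocket-based "live" log query path (LogQueryRunnerSupplier, logQueryRunner, executeLiveLogQuery and the service-quota plumbing behind it), removes their wiring from Grafana Live and from the dependency-injection set, and narrows the CloudWatch constructors accordingly. For orientation, here is a minimal sketch, not part of the commit, of how the trimmed-down constructors compose after the change; it assumes it sits inside the cloudwatch package with cfg and httpClientProvider already in scope and uses only names that appear in the hunks below. The full diff follows.

	// Sketch only (assumed context: inside the cloudwatch package).
	// newExecutor and ProvideService no longer take a *LogsService.
	im := datasource.NewInstanceManager(NewInstanceSettings(httpClientProvider))
	executor := newExecutor(im, cfg, awsds.NewSessionCache())
	svc := &CloudWatchService{
		Cfg:      cfg,
		Executor: executor,
	}
	_ = svc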
@@ -89,7 +89,7 @@ type testState struct {
 
 func newTestLive(t *testing.T) *live.GrafanaLive {
 	cfg := &setting.Cfg{AppURL: "http://localhost:3000/"}
-	gLive, err := live.ProvideService(nil, cfg, routing.NewRouteRegister(), nil, nil, nil, nil, sqlstore.InitTestDB(t), nil, &usagestats.UsageStatsMock{T: t}, nil)
+	gLive, err := live.ProvideService(nil, cfg, routing.NewRouteRegister(), nil, nil, nil, sqlstore.InitTestDB(t), nil, &usagestats.UsageStatsMock{T: t}, nil)
 	require.NoError(t, err)
 	return gLive
 }

@@ -116,7 +116,6 @@ var wireBasicSet = wire.NewSet(
 	wire.Bind(new(plugins.Loader), new(*loader.Loader)),
 	wire.Bind(new(plugins.ErrorResolver), new(*loader.Loader)),
 	cloudwatch.ProvideService,
-	cloudwatch.ProvideLogsService,
 	cloudmonitoring.ProvideService,
 	azuremonitor.ProvideService,
 	postgres.ProvideService,

@@ -46,7 +46,6 @@ import (
 	"github.com/grafana/grafana/pkg/services/secrets"
 	"github.com/grafana/grafana/pkg/services/sqlstore"
 	"github.com/grafana/grafana/pkg/setting"
-	"github.com/grafana/grafana/pkg/tsdb/cloudwatch"
 	"github.com/grafana/grafana/pkg/util"
 	"github.com/grafana/grafana/pkg/web"
 	"golang.org/x/sync/errgroup"
@@ -66,14 +65,13 @@ type CoreGrafanaScope struct {
 }
 
 func ProvideService(plugCtxProvider *plugincontext.Provider, cfg *setting.Cfg, routeRegister routing.RouteRegister,
-	logsService *cloudwatch.LogsService, pluginStore plugins.Store, cacheService *localcache.CacheService,
+	pluginStore plugins.Store, cacheService *localcache.CacheService,
 	dataSourceCache datasources.CacheService, sqlStore *sqlstore.SQLStore, secretsService secrets.Service,
 	usageStatsService usagestats.Service, queryDataService *query.Service) (*GrafanaLive, error) {
 	g := &GrafanaLive{
 		Cfg:                   cfg,
 		PluginContextProvider: plugCtxProvider,
 		RouteRegister:         routeRegister,
-		LogsService:           logsService,
 		pluginStore:           pluginStore,
 		CacheService:          cacheService,
 		DataSourceCache:       dataSourceCache,
@@ -394,7 +392,6 @@ type GrafanaLive struct {
 	PluginContextProvider *plugincontext.Provider
 	Cfg                   *setting.Cfg
 	RouteRegister         routing.RouteRegister
-	LogsService           *cloudwatch.LogsService
 	CacheService          *localcache.CacheService
 	DataSourceCache       datasources.CacheService
 	SQLStore              *sqlstore.SQLStore
@@ -895,13 +892,6 @@ func (g *GrafanaLive) handleGrafanaScope(_ *models.SignedInUser, namespace strin
 }
 
 func (g *GrafanaLive) handlePluginScope(ctx context.Context, _ *models.SignedInUser, namespace string) (models.ChannelHandlerFactory, error) {
-	// Temporary hack until we have a more generic solution later on
-	if namespace == "cloudwatch" {
-		return &cloudwatch.LogQueryRunnerSupplier{
-			Publisher: g.Publish,
-			Service:   g.LogsService,
-		}, nil
-	}
 	streamHandler, err := g.getStreamPlugin(ctx, namespace)
 	if err != nil {
 		return nil, fmt.Errorf("can't find stream plugin: %s", namespace)

@@ -61,10 +61,10 @@ const pluginID = "cloudwatch"
 var plog = log.New("tsdb.cloudwatch")
 var aliasFormat = regexp.MustCompile(`\{\{\s*(.+?)\s*\}\}`)
 
-func ProvideService(cfg *setting.Cfg, logsService *LogsService, httpClientProvider httpclient.Provider, pluginStore plugins.Store) (*CloudWatchService, error) {
+func ProvideService(cfg *setting.Cfg, httpClientProvider httpclient.Provider, pluginStore plugins.Store) (*CloudWatchService, error) {
 	plog.Debug("initing")
 
-	executor := newExecutor(logsService, datasource.NewInstanceManager(NewInstanceSettings(httpClientProvider)), cfg, awsds.NewSessionCache())
+	executor := newExecutor(datasource.NewInstanceManager(NewInstanceSettings(httpClientProvider)), cfg, awsds.NewSessionCache())
 	factory := coreplugin.New(backend.ServeOpts{
 		QueryDataHandler: executor,
 	})
@@ -76,28 +76,25 @@ func ProvideService(cfg *setting.Cfg, logsService *LogsService, httpClientProvid
 	}
 
 	return &CloudWatchService{
-		LogsService: logsService,
-		Cfg:         cfg,
-		Executor:    executor,
+		Cfg:      cfg,
+		Executor: executor,
 	}, nil
 }
 
 type CloudWatchService struct {
-	LogsService *LogsService
-	Cfg         *setting.Cfg
-	Executor    *cloudWatchExecutor
+	Cfg      *setting.Cfg
+	Executor *cloudWatchExecutor
 }
 
 type SessionCache interface {
 	GetSession(c awsds.SessionConfig) (*session.Session, error)
 }
 
-func newExecutor(logsService *LogsService, im instancemgmt.InstanceManager, cfg *setting.Cfg, sessions SessionCache) *cloudWatchExecutor {
+func newExecutor(im instancemgmt.InstanceManager, cfg *setting.Cfg, sessions SessionCache) *cloudWatchExecutor {
 	return &cloudWatchExecutor{
-		logsService: logsService,
-		im:          im,
-		cfg:         cfg,
-		sessions:    sessions,
+		im:       im,
+		cfg:      cfg,
+		sessions: sessions,
 	}
 }
 
@@ -166,10 +163,9 @@ func NewInstanceSettings(httpClientProvider httpclient.Provider) datasource.Inst
 
 // cloudWatchExecutor executes CloudWatch requests.
 type cloudWatchExecutor struct {
-	logsService *LogsService
-	im          instancemgmt.InstanceManager
-	cfg         *setting.Cfg
-	sessions    SessionCache
+	im       instancemgmt.InstanceManager
+	cfg      *setting.Cfg
+	sessions SessionCache
 }
 
 func (e *cloudWatchExecutor) newSession(region string, pluginCtx backend.PluginContext) (*session.Session, error) {
@@ -304,8 +300,6 @@ func (e *cloudWatchExecutor) QueryData(ctx context.Context, req *backend.QueryDa
 		result, err = e.executeAnnotationQuery(ctx, model, q, req.PluginContext)
 	case "logAction":
 		result, err = e.executeLogActions(ctx, req)
-	case "liveLogAction":
-		result, err = e.executeLiveLogQuery(ctx, req)
 	case "timeSeriesQuery":
 		fallthrough
 	default:

@@ -1,295 +1,9 @@
 package cloudwatch
 
 import (
-	"context"
-	"encoding/json"
-	"fmt"
-	"sync"
-	"time"
-
-	"github.com/aws/aws-sdk-go/aws"
-	"github.com/aws/aws-sdk-go/aws/request"
-	"github.com/aws/aws-sdk-go/aws/session"
-	"github.com/aws/aws-sdk-go/service/cloudwatchlogs"
-	"github.com/aws/aws-sdk-go/service/servicequotas"
-	"github.com/aws/aws-sdk-go/service/servicequotas/servicequotasiface"
-	"github.com/google/uuid"
-	"github.com/grafana/grafana-plugin-sdk-go/backend"
 	"github.com/grafana/grafana-plugin-sdk-go/data"
-	"github.com/grafana/grafana/pkg/components/simplejson"
-	"github.com/grafana/grafana/pkg/models"
-	"github.com/grafana/grafana/pkg/setting"
-	"github.com/grafana/grafana/pkg/util/retryer"
-	"golang.org/x/sync/errgroup"
 )
 
-const defaultConcurrentQueries = 4
-
-type LogQueryRunnerSupplier struct {
-	Publisher models.ChannelPublisher
-	Service   *LogsService
-}
-
-type logQueryRunner struct {
-	channelName string
-	publish     models.ChannelPublisher
-	running     map[string]bool
-	runningMu   sync.Mutex
-	service     *LogsService
-}
-
-const (
-	maxAttempts   = 8
-	minRetryDelay = 500 * time.Millisecond
-	maxRetryDelay = 30 * time.Second
-)
-
-// GetHandlerForPath gets the channel handler for a certain path.
-func (s *LogQueryRunnerSupplier) GetHandlerForPath(path string) (models.ChannelHandler, error) {
-	return &logQueryRunner{
-		channelName: path,
-		publish:     s.Publisher,
-		running:     make(map[string]bool),
-		service:     s.Service,
-	}, nil
-}
-
-// OnSubscribe publishes results from the corresponding CloudWatch Logs query to the provided channel
-func (r *logQueryRunner) OnSubscribe(ctx context.Context, user *models.SignedInUser, e models.SubscribeEvent) (models.SubscribeReply, backend.SubscribeStreamStatus, error) {
-	r.runningMu.Lock()
-	defer r.runningMu.Unlock()
-
-	if _, ok := r.running[e.Channel]; ok {
-		return models.SubscribeReply{}, backend.SubscribeStreamStatusOK, nil
-	}
-
-	r.running[e.Channel] = true
-	go func() {
-		if err := r.publishResults(user.OrgId, e.Channel); err != nil {
-			plog.Error(err.Error())
-		}
-	}()
-
-	return models.SubscribeReply{}, backend.SubscribeStreamStatusOK, nil
-}
-
-// OnPublish checks if a message from the websocket can be broadcast on this channel
-func (r *logQueryRunner) OnPublish(ctx context.Context, user *models.SignedInUser, e models.PublishEvent) (models.PublishReply, backend.PublishStreamStatus, error) {
-	return models.PublishReply{}, backend.PublishStreamStatusPermissionDenied, nil
-}
-
-func (r *logQueryRunner) publishResults(orgID int64, channelName string) error {
-	defer func() {
-		r.service.DeleteResponseChannel(channelName)
-		r.runningMu.Lock()
-		delete(r.running, channelName)
-		r.runningMu.Unlock()
-	}()
-
-	responseChannel, err := r.service.GetResponseChannel(channelName)
-	if err != nil {
-		return err
-	}
-
-	for response := range responseChannel {
-		responseBytes, err := json.Marshal(response)
-		if err != nil {
-			return err
-		}
-
-		if err := r.publish(orgID, channelName, responseBytes); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-// executeLiveLogQuery executes a CloudWatch Logs query with live updates over WebSocket.
-// A WebSocket channel is created, which goroutines send responses over.
-func (e *cloudWatchExecutor) executeLiveLogQuery(ctx context.Context, req *backend.QueryDataRequest) (*backend.QueryDataResponse, error) {
-	responseChannelName := uuid.New().String()
-	responseChannel := make(chan *backend.QueryDataResponse)
-	if err := e.logsService.AddResponseChannel("plugin/cloudwatch/"+responseChannelName, responseChannel); err != nil {
-		close(responseChannel)
-		return nil, err
-	}
-
-	go e.sendLiveQueriesToChannel(req, responseChannel)
-
-	response := &backend.QueryDataResponse{
-		Responses: backend.Responses{
-			"A": {
-				Frames: data.Frames{data.NewFrame("A").SetMeta(&data.FrameMeta{
-					Custom: map[string]interface{}{
-						"channelName": responseChannelName,
-					},
-				})},
-			},
-		},
-	}
-
-	return response, nil
-}
-
-func (e *cloudWatchExecutor) sendLiveQueriesToChannel(req *backend.QueryDataRequest, responseChannel chan *backend.QueryDataResponse) {
-	defer close(responseChannel)
-
-	ctx, cancel := context.WithTimeout(context.Background(), 15*time.Minute)
-	defer cancel()
-	eg, ectx := errgroup.WithContext(ctx)
-
-	for _, query := range req.Queries {
-		query := query
-		eg.Go(func() error {
-			return e.startLiveQuery(ectx, responseChannel, query, query.TimeRange, req.PluginContext)
-		})
-	}
-
-	if err := eg.Wait(); err != nil {
-		plog.Error(err.Error())
-	}
-}
-
-func (e *cloudWatchExecutor) getQueue(queueKey string, pluginCtx backend.PluginContext) (chan bool, error) {
-	e.logsService.queueLock.Lock()
-	defer e.logsService.queueLock.Unlock()
-
-	if queue, ok := e.logsService.queues[queueKey]; ok {
-		return queue, nil
-	}
-
-	concurrentQueriesQuota := e.fetchConcurrentQueriesQuota(queueKey, pluginCtx)
-
-	queueChannel := make(chan bool, concurrentQueriesQuota)
-	e.logsService.queues[queueKey] = queueChannel
-
-	return queueChannel, nil
-}
-
-func (e *cloudWatchExecutor) fetchConcurrentQueriesQuota(region string, pluginCtx backend.PluginContext) int {
-	sess, err := e.newSession(region, pluginCtx)
-	if err != nil {
-		plog.Warn("Could not get service quota client")
-		return defaultConcurrentQueries
-	}
-
-	client := newQuotasClient(sess)
-
-	concurrentQueriesQuota, err := client.GetServiceQuota(&servicequotas.GetServiceQuotaInput{
-		ServiceCode: aws.String("logs"),
-		QuotaCode:   aws.String("L-32C48FBB"),
-	})
-	if err != nil {
-		plog.Warn("Could not get service quota")
-		return defaultConcurrentQueries
-	}
-
-	if concurrentQueriesQuota != nil && concurrentQueriesQuota.Quota != nil && concurrentQueriesQuota.Quota.Value != nil {
-		return int(*concurrentQueriesQuota.Quota.Value)
-	}
-
-	plog.Warn("Could not get service quota")
-
-	defaultConcurrentQueriesQuota, err := client.GetAWSDefaultServiceQuota(&servicequotas.GetAWSDefaultServiceQuotaInput{
-		ServiceCode: aws.String("logs"),
-		QuotaCode:   aws.String("L-32C48FBB"),
-	})
-	if err != nil {
-		plog.Warn("Could not get default service quota")
-		return defaultConcurrentQueries
-	}
-
-	if defaultConcurrentQueriesQuota != nil && defaultConcurrentQueriesQuota.Quota != nil &&
-		defaultConcurrentQueriesQuota.Quota.Value != nil {
-		return int(*defaultConcurrentQueriesQuota.Quota.Value)
-	}
-
-	plog.Warn("Could not get default service quota")
-	return defaultConcurrentQueries
-}
-
-func (e *cloudWatchExecutor) startLiveQuery(ctx context.Context, responseChannel chan *backend.QueryDataResponse, query backend.DataQuery, timeRange backend.TimeRange, pluginCtx backend.PluginContext) error {
-	model, err := simplejson.NewJson(query.JSON)
-	if err != nil {
-		return err
-	}
-
-	dsInfo, err := e.getDSInfo(pluginCtx)
-	if err != nil {
-		return err
-	}
-
-	defaultRegion := dsInfo.region
-	region := model.Get("region").MustString(defaultRegion)
-	logsClient, err := e.getCWLogsClient(region, pluginCtx)
-	if err != nil {
-		return err
-	}
-
-	queue, err := e.getQueue(fmt.Sprintf("%s-%d", region, dsInfo.datasourceID), pluginCtx)
-	if err != nil {
-		return err
-	}
-
-	// Wait until there are no more active workers than the concurrent queries quota
-	queue <- true
-	defer func() { <-queue }()
-
-	startQueryOutput, err := e.executeStartQuery(ctx, logsClient, model, timeRange)
-	if err != nil {
-		responseChannel <- &backend.QueryDataResponse{
-			Responses: backend.Responses{
-				query.RefID: {Error: err},
-			},
-		}
-		return err
-	}
-
-	queryResultsInput := &cloudwatchlogs.GetQueryResultsInput{
-		QueryId: startQueryOutput.QueryId,
-	}
-
-	recordsMatched := 0.0
-	return retryer.Retry(func() (retryer.RetrySignal, error) {
-		getQueryResultsOutput, err := logsClient.GetQueryResultsWithContext(ctx, queryResultsInput)
-		if err != nil {
-			return retryer.FuncError, err
-		}
-
-		retryNeeded := *getQueryResultsOutput.Statistics.RecordsMatched <= recordsMatched
-		recordsMatched = *getQueryResultsOutput.Statistics.RecordsMatched
-
-		dataFrame, err := logsResultsToDataframes(getQueryResultsOutput)
-		if err != nil {
-			return retryer.FuncError, err
-		}
-
-		dataFrame.Name = query.RefID
-		dataFrame.RefID = query.RefID
-		dataFrames, err := groupResponseFrame(dataFrame, model.Get("statsGroups").MustStringArray())
-		if err != nil {
-			return retryer.FuncError, fmt.Errorf("failed to group dataframe response: %v", err)
-		}
-
-		responseChannel <- &backend.QueryDataResponse{
-			Responses: backend.Responses{
-				query.RefID: {
-					Frames: dataFrames,
-				},
-			},
-		}
-
-		if isTerminated(*getQueryResultsOutput.Status) {
-			return retryer.FuncComplete, nil
-		} else if retryNeeded {
-			return retryer.FuncFailure, nil
-		}
-
-		return retryer.FuncSuccess, nil
-	}, maxAttempts, minRetryDelay, maxRetryDelay)
-}
-
 func groupResponseFrame(frame *data.Frame, statsGroups []string) (data.Frames, error) {
 	var dataFrames data.Frames
 
@@ -337,15 +51,3 @@ func setPreferredVisType(frame *data.Frame, visType data.VisType) {
 		}
 	}
 }
-
-// Service quotas client factory.
-//
-// Stubbable by tests.
-var newQuotasClient = func(sess *session.Session) servicequotasiface.ServiceQuotasAPI {
-	client := servicequotas.New(sess)
-	client.Handlers.Send.PushFront(func(r *request.Request) {
-		r.HTTPRequest.Header.Set("User-Agent", fmt.Sprintf("Grafana/%s", setting.BuildVersion))
-	})
-
-	return client
-}

@@ -52,7 +52,7 @@ func TestQuery_DescribeLogGroups(t *testing.T) {
 		return datasourceInfo{}, nil
 	})
 
-	executor := newExecutor(nil, im, newTestConfig(), fakeSessionCache{})
+	executor := newExecutor(im, newTestConfig(), fakeSessionCache{})
 	resp, err := executor.QueryData(context.Background(), &backend.QueryDataRequest{
 		PluginContext: backend.PluginContext{
 			DataSourceInstanceSettings: &backend.DataSourceInstanceSettings{},
@@ -108,7 +108,7 @@ func TestQuery_DescribeLogGroups(t *testing.T) {
 		return datasourceInfo{}, nil
 	})
 
-	executor := newExecutor(nil, im, newTestConfig(), fakeSessionCache{})
+	executor := newExecutor(im, newTestConfig(), fakeSessionCache{})
 	resp, err := executor.QueryData(context.Background(), &backend.QueryDataRequest{
 		PluginContext: backend.PluginContext{
 			DataSourceInstanceSettings: &backend.DataSourceInstanceSettings{},
@@ -183,7 +183,7 @@ func TestQuery_GetLogGroupFields(t *testing.T) {
 		return datasourceInfo{}, nil
 	})
 
-	executor := newExecutor(nil, im, newTestConfig(), fakeSessionCache{})
+	executor := newExecutor(im, newTestConfig(), fakeSessionCache{})
 	resp, err := executor.QueryData(context.Background(), &backend.QueryDataRequest{
 		PluginContext: backend.PluginContext{
 			DataSourceInstanceSettings: &backend.DataSourceInstanceSettings{},
@@ -264,7 +264,7 @@ func TestQuery_StartQuery(t *testing.T) {
 		return datasourceInfo{}, nil
 	})
 
-	executor := newExecutor(nil, im, newTestConfig(), fakeSessionCache{})
+	executor := newExecutor(im, newTestConfig(), fakeSessionCache{})
 	_, err := executor.QueryData(context.Background(), &backend.QueryDataRequest{
 		PluginContext: backend.PluginContext{
 			DataSourceInstanceSettings: &backend.DataSourceInstanceSettings{},
@@ -317,7 +317,7 @@ func TestQuery_StartQuery(t *testing.T) {
 		return datasourceInfo{}, nil
 	})
 
-	executor := newExecutor(nil, im, newTestConfig(), fakeSessionCache{})
+	executor := newExecutor(im, newTestConfig(), fakeSessionCache{})
 	resp, err := executor.QueryData(context.Background(), &backend.QueryDataRequest{
 		PluginContext: backend.PluginContext{
 			DataSourceInstanceSettings: &backend.DataSourceInstanceSettings{},
@@ -397,7 +397,7 @@ func TestQuery_StopQuery(t *testing.T) {
 		To:   time.Unix(1584700643, 0),
 	}
 
-	executor := newExecutor(nil, im, newTestConfig(), fakeSessionCache{})
+	executor := newExecutor(im, newTestConfig(), fakeSessionCache{})
 	resp, err := executor.QueryData(context.Background(), &backend.QueryDataRequest{
 		PluginContext: backend.PluginContext{
 			DataSourceInstanceSettings: &backend.DataSourceInstanceSettings{},
@@ -487,7 +487,7 @@ func TestQuery_GetQueryResults(t *testing.T) {
 		return datasourceInfo{}, nil
 	})
 
-	executor := newExecutor(nil, im, newTestConfig(), fakeSessionCache{})
+	executor := newExecutor(im, newTestConfig(), fakeSessionCache{})
 	resp, err := executor.QueryData(context.Background(), &backend.QueryDataRequest{
 		PluginContext: backend.PluginContext{
 			DataSourceInstanceSettings: &backend.DataSourceInstanceSettings{},

@@ -1,58 +0,0 @@
-package cloudwatch
-
-import (
-	"fmt"
-	"sync"
-
-	"github.com/grafana/grafana-plugin-sdk-go/backend"
-)
-
-func ProvideLogsService() *LogsService {
-	return &LogsService{
-		responseChannels: make(map[string]chan *backend.QueryDataResponse),
-		queues:           make(map[string](chan bool)),
-	}
-}
-
-// LogsService provides methods for querying CloudWatch Logs.
-type LogsService struct {
-	channelMu        sync.Mutex
-	responseChannels map[string]chan *backend.QueryDataResponse
-	queues           map[string](chan bool)
-	queueLock        sync.Mutex
-}
-
-func (s *LogsService) AddResponseChannel(name string, channel chan *backend.QueryDataResponse) error {
-	s.channelMu.Lock()
-	defer s.channelMu.Unlock()
-
-	if _, ok := s.responseChannels[name]; ok {
-		return fmt.Errorf("channel with name '%s' already exists", name)
-	}
-
-	s.responseChannels[name] = channel
-	return nil
-}
-
-func (s *LogsService) GetResponseChannel(name string) (chan *backend.QueryDataResponse, error) {
-	s.channelMu.Lock()
-	defer s.channelMu.Unlock()
-
-	if responseChannel, ok := s.responseChannels[name]; ok {
-		return responseChannel, nil
-	}
-
-	return nil, fmt.Errorf("channel with name '%s' not found", name)
-}
-
-func (s *LogsService) DeleteResponseChannel(name string) {
-	s.channelMu.Lock()
-	defer s.channelMu.Unlock()
-
-	if _, ok := s.responseChannels[name]; ok {
-		delete(s.responseChannels, name)
-		return
-	}
-
-	plog.Warn("Channel with name '" + name + "' not found")
-}
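The file above is deleted outright together with its only callers (shown in the earlier hunks), so nothing replaces it. For readers skimming the removal: LogsService was essentially a mutex-guarded registry of named response channels that the live query path published into. A minimal, self-contained sketch of that pattern, using illustrative names rather than Grafana APIs:

	// Illustrative only; mirrors the shape of the deleted LogsService.
	package main

	import (
		"fmt"
		"sync"
	)

	// channelRegistry maps channel names to channels, guarded by a mutex.
	type channelRegistry struct {
		mu       sync.Mutex
		channels map[string]chan string
	}

	func newChannelRegistry() *channelRegistry {
		return &channelRegistry{channels: make(map[string]chan string)}
	}

	// add registers a channel under a unique name.
	func (r *channelRegistry) add(name string, ch chan string) error {
		r.mu.Lock()
		defer r.mu.Unlock()
		if _, ok := r.channels[name]; ok {
			return fmt.Errorf("channel with name '%s' already exists", name)
		}
		r.channels[name] = ch
		return nil
	}

	// remove drops a previously registered channel.
	func (r *channelRegistry) remove(name string) {
		r.mu.Lock()
		defer r.mu.Unlock()
		delete(r.channels, name)
	}

	func main() {
		reg := newChannelRegistry()
		_ = reg.add("plugin/cloudwatch/example", make(chan string))
		reg.remove("plugin/cloudwatch/example")
	}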

@@ -10,7 +10,7 @@ import (
 func TestMetricDataQueryBuilder(t *testing.T) {
 	t.Run("buildMetricDataQuery", func(t *testing.T) {
 		t.Run("should use metric stat", func(t *testing.T) {
-			executor := newExecutor(nil, nil, newTestConfig(), fakeSessionCache{})
+			executor := newExecutor(nil, newTestConfig(), fakeSessionCache{})
 			query := getBaseQuery()
 			query.MetricEditorMode = MetricEditorModeBuilder
 			query.MetricQueryType = MetricQueryTypeSearch
@@ -22,7 +22,7 @@ func TestMetricDataQueryBuilder(t *testing.T) {
 		})
 
 		t.Run("should use custom built expression", func(t *testing.T) {
-			executor := newExecutor(nil, nil, newTestConfig(), fakeSessionCache{})
+			executor := newExecutor(nil, newTestConfig(), fakeSessionCache{})
 			query := getBaseQuery()
 			query.MetricEditorMode = MetricEditorModeBuilder
 			query.MetricQueryType = MetricQueryTypeSearch
@@ -34,7 +34,7 @@ func TestMetricDataQueryBuilder(t *testing.T) {
 		})
 
 		t.Run("should use sql expression", func(t *testing.T) {
-			executor := newExecutor(nil, nil, newTestConfig(), fakeSessionCache{})
+			executor := newExecutor(nil, newTestConfig(), fakeSessionCache{})
 			query := getBaseQuery()
 			query.MetricEditorMode = MetricEditorModeRaw
 			query.MetricQueryType = MetricQueryTypeQuery
@@ -46,7 +46,7 @@ func TestMetricDataQueryBuilder(t *testing.T) {
 		})
 
 		t.Run("should use user defined math expression", func(t *testing.T) {
-			executor := newExecutor(nil, nil, newTestConfig(), fakeSessionCache{})
+			executor := newExecutor(nil, newTestConfig(), fakeSessionCache{})
 			query := getBaseQuery()
 			query.MetricEditorMode = MetricEditorModeRaw
 			query.MetricQueryType = MetricQueryTypeSearch
@@ -58,7 +58,7 @@ func TestMetricDataQueryBuilder(t *testing.T) {
 		})
 
 		t.Run("should set period in user defined expression", func(t *testing.T) {
-			executor := newExecutor(nil, nil, newTestConfig(), fakeSessionCache{})
+			executor := newExecutor(nil, newTestConfig(), fakeSessionCache{})
 			query := getBaseQuery()
 			query.MetricEditorMode = MetricEditorModeRaw
 			query.MetricQueryType = MetricQueryTypeSearch

@@ -53,7 +53,7 @@ func TestQuery_Metrics(t *testing.T) {
 		return datasourceInfo{}, nil
 	})
 
-	executor := newExecutor(nil, im, newTestConfig(), fakeSessionCache{})
+	executor := newExecutor(im, newTestConfig(), fakeSessionCache{})
 	resp, err := executor.QueryData(context.Background(), &backend.QueryDataRequest{
 		PluginContext: backend.PluginContext{
 			DataSourceInstanceSettings: &backend.DataSourceInstanceSettings{},
@@ -108,7 +108,7 @@ func TestQuery_Metrics(t *testing.T) {
 		return datasourceInfo{}, nil
 	})
 
-	executor := newExecutor(nil, im, newTestConfig(), fakeSessionCache{})
+	executor := newExecutor(im, newTestConfig(), fakeSessionCache{})
 	resp, err := executor.QueryData(context.Background(), &backend.QueryDataRequest{
 		PluginContext: backend.PluginContext{
 			DataSourceInstanceSettings: &backend.DataSourceInstanceSettings{},
@@ -167,7 +167,7 @@ func TestQuery_Regions(t *testing.T) {
 		return datasourceInfo{}, nil
 	})
 
-	executor := newExecutor(nil, im, newTestConfig(), fakeSessionCache{})
+	executor := newExecutor(im, newTestConfig(), fakeSessionCache{})
 	resp, err := executor.QueryData(context.Background(), &backend.QueryDataRequest{
 		PluginContext: backend.PluginContext{
 			DataSourceInstanceSettings: &backend.DataSourceInstanceSettings{},
@@ -242,7 +242,7 @@ func TestQuery_InstanceAttributes(t *testing.T) {
 		return datasourceInfo{}, nil
 	})
 
-	executor := newExecutor(nil, im, newTestConfig(), fakeSessionCache{})
+	executor := newExecutor(im, newTestConfig(), fakeSessionCache{})
 	resp, err := executor.QueryData(context.Background(), &backend.QueryDataRequest{
 		PluginContext: backend.PluginContext{
 			DataSourceInstanceSettings: &backend.DataSourceInstanceSettings{},
@@ -341,7 +341,7 @@ func TestQuery_EBSVolumeIDs(t *testing.T) {
 		return datasourceInfo{}, nil
 	})
 
-	executor := newExecutor(nil, im, newTestConfig(), fakeSessionCache{})
+	executor := newExecutor(im, newTestConfig(), fakeSessionCache{})
 	resp, err := executor.QueryData(context.Background(), &backend.QueryDataRequest{
 		PluginContext: backend.PluginContext{
 			DataSourceInstanceSettings: &backend.DataSourceInstanceSettings{},
@@ -420,7 +420,7 @@ func TestQuery_ResourceARNs(t *testing.T) {
 		return datasourceInfo{}, nil
 	})
 
-	executor := newExecutor(nil, im, newTestConfig(), fakeSessionCache{})
+	executor := newExecutor(im, newTestConfig(), fakeSessionCache{})
 	resp, err := executor.QueryData(context.Background(), &backend.QueryDataRequest{
 		PluginContext: backend.PluginContext{
 			DataSourceInstanceSettings: &backend.DataSourceInstanceSettings{},
@@ -471,7 +471,7 @@ func TestQuery_GetAllMetrics(t *testing.T) {
 		return datasourceInfo{}, nil
 	})
 
-	executor := newExecutor(nil, im, newTestConfig(), fakeSessionCache{})
+	executor := newExecutor(im, newTestConfig(), fakeSessionCache{})
 	resp, err := executor.QueryData(context.Background(), &backend.QueryDataRequest{
 		PluginContext: backend.PluginContext{
 			DataSourceInstanceSettings: &backend.DataSourceInstanceSettings{},
@@ -526,7 +526,7 @@ func TestQuery_GetDimensionKeys(t *testing.T) {
 		return datasourceInfo{}, nil
 	})
 
-	executor := newExecutor(nil, im, newTestConfig(), fakeSessionCache{})
+	executor := newExecutor(im, newTestConfig(), fakeSessionCache{})
 	resp, err := executor.QueryData(context.Background(), &backend.QueryDataRequest{
 		PluginContext: backend.PluginContext{
 			DataSourceInstanceSettings: &backend.DataSourceInstanceSettings{},
@@ -574,7 +574,7 @@ func TestQuery_GetDimensionKeys(t *testing.T) {
 		return datasourceInfo{}, nil
 	})
 
-	executor := newExecutor(nil, im, newTestConfig(), fakeSessionCache{})
+	executor := newExecutor(im, newTestConfig(), fakeSessionCache{})
 	resp, err := executor.QueryData(context.Background(), &backend.QueryDataRequest{
 		PluginContext: backend.PluginContext{
 			DataSourceInstanceSettings: &backend.DataSourceInstanceSettings{},
@@ -686,7 +686,7 @@ func TestQuery_ListMetricsPagination(t *testing.T) {
 		im := datasource.NewInstanceManager(func(s backend.DataSourceInstanceSettings) (instancemgmt.Instance, error) {
 			return datasourceInfo{}, nil
 		})
-		executor := newExecutor(nil, im, &setting.Cfg{AWSListMetricsPageLimit: 3, AWSAllowedAuthProviders: []string{"default"}, AWSAssumeRoleEnabled: true}, fakeSessionCache{})
+		executor := newExecutor(im, &setting.Cfg{AWSListMetricsPageLimit: 3, AWSAllowedAuthProviders: []string{"default"}, AWSAssumeRoleEnabled: true}, fakeSessionCache{})
 		response, err := executor.listMetrics("default", &cloudwatch.ListMetricsInput{}, backend.PluginContext{
 			DataSourceInstanceSettings: &backend.DataSourceInstanceSettings{},
 		})
@@ -701,7 +701,7 @@ func TestQuery_ListMetricsPagination(t *testing.T) {
 		im := datasource.NewInstanceManager(func(s backend.DataSourceInstanceSettings) (instancemgmt.Instance, error) {
 			return datasourceInfo{}, nil
 		})
-		executor := newExecutor(nil, im, &setting.Cfg{AWSListMetricsPageLimit: 1000, AWSAllowedAuthProviders: []string{"default"}, AWSAssumeRoleEnabled: true}, fakeSessionCache{})
+		executor := newExecutor(im, &setting.Cfg{AWSListMetricsPageLimit: 1000, AWSAllowedAuthProviders: []string{"default"}, AWSAssumeRoleEnabled: true}, fakeSessionCache{})
 		response, err := executor.listMetrics("default", &cloudwatch.ListMetricsInput{}, backend.PluginContext{
 			DataSourceInstanceSettings: &backend.DataSourceInstanceSettings{},
 		})

@@ -19,7 +19,7 @@ import (
 )
 
 func TestTimeSeriesQuery(t *testing.T) {
-	executor := newExecutor(nil, nil, newTestConfig(), fakeSessionCache{})
+	executor := newExecutor(nil, newTestConfig(), fakeSessionCache{})
 	now := time.Now()
 
 	origNewCWClient := NewCWClient
@@ -54,7 +54,7 @@ func TestTimeSeriesQuery(t *testing.T) {
 		return datasourceInfo{}, nil
 	})
 
-	executor := newExecutor(nil, im, newTestConfig(), fakeSessionCache{})
+	executor := newExecutor(im, newTestConfig(), fakeSessionCache{})
 	resp, err := executor.QueryData(context.Background(), &backend.QueryDataRequest{
 		PluginContext: backend.PluginContext{
 			DataSourceInstanceSettings: &backend.DataSourceInstanceSettings{},