SearchV2: add more tracing spans (#55488)

* add additional spans to searchv2

* add initial setup span

* add dashboardCount attribute to readDashboard span

* add dashboard count to init org index span

* fix merge

Co-authored-by: Todd Treece <todd.treece@grafana.com>
Author: Artur Wierzbicki, 2022-09-21 16:14:52 +02:00 (committed by GitHub)
Parent: 0629d3922a
Commit: 1f2cab9036
3 changed files with 51 additions and 13 deletions
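
The hunks below all apply the same tracing pattern: start a span from the incoming context, attach identifying attributes (org ID, dashboard UID, dashboard count), hand the span's context to downstream calls so their spans nest under it, and end the span on every return path. A minimal sketch of that pattern, assuming Grafana's pkg/infra/tracing interfaces and the OpenTelemetry attribute package as they appear in the diff; the function and parameter names here are illustrative only, not part of the change:

// Illustrative sketch only (not part of the commit): the span pattern the
// hunks below repeat, assuming Grafana's tracing.Tracer/Span interfaces and
// the OpenTelemetry attribute package.
package example

import (
	"context"

	"github.com/grafana/grafana/pkg/infra/tracing"
	"go.opentelemetry.io/otel/attribute"
)

func doIndexWork(ctx context.Context, tracer tracing.Tracer, orgID int64,
	step func(context.Context, int64) error) error {
	spanCtx, span := tracer.Start(ctx, "searchV2 doIndexWork")
	// Grafana's Span.SetAttributes takes the key, the raw value, and the
	// equivalent OTel attribute, matching the calls in the diff.
	span.SetAttributes("org_id", orgID, attribute.Key("org_id").Int64(orgID))
	defer span.End()

	// Downstream work receives spanCtx so any spans it opens nest under this one.
	return step(spanCtx, orgID)
}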

File 1 of 3

@@ -180,6 +180,7 @@ func (i *searchIndex) sync(ctx context.Context) error {
func (i *searchIndex) run(ctx context.Context, orgIDs []int64, reIndexSignalCh chan struct{}) error {
i.logger.Info("Initializing SearchV2", "dashboardLoadingBatchSize", i.settings.DashboardLoadingBatchSize, "fullReindexInterval", i.settings.FullReindexInterval, "indexUpdateInterval", i.settings.IndexUpdateInterval)
initialSetupCtx, initialSetupSpan := i.tracer.Start(ctx, "searchV2 initialSetup")
reIndexInterval := i.settings.FullReindexInterval
fullReIndexTimer := time.NewTimer(reIndexInterval)
@@ -190,16 +191,18 @@ func (i *searchIndex) run(ctx context.Context, orgIDs []int64, reIndexSignalCh c
defer partialUpdateTimer.Stop()
var lastEventID int64
lastEvent, err := i.eventStore.GetLastEvent(ctx)
lastEvent, err := i.eventStore.GetLastEvent(initialSetupCtx)
if err != nil {
initialSetupSpan.End()
return err
}
if lastEvent != nil {
lastEventID = lastEvent.Id
}
err = i.buildInitialIndexes(ctx, orgIDs)
err = i.buildInitialIndexes(initialSetupCtx, orgIDs)
if err != nil {
initialSetupSpan.End()
return err
}
@@ -213,6 +216,8 @@ func (i *searchIndex) run(ctx context.Context, orgIDs []int64, reIndexSignalCh c
i.initialIndexingComplete = true
i.initializationMutex.Unlock()
initialSetupSpan.End()
for {
select {
case doneCh := <-i.syncCh:
@@ -458,11 +463,17 @@ func (i *searchIndex) reportSizeOfIndexDiskBackup(orgID int64) {
}
func (i *searchIndex) buildOrgIndex(ctx context.Context, orgID int64) (int, error) {
spanCtx, span := i.tracer.Start(ctx, "searchV2 buildOrgIndex")
span.SetAttributes("org_id", orgID, attribute.Key("org_id").Int64(orgID))
started := time.Now()
ctx, cancel := context.WithTimeout(ctx, time.Minute)
ctx, cancel := context.WithTimeout(spanCtx, time.Minute)
ctx = log.InitCounter(ctx)
defer cancel()
defer func() {
span.End()
cancel()
}()
i.logger.Info("Start building org index", "orgId", orgID)
dashboards, err := i.loader.LoadDashboards(ctx, orgID, "")
@@ -475,8 +486,9 @@ func (i *searchIndex) buildOrgIndex(ctx context.Context, orgID int64) (int, erro
dashboardExtender := i.extender.GetDashboardExtender(orgID)
_, initOrgIndexSpan := i.tracer.Start(ctx, "searchV2 init org index")
_, initOrgIndexSpan := i.tracer.Start(ctx, "searchV2 buildOrgIndex init org index")
initOrgIndexSpan.SetAttributes("org_id", orgID, attribute.Key("org_id").Int64(orgID))
initOrgIndexSpan.SetAttributes("dashboardCount", len(dashboards), attribute.Key("dashboardCount").Int(len(dashboards)))
index, err := initOrgIndex(dashboards, i.logger, dashboardExtender)
@@ -512,7 +524,7 @@ func (i *searchIndex) buildOrgIndex(ctx context.Context, orgID int64) (int, erro
go func() {
if reader, cancel, err := index.readerForIndex(indexTypeDashboard); err == nil {
defer cancel()
updateUsageStats(context.Background(), reader, i.logger)
updateUsageStats(context.Background(), reader, i.logger, i.tracer)
}
}()
}
@@ -793,14 +805,20 @@ func (i *searchIndex) updateDashboard(ctx context.Context, orgID int64, index *o
type sqlDashboardLoader struct {
sql *sqlstore.SQLStore
logger log.Logger
tracer tracing.Tracer
settings setting.SearchSettings
}
func newSQLDashboardLoader(sql *sqlstore.SQLStore, settings setting.SearchSettings) *sqlDashboardLoader {
return &sqlDashboardLoader{sql: sql, logger: log.New("sqlDashboardLoader"), settings: settings}
func newSQLDashboardLoader(sql *sqlstore.SQLStore, tracer tracing.Tracer, settings setting.SearchSettings) *sqlDashboardLoader {
return &sqlDashboardLoader{sql: sql, logger: log.New("sqlDashboardLoader"), tracer: tracer, settings: settings}
}
func (l sqlDashboardLoader) LoadDashboards(ctx context.Context, orgID int64, dashboardUID string) ([]dashboard, error) {
ctx, span := l.tracer.Start(ctx, "sqlDashboardLoader LoadDashboards")
span.SetAttributes("orgID", orgID, attribute.Key("orgID").Int64(orgID))
defer span.End()
var dashboards []dashboard
limit := 1
@@ -825,18 +843,28 @@ func (l sqlDashboardLoader) LoadDashboards(ctx context.Context, orgID int64, das
})
}
loadDatasourceCtx, loadDatasourceSpan := l.tracer.Start(ctx, "sqlDashboardLoader LoadDatasourceLookup")
loadDatasourceSpan.SetAttributes("orgID", orgID, attribute.Key("orgID").Int64(orgID))
// key will allow name or uid
lookup, err := dslookup.LoadDatasourceLookup(ctx, orgID, l.sql)
lookup, err := dslookup.LoadDatasourceLookup(loadDatasourceCtx, orgID, l.sql)
if err != nil {
loadDatasourceSpan.End()
return dashboards, err
}
loadDatasourceSpan.End()
var lastID int64
for {
rows := make([]*dashboardQueryResult, 0, limit)
dashboardQueryCtx, dashboardQuerySpan := l.tracer.Start(ctx, "sqlDashboardLoader dashboardQuery")
dashboardQuerySpan.SetAttributes("orgID", orgID, attribute.Key("orgID").Int64(orgID))
dashboardQuerySpan.SetAttributes("dashboardUID", dashboardUID, attribute.Key("dashboardUID").String(dashboardUID))
dashboardQuerySpan.SetAttributes("lastID", lastID, attribute.Key("lastID").Int64(lastID))
err = l.sql.WithDbSession(ctx, func(sess *sqlstore.DBSession) error {
rows := make([]dashboardQueryResult, 0, limit)
err = l.sql.WithDbSession(dashboardQueryCtx, func(sess *sqlstore.DBSession) error {
sess.Table("dashboard").
Where("org_id = ?", orgID)
@@ -857,8 +885,14 @@ func (l sqlDashboardLoader) LoadDashboards(ctx context.Context, orgID int64, das
})
if err != nil {
dashboardQuerySpan.End()
return nil, err
}
dashboardQuerySpan.End()
_, readDashboardSpan := l.tracer.Start(ctx, "sqlDashboardLoader readDashboard")
readDashboardSpan.SetAttributes("orgID", orgID, attribute.Key("orgID").Int64(orgID))
readDashboardSpan.SetAttributes("dashboardCount", len(rows), attribute.Key("dashboardCount").Int(len(rows)))
for _, row := range rows {
info, err := extract.ReadDashboard(bytes.NewReader(row.Data), lookup)
@@ -878,6 +912,7 @@ func (l sqlDashboardLoader) LoadDashboards(ctx context.Context, orgID int64, das
})
lastID = row.Id
}
readDashboardSpan.End()
if len(rows) < limit || dashboardUID != "" {
break
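
Two span-lifetime styles appear in the file above. A span that covers a whole function is ended via defer; buildOrgIndex folds span.End and the timeout cancel into a single deferred closure so both always run. A span that covers only one phase of a longer function (the initial setup in run, the datasource lookup, each dashboardQuery batch) cannot rely on defer, so End is called explicitly on the success path and on every early-return error path. A condensed sketch of both shapes, under the same assumptions as the sketch above and with placeholder names:

// Illustrative sketch only: the two span-lifetime shapes used above.
package example

import (
	"context"
	"time"

	"github.com/grafana/grafana/pkg/infra/tracing"
)

// Whole-function span: End and the timeout cancel share one deferred closure.
func buildSomething(ctx context.Context, tracer tracing.Tracer, work func(context.Context) error) error {
	spanCtx, span := tracer.Start(ctx, "searchV2 buildSomething")
	workCtx, cancel := context.WithTimeout(spanCtx, time.Minute)
	defer func() {
		span.End()
		cancel()
	}()
	return work(workCtx)
}

// Phase-scoped span: no defer, so End must run on each exit path.
func loadSomething(ctx context.Context, tracer tracing.Tracer, phase func(context.Context) error) error {
	phaseCtx, phaseSpan := tracer.Start(ctx, "searchV2 loadSomething phase")
	if err := phase(phaseCtx); err != nil {
		phaseSpan.End()
		return err
	}
	phaseSpan.End()
	// The rest of the function continues outside the phase span.
	return nil
}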

File 2 of 3

@@ -81,7 +81,7 @@ func ProvideService(cfg *setting.Cfg, sql *sqlstore.SQLStore, entityEventStore s
ac: ac,
},
dashboardIndex: newSearchIndex(
newSQLDashboardLoader(sql, cfg.Search),
newSQLDashboardLoader(sql, tracer, cfg.Search),
entityEventStore,
extender.GetDocumentExtender(),
newFolderIDLookup(sql),

File 3 of 3

@@ -7,6 +7,7 @@ import (
"github.com/blugelabs/bluge/search"
"github.com/blugelabs/bluge/search/aggregations"
"github.com/grafana/grafana/pkg/infra/log"
"github.com/grafana/grafana/pkg/infra/tracing"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
)
@@ -42,7 +43,9 @@
}
)
func updateUsageStats(ctx context.Context, reader *bluge.Reader, logger log.Logger) {
func updateUsageStats(ctx context.Context, reader *bluge.Reader, logger log.Logger, tracer tracing.Tracer) {
ctx, span := tracer.Start(ctx, "searchV2 updateUsageStats")
defer span.End()
req := bluge.NewAllMatches(bluge.NewTermQuery("panel").SetField(documentFieldKind))
for _, usage := range panelUsage {
req.AddAggregation(usage.field, aggregations.NewTermsAggregation(search.Field(usage.field), 50))
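
Worth noting about this last change: buildOrgIndex (first file) calls updateUsageStats from a goroutine with context.Background(), so the "searchV2 updateUsageStats" span started here opens a new trace of its own rather than nesting under the buildOrgIndex span; the added tracer parameter is what makes the span possible at all, since the helper previously had no tracer in scope. If the stats work should ever need to appear inside the parent trace, a caller would have to forward a span-bearing context instead of context.Background().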