package searchV2

import (
	"bytes"
	"context"
	"errors"
	"fmt"
	"os"
	"os/exec"
	"runtime"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/blugelabs/bluge"
	"go.opentelemetry.io/otel/attribute"

	"github.com/grafana/grafana/pkg/infra/db"
	"github.com/grafana/grafana/pkg/infra/log"
	"github.com/grafana/grafana/pkg/infra/tracing"
	"github.com/grafana/grafana/pkg/services/featuremgmt"
	"github.com/grafana/grafana/pkg/services/folder"
	"github.com/grafana/grafana/pkg/services/store"
	"github.com/grafana/grafana/pkg/services/store/entity"
	kdash "github.com/grafana/grafana/pkg/services/store/kind/dashboard"
	"github.com/grafana/grafana/pkg/setting"
)

type dashboardLoader interface {
	// LoadDashboards returns a slice of dashboards. If dashboardUID is empty, the
	// implementation must return all dashboards in the instance to build an entire
	// dashboard index for an organization. If dashboardUID is not empty, only the
	// dashboard with the specified UID is returned, or an empty slice if not found
	// (this is required to apply partial updates).
	LoadDashboards(ctx context.Context, orgID int64, dashboardUID string) ([]dashboard, error)
}

type eventStore interface {
	GetLastEvent(ctx context.Context) (*store.EntityEvent, error)
	GetAllEventsAfter(ctx context.Context, id int64) ([]*store.EntityEvent, error)
}

// While we migrate away from internal IDs, this lets us look up values in SQL.
// NOTE: folderId is unique across all orgs.
type folderUIDLookup = func(ctx context.Context, folderId int64) (string, error)

type dashboard struct {
	id       int64
	uid      string
	isFolder bool
	folderID int64
	slug     string
	created  time.Time
	updated  time.Time

	// Use generic structure
	summary *entity.EntitySummary
}

// buildSignal is sent when search index is accessed in organization for which
// we have not constructed an index yet.
type buildSignal struct {
	orgID int64
	done  chan error
}

type orgIndex struct {
	writers map[indexType]*bluge.Writer
}

type indexType string

const (
	indexTypeDashboard indexType = "dashboard"
)

func (i *orgIndex) writerForIndex(idxType indexType) *bluge.Writer {
	return i.writers[idxType]
}

func (i *orgIndex) readerForIndex(idxType indexType) (*bluge.Reader, func(), error) {
	reader, err := i.writers[idxType].Reader()
	if err != nil {
		return nil, nil, err
	}
	return reader, func() { _ = reader.Close() }, nil
}

type searchIndex struct {
	mu                      sync.RWMutex
	loader                  dashboardLoader
	perOrgIndex             map[int64]*orgIndex
	initializedOrgs         map[int64]bool
	initialIndexingComplete bool
	initializationMutex     sync.RWMutex
	eventStore              eventStore
	logger                  log.Logger
	buildSignals            chan buildSignal
	extender                DocumentExtender
	folderIdLookup          folderUIDLookup
	syncCh                  chan chan struct{}
	tracer                  tracing.Tracer
	features                featuremgmt.FeatureToggles
	settings                setting.SearchSettings
}

func newSearchIndex(dashLoader dashboardLoader, evStore eventStore, extender DocumentExtender, folderIDs folderUIDLookup, tracer tracing.Tracer, features featuremgmt.FeatureToggles, settings setting.SearchSettings) *searchIndex {
	return &searchIndex{
		loader:          dashLoader,
		eventStore:      evStore,
		perOrgIndex:     map[int64]*orgIndex{},
		initializedOrgs: map[int64]bool{},
		logger:          log.New("searchIndex"),
		buildSignals:    make(chan buildSignal),
		extender:        extender,
		folderIdLookup:  folderIDs,
		syncCh:          make(chan chan struct{}),
		tracer:          tracer,
		features:        features,
		settings:        settings,
	}
}
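
// isInitialized reports whether the search index is ready to serve queries
// for the given org, and triggers a lazy index build when the org has not
// been indexed yet.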
func (i *searchIndex) isInitialized(_ context.Context, orgId int64) IsSearchReadyResponse {
	i.initializationMutex.RLock()
	orgInitialized := i.initializedOrgs[orgId]
	initialInitComplete := i.initialIndexingComplete
	i.initializationMutex.RUnlock()

	if orgInitialized && initialInitComplete {
		return IsSearchReadyResponse{IsReady: true}
	}

	if !initialInitComplete {
		return IsSearchReadyResponse{IsReady: false, Reason: "initial-indexing-ongoing"}
	}

	i.triggerBuildingOrgIndex(orgId)
	return IsSearchReadyResponse{IsReady: false, Reason: "org-indexing-ongoing"}
}
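
// triggerBuildingOrgIndex asynchronously sends a build signal for the org's
// index and logs the outcome; it gives up after a 5 minute timeout.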
func (i *searchIndex) triggerBuildingOrgIndex(orgId int64) {
	go func() {
		ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
		defer cancel()

		doneIndexing := make(chan error, 1)
		signal := buildSignal{orgID: orgId, done: doneIndexing}
		select {
		case i.buildSignals <- signal:
		case <-ctx.Done():
			i.logger.Warn("Failed to send a build signal to initialize org index", "orgId", orgId)
			return
		}
		select {
		case err := <-doneIndexing:
			if err != nil {
				i.logger.Error("Failed to build org index", "orgId", orgId, "error", err)
			} else {
				i.logger.Debug("Successfully built org index", "orgId", orgId)
			}
		case <-ctx.Done():
			i.logger.Warn("Building org index timeout", "orgId", orgId)
		}
	}()
}
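
// sync blocks until all entity events accumulated at the time of the call
// have been applied to the index, so that a subsequent search request sees
// a consistent view.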
func (i *searchIndex) sync(ctx context.Context) error {
	doneCh := make(chan struct{}, 1)
	select {
	case i.syncCh <- doneCh:
	case <-ctx.Done():
		return ctx.Err()
	}
	select {
	case <-doneCh:
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}
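
// run is the main event loop of the search index: it builds initial indexes,
// applies entity-event updates on demand and on a timer, handles per-org
// build signals, and periodically re-indexes everything from scratch.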
func (i *searchIndex) run(ctx context.Context, orgIDs []int64, reIndexSignalCh chan struct{}) error {
	i.logger.Info("Initializing SearchV2", "dashboardLoadingBatchSize", i.settings.DashboardLoadingBatchSize, "fullReindexInterval", i.settings.FullReindexInterval, "indexUpdateInterval", i.settings.IndexUpdateInterval)
	initialSetupCtx, initialSetupSpan := i.tracer.Start(ctx, "searchV2 initialSetup")

	reIndexInterval := i.settings.FullReindexInterval
	fullReIndexTimer := time.NewTimer(reIndexInterval)
	defer fullReIndexTimer.Stop()

	partialUpdateInterval := i.settings.IndexUpdateInterval
	partialUpdateTimer := time.NewTimer(partialUpdateInterval)
	defer partialUpdateTimer.Stop()

	var lastEventID int64
	lastEvent, err := i.eventStore.GetLastEvent(initialSetupCtx)
	if err != nil {
		initialSetupSpan.End()
		return err
	}
	if lastEvent != nil {
		lastEventID = lastEvent.Id
	}

	err = i.buildInitialIndexes(initialSetupCtx, orgIDs)
	if err != nil {
		initialSetupSpan.End()
		return err
	}

	// This semaphore channel allows limiting concurrent async re-indexing routines to 1.
	asyncReIndexSemaphore := make(chan struct{}, 1)

	// Channel to handle signals about asynchronous full re-indexing completion.
	reIndexDoneCh := make(chan int64, 1)

	i.initializationMutex.Lock()
	i.initialIndexingComplete = true
	i.initializationMutex.Unlock()

	initialSetupSpan.End()

	for {
		select {
		case doneCh := <-i.syncCh:
			// Executed on search read requests to make sure the index is consistent.
			lastEventID = i.applyIndexUpdates(ctx, lastEventID)
			close(doneCh)
		case <-partialUpdateTimer.C:
			// Periodically apply updates collected in the entity events table.
			partialIndexUpdateCtx, span := i.tracer.Start(ctx, "searchV2 partial update timer")
			lastEventID = i.applyIndexUpdates(partialIndexUpdateCtx, lastEventID)
			span.End()
			partialUpdateTimer.Reset(partialUpdateInterval)
		case <-reIndexSignalCh:
			// External systems may trigger re-indexing; at the moment provisioning does this.
			i.logger.Info("Full re-indexing due to external signal")
			fullReIndexTimer.Reset(0)
		case signal := <-i.buildSignals:
			buildSignalCtx, span := i.tracer.Start(ctx, "searchV2 build signal")

			// When a search read request hits an org that is not indexed yet, we build an index for it.
			i.mu.RLock()
			_, ok := i.perOrgIndex[signal.orgID]
			if ok {
				span.End()
				// Index for org already exists, do nothing.
				i.mu.RUnlock()
				close(signal.done)
				continue
			}
			i.mu.RUnlock()
			lastIndexedEventID := lastEventID
			// Prevent full re-indexing while we are building an index for the new org.
			// Full re-indexing will be re-started later in the
			// `case lastIndexedEventID := <-reIndexDoneCh` branch.
			fullReIndexTimer.Stop()
			go func() {
				defer span.End()
				// We need the semaphore here since asynchronous re-indexing may be in progress already.
				asyncReIndexSemaphore <- struct{}{}
				defer func() { <-asyncReIndexSemaphore }()
				_, err = i.buildOrgIndex(buildSignalCtx, signal.orgID)
				signal.done <- err
				reIndexDoneCh <- lastIndexedEventID
			}()
		case <-fullReIndexTimer.C:
			fullReindexCtx, span := i.tracer.Start(ctx, "searchV2 full reindex timer")

			// Periodically rebuild indexes since we could miss updates. At the moment we issue
			// entity events non-atomically (outside of a transaction) and do not cover all possible
			// dashboard change places, so periodic re-indexing fixes possibly broken state. Ideally
			// we should arrive at an approach which does not require periodic re-indexing at all;
			// one possible way is to use DB triggers, see https://github.com/grafana/grafana/pull/47712.
			lastIndexedEventID := lastEventID
			go func() {
				defer span.End()
				// Do the full re-index asynchronously to avoid blocking index synchronization
				// on read for a long time.

				// We need the semaphore here since re-indexing due to a build signal may be in progress already.
				asyncReIndexSemaphore <- struct{}{}
				defer func() { <-asyncReIndexSemaphore }()

				started := time.Now()
				i.logger.Info("Start re-indexing", i.withCtxData(fullReindexCtx)...)
				i.reIndexFromScratch(fullReindexCtx)
				i.logger.Info("Full re-indexing finished", i.withCtxData(fullReindexCtx, "fullReIndexElapsed", time.Since(started))...)
				reIndexDoneCh <- lastIndexedEventID
			}()
		case lastIndexedEventID := <-reIndexDoneCh:
			// Asynchronous re-indexing is finished. Set lastEventID to the value that was
			// current when re-indexing started, so that we re-apply all the events that
			// happened during the async index build and keep the index consistent.
			if lastEventID != lastIndexedEventID {
				i.logger.Info("Re-apply event ID to last indexed", "currentEventID", lastEventID, "lastIndexedEventID", lastIndexedEventID)
				lastEventID = lastIndexedEventID
				// Apply events immediately.
				partialUpdateTimer.Reset(0)
			}
			fullReIndexTimer.Reset(reIndexInterval)
		case <-ctx.Done():
			return ctx.Err()
		}
	}
}
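
// buildInitialIndexes builds an index for every org known at startup,
// sequentially, failing fast on the first error.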
func (i *searchIndex) buildInitialIndexes(ctx context.Context, orgIDs []int64) error {
	started := time.Now()
	i.logger.Info("Start building in-memory indexes")
	for _, orgID := range orgIDs {
		err := i.buildInitialIndex(ctx, orgID)
		if err != nil {
			return fmt.Errorf("can't build initial dashboard search index for org %d: %w", orgID, err)
		}
	}
	i.logger.Info("Finish building in-memory indexes", "elapsed", time.Since(started))
	return nil
}

func (i *searchIndex) buildInitialIndex(ctx context.Context, orgID int64) error {
	debugCtx, debugCtxCancel := context.WithCancel(ctx)
	if os.Getenv("GF_SEARCH_DEBUG") != "" {
		go i.debugResourceUsage(debugCtx, 200*time.Millisecond)
	}

	started := time.Now()
	numDashboards, err := i.buildOrgIndex(ctx, orgID)
	if err != nil {
		debugCtxCancel()
		return fmt.Errorf("can't build dashboard search index for org %d: %w", orgID, err)
	}
	i.logger.Info("Indexing for org finished", "orgIndexElapsed", time.Since(started), "orgId", orgID, "numDashboards", numDashboards)
	debugCtxCancel()

	if os.Getenv("GF_SEARCH_DEBUG") != "" {
		// May help to estimate the size of the index when introducing changes. It is not a
		// direct match for memory consumption, but it at least gives some sense of relative
		// differences. Moreover, changes in indexing can cause additional memory consumption
		// upon the initial index build which is not reflected here.
		i.reportSizeOfIndexDiskBackup(orgID)
	}
	return nil
}

// This is a naive implementation of getting process CPU usage (credits to
// https://stackoverflow.com/a/11357813/1288429). Should work on both Linux and Darwin.
// Since we only use this during development, it seems a simple and cheap way to get
// process CPU usage in a cross-platform manner.
func getProcessCPU(currentPid int) (float64, error) {
	cmd := exec.Command("ps", "aux")
	var out bytes.Buffer
	cmd.Stdout = &out
	err := cmd.Run()
	if err != nil {
		return 0, err
	}
	for {
		line, err := out.ReadString('\n')
		if err != nil {
			break
		}
		tokens := strings.Split(line, " ")
		ft := make([]string, 0)
		for _, t := range tokens {
			if t != "" && t != "\t" {
				ft = append(ft, t)
			}
		}
		// Skip lines (such as the header) that do not have at least the PID and CPU columns.
		if len(ft) < 3 {
			continue
		}
		pid, err := strconv.Atoi(ft[1])
		if err != nil {
			continue
		}
		if pid != currentPid {
			continue
		}
		cpu, err := strconv.ParseFloat(ft[2], 64)
		if err != nil {
			return 0, err
		}
		return cpu, nil
	}
	return 0, errors.New("process not found")
}
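
// debugResourceUsage samples process memory and CPU usage at the given
// frequency until ctx is cancelled, then logs the observed peaks. Only used
// when GF_SEARCH_DEBUG is set.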
func (i *searchIndex) debugResourceUsage(ctx context.Context, frequency time.Duration) {
	var maxHeapInuse uint64
	var maxSys uint64

	captureMemStats := func() {
		var m runtime.MemStats
		runtime.ReadMemStats(&m)
		if m.HeapInuse > maxHeapInuse {
			maxHeapInuse = m.HeapInuse
		}
		if m.Sys > maxSys {
			maxSys = m.Sys
		}
	}

	var cpuUtilization []float64

	captureCPUStats := func() {
		cpu, err := getProcessCPU(os.Getpid())
		if err != nil {
			i.logger.Error("CPU stats error", "error", err)
			return
		}
		// Just collect CPU utilization into a slice and show it at the end of the index build.
		cpuUtilization = append(cpuUtilization, cpu)
	}

	captureMemStats()
	captureCPUStats()

	for {
		select {
		case <-ctx.Done():
			i.logger.Warn("Resource usage during indexing", "maxHeapInUse", formatBytes(maxHeapInuse), "maxSys", formatBytes(maxSys), "cpuPercent", cpuUtilization)
			return
		case <-time.After(frequency):
			captureMemStats()
			captureCPUStats()
		}
	}
}
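
// reportSizeOfIndexDiskBackup writes a disk backup of the org's dashboard
// index into a temp directory and logs its size. This is a rough proxy for
// index size, used for debugging only.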
func (i *searchIndex) reportSizeOfIndexDiskBackup(orgID int64) {
	index, _ := i.getOrgIndex(orgID)
	reader, cancel, err := index.readerForIndex(indexTypeDashboard)
	if err != nil {
		i.logger.Warn("Error getting reader", "error", err)
		return
	}
	defer cancel()

	// create a temp directory to store the index
	tmpDir, err := os.MkdirTemp("", "grafana.dashboard_index")
	if err != nil {
		i.logger.Error("can't create temp dir", "error", err)
		return
	}
	defer func() {
		err := os.RemoveAll(tmpDir)
		if err != nil {
			i.logger.Error("can't remove temp dir", "error", err, "tmpDir", tmpDir)
			return
		}
	}()

	cancelCh := make(chan struct{})
	err = reader.Backup(tmpDir, cancelCh)
	if err != nil {
		i.logger.Error("can't create index disk backup", "error", err)
		return
	}

	size, err := dirSize(tmpDir)
	if err != nil {
		i.logger.Error("can't calculate dir size", "error", err)
		return
	}

	i.logger.Warn("Size of index disk backup", "size", formatBytes(uint64(size)))
}
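
// buildOrgIndex loads all dashboards for the given org, builds a fresh
// in-memory index from them, and atomically swaps it in, closing the old
// writers. It returns the number of dashboards indexed.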
func (i *searchIndex) buildOrgIndex(ctx context.Context, orgID int64) (int, error) {
	spanCtx, span := i.tracer.Start(ctx, "searchV2 buildOrgIndex")
	span.SetAttributes("org_id", orgID, attribute.Key("org_id").Int64(orgID))

	started := time.Now()
	ctx, cancel := context.WithTimeout(spanCtx, time.Minute)
	ctx = log.InitCounter(ctx)

	defer func() {
		span.End()
		cancel()
	}()

	i.logger.Info("Start building org index", "orgId", orgID)
	dashboards, err := i.loader.LoadDashboards(ctx, orgID, "")
	orgSearchIndexLoadTime := time.Since(started)

	if err != nil {
		return 0, fmt.Errorf("error loading dashboards: %w, elapsed: %s", err, orgSearchIndexLoadTime.String())
	}
	i.logger.Info("Finish loading org dashboards", "elapsed", orgSearchIndexLoadTime, "orgId", orgID)

	dashboardExtender := i.extender.GetDashboardExtender(orgID)

	_, initOrgIndexSpan := i.tracer.Start(ctx, "searchV2 buildOrgIndex init org index")
	initOrgIndexSpan.SetAttributes("org_id", orgID, attribute.Key("org_id").Int64(orgID))
	initOrgIndexSpan.SetAttributes("dashboardCount", len(dashboards), attribute.Key("dashboardCount").Int(len(dashboards)))

	index, err := initOrgIndex(dashboards, i.logger, dashboardExtender)

	initOrgIndexSpan.End()

	if err != nil {
		return 0, fmt.Errorf("error initializing index: %w", err)
	}
	orgSearchIndexTotalTime := time.Since(started)
	orgSearchIndexBuildTime := orgSearchIndexTotalTime - orgSearchIndexLoadTime

	i.logger.Info("Re-indexed dashboards for organization",
		i.withCtxData(ctx, "orgId", orgID,
			"orgSearchIndexLoadTime", orgSearchIndexLoadTime,
			"orgSearchIndexBuildTime", orgSearchIndexBuildTime,
			"orgSearchIndexTotalTime", orgSearchIndexTotalTime,
			"orgSearchDashboardCount", len(dashboards))...)

	i.mu.Lock()
	if oldIndex, ok := i.perOrgIndex[orgID]; ok {
		for _, w := range oldIndex.writers {
			_ = w.Close()
		}
	}
	i.perOrgIndex[orgID] = index
	i.mu.Unlock()

	i.initializationMutex.Lock()
	i.initializedOrgs[orgID] = true
	i.initializationMutex.Unlock()

	if orgID == 1 {
		go func() {
			if reader, cancel, err := index.readerForIndex(indexTypeDashboard); err == nil {
				defer cancel()
				updateUsageStats(context.Background(), reader, i.logger, i.tracer)
			}
		}()
	}
	return len(dashboards), nil
}

func (i *searchIndex) getOrgIndex(orgID int64) (*orgIndex, bool) {
	i.mu.RLock()
	defer i.mu.RUnlock()
	r, ok := i.perOrgIndex[orgID]
	return r, ok
}

func (i *searchIndex) getOrCreateOrgIndex(ctx context.Context, orgID int64) (*orgIndex, error) {
	index, ok := i.getOrgIndex(orgID)
	if !ok {
		// For non-main organizations, indexes are built lazily.
		// If we don't have an index yet, we block here until an index for the
		// organization is ready. This actually takes time only on the first
		// access; subsequent search requests do not fall into this branch.
		doneIndexing := make(chan error, 1)
		signal := buildSignal{orgID: orgID, done: doneIndexing}
		select {
		case i.buildSignals <- signal:
		case <-ctx.Done():
			return nil, ctx.Err()
		}
		select {
		case err := <-doneIndexing:
			if err != nil {
				return nil, err
			}
		case <-ctx.Done():
			return nil, ctx.Err()
		}
		index, _ = i.getOrgIndex(orgID)
	}
	return index, nil
}
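
// reIndexFromScratch rebuilds the index for every currently indexed org.
// Errors are logged per org and do not stop the loop.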
func (i *searchIndex) reIndexFromScratch(ctx context.Context) {
	i.mu.RLock()
	orgIDs := make([]int64, 0, len(i.perOrgIndex))
	for orgID := range i.perOrgIndex {
		orgIDs = append(orgIDs, orgID)
	}
	i.mu.RUnlock()

	for _, orgID := range orgIDs {
		_, err := i.buildOrgIndex(ctx, orgID)
		if err != nil {
			i.logger.Error("Error re-indexing dashboards for organization", "orgId", orgID, "error", err)
			continue
		}
	}
}
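
// withCtxData enriches logger key/value pairs with the trace ID and, when the
// database metrics feature is enabled, the per-request DB call count.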
func (i *searchIndex) withCtxData(ctx context.Context, params ...interface{}) []interface{} {
	traceID := tracing.TraceIDFromContext(ctx, false)
	if traceID != "" {
		params = append(params, "traceID", traceID)
	}

	if i.features.IsEnabled(featuremgmt.FlagDatabaseMetrics) {
		params = append(params, "db_call_count", log.TotalDBCallCount(ctx))
	}

	return params
}
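
// applyIndexUpdates applies all entity events newer than lastEventID to the
// index and returns the ID of the last successfully applied event.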
func (i *searchIndex) applyIndexUpdates(ctx context.Context, lastEventID int64) int64 {
	ctx = log.InitCounter(ctx)
	events, err := i.eventStore.GetAllEventsAfter(ctx, lastEventID)
	if err != nil {
		i.logger.Error("can't load events", "error", err)
		return lastEventID
	}
	if len(events) == 0 {
		return lastEventID
	}
	started := time.Now()
	for _, e := range events {
		err := i.applyEventOnIndex(ctx, e)
		if err != nil {
			i.logger.Error("can't apply event", "error", err)
			return lastEventID
		}
		lastEventID = e.Id
	}
	i.logger.Info("Index updates applied", i.withCtxData(ctx, "indexEventsAppliedElapsed", time.Since(started), "numEvents", len(events))...)
	return lastEventID
}
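
// applyEventOnIndex parses an entity event ID of the form
// "database/{orgID}/{entityType}/{uid}" (for example "database/2/dashboard/abc123",
// where "abc123" is a hypothetical UID used purely for illustration) and
// dispatches it to applyEvent.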
func (i *searchIndex) applyEventOnIndex(ctx context.Context, e *store.EntityEvent) error {
	i.logger.Debug("processing event", "event", e)

	if !strings.HasPrefix(e.EntityId, "database/") {
		i.logger.Warn("unknown storage", "entityId", e.EntityId)
		return nil
	}
	// database/org/entityType/path*
	parts := strings.SplitN(strings.TrimPrefix(e.EntityId, "database/"), "/", 3)
	if len(parts) != 3 {
		i.logger.Error("can't parse entityId", "entityId", e.EntityId)
		return nil
	}
	orgIDStr := parts[0]
	orgID, err := strconv.ParseInt(orgIDStr, 10, 64)
	if err != nil {
		i.logger.Error("can't extract org ID", "entityId", e.EntityId)
		return nil
	}
	kind := store.EntityType(parts[1])
	uid := parts[2]
	return i.applyEvent(ctx, orgID, kind, uid, e.EventType)
}

func (i *searchIndex) applyEvent(ctx context.Context, orgID int64, kind store.EntityType, uid string, _ store.EntityEventType) error {
	i.mu.Lock()
	_, ok := i.perOrgIndex[orgID]
	if !ok {
		// Skip event for org not yet indexed.
		i.mu.Unlock()
		return nil
	}
	i.mu.Unlock()

	// Both dashboards and folders share the same DB table.
	dbDashboards, err := i.loader.LoadDashboards(ctx, orgID, uid)
	if err != nil {
		return err
	}

	i.mu.Lock()
	defer i.mu.Unlock()

	index, ok := i.perOrgIndex[orgID]
	if !ok {
		// Skip event for org not yet fully indexed.
		return nil
	}

	// In the future we can rely on operation types to reduce work here.
	if len(dbDashboards) == 0 {
		switch kind {
		case store.EntityTypeDashboard:
			err = i.removeDashboard(ctx, index, uid)
		case store.EntityTypeFolder:
			err = i.removeFolder(ctx, index, uid)
		default:
			return nil
		}
	} else {
		err = i.updateDashboard(ctx, orgID, index, dbDashboards[0])
	}
	if err != nil {
		return err
	}
	return nil
}

func (i *searchIndex) removeDashboard(_ context.Context, index *orgIndex, dashboardUID string) error {
	dashboardLocation, ok, err := getDashboardLocation(index, dashboardUID)
	if err != nil {
		return err
	}
	if !ok {
		// No dashboard, nothing to remove.
		return nil
	}

	// Find all panel docs to remove with dashboard.
	panelLocation := dashboardUID
	if dashboardLocation != "" {
		panelLocation = dashboardLocation + "/" + dashboardUID
	}
	panelIDs, err := getDocsIDsByLocationPrefix(index, panelLocation)
	if err != nil {
		return err
	}

	writer := index.writerForIndex(indexTypeDashboard)

	batch := bluge.NewBatch()
	batch.Delete(bluge.NewDocument(dashboardUID).ID())
	for _, panelID := range panelIDs {
		batch.Delete(bluge.NewDocument(panelID).ID())
	}

	return writer.Batch(batch)
}

func (i *searchIndex) removeFolder(_ context.Context, index *orgIndex, folderUID string) error {
	ids, err := getDocsIDsByLocationPrefix(index, folderUID)
	if err != nil {
		return fmt.Errorf("error getting by location prefix: %w", err)
	}

	batch := bluge.NewBatch()
	batch.Delete(bluge.NewDocument(folderUID).ID())
	for _, id := range ids {
		batch.Delete(bluge.NewDocument(id).ID())
	}
	writer := index.writerForIndex(indexTypeDashboard)
	return writer.Batch(batch)
}

func stringInSlice(str string, slice []string) bool {
	for _, s := range slice {
		if s == str {
			return true
		}
	}
	return false
}
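
// updateDashboard re-indexes a single dashboard (or folder): it updates the
// main document, upserts the current panel documents, and deletes panel
// documents that no longer exist in the dashboard.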
func (i *searchIndex) updateDashboard(ctx context.Context, orgID int64, index *orgIndex, dash dashboard) error {
	extendDoc := i.extender.GetDashboardExtender(orgID, dash.uid)

	writer := index.writerForIndex(indexTypeDashboard)

	var doc *bluge.Document
	if dash.isFolder {
		doc = getFolderDashboardDoc(dash)
		if err := extendDoc(dash.uid, doc); err != nil {
			return err
		}
		return writer.Update(doc.ID(), doc)
	}

	batch := bluge.NewBatch()

	var folderUID string
	if dash.folderID == 0 {
		folderUID = folder.GeneralFolderUID
	} else {
		var err error
		folderUID, err = i.folderIdLookup(ctx, dash.folderID)
		if err != nil {
			return err
		}
	}

	location := folderUID
	doc = getNonFolderDashboardDoc(dash, location)
	if err := extendDoc(dash.uid, doc); err != nil {
		return err
	}

	if location != "" {
		location += "/"
	}
	location += dash.uid
	panelDocs := getDashboardPanelDocs(dash, location)
	actualPanelIDs := make([]string, 0, len(panelDocs))
	for _, panelDoc := range panelDocs {
		actualPanelIDs = append(actualPanelIDs, string(panelDoc.ID().Term()))
		batch.Update(panelDoc.ID(), panelDoc)
	}

	indexedPanelIDs, err := getDashboardPanelIDs(index, location)
	if err != nil {
		return err
	}

	for _, panelID := range indexedPanelIDs {
		if !stringInSlice(panelID, actualPanelIDs) {
			batch.Delete(bluge.NewDocument(panelID).ID())
		}
	}

	batch.Update(doc.ID(), doc)

	return writer.Batch(batch)
}

type sqlDashboardLoader struct {
	sql      db.DB
	logger   log.Logger
	tracer   tracing.Tracer
	settings setting.SearchSettings
}

func newSQLDashboardLoader(sql db.DB, tracer tracing.Tracer, settings setting.SearchSettings) *sqlDashboardLoader {
	return &sqlDashboardLoader{sql: sql, logger: log.New("sqlDashboardLoader"), tracer: tracer, settings: settings}
}

type dashboardsRes struct {
	dashboards []*dashboardQueryResult
	err        error
}
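
// loadAllDashboards streams dashboards for an org in batches of `limit` rows,
// keyset-paginating on the dashboard ID, and sends each batch to the returned
// channel. The channel is closed when all rows are read, an error occurs, or
// ctx is cancelled.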
func (l sqlDashboardLoader) loadAllDashboards(ctx context.Context, limit int, orgID int64, dashboardUID string) chan *dashboardsRes {
	ch := make(chan *dashboardsRes, 3)

	go func() {
		defer close(ch)

		var lastID int64
		for {
			select {
			case <-ctx.Done():
				err := ctx.Err()
				if err != nil {
					ch <- &dashboardsRes{
						dashboards: nil,
						err:        err,
					}
				}
				return
			default:
			}

			dashboardQueryCtx, dashboardQuerySpan := l.tracer.Start(ctx, "sqlDashboardLoader dashboardQuery")
			dashboardQuerySpan.SetAttributes("orgID", orgID, attribute.Key("orgID").Int64(orgID))
			dashboardQuerySpan.SetAttributes("dashboardUID", dashboardUID, attribute.Key("dashboardUID").String(dashboardUID))
			dashboardQuerySpan.SetAttributes("lastID", lastID, attribute.Key("lastID").Int64(lastID))

			rows := make([]*dashboardQueryResult, 0)
			err := l.sql.WithDbSession(dashboardQueryCtx, func(sess *db.Session) error {
				sess.Table("dashboard").
					Where("org_id = ?", orgID)

				if lastID > 0 {
					sess.Where("id > ?", lastID)
				}

				if dashboardUID != "" {
					sess.Where("uid = ?", dashboardUID)
				}

				sess.Cols("id", "uid", "is_folder", "folder_id", "data", "slug", "created", "updated")

				sess.OrderBy("id ASC")
				sess.Limit(limit)

				return sess.Find(&rows)
			})

			dashboardQuerySpan.End()

			if err != nil || len(rows) < limit || dashboardUID != "" {
				ch <- &dashboardsRes{
					dashboards: rows,
					err:        err,
				}
				break
			}

			ch <- &dashboardsRes{
				dashboards: rows,
			}

			if len(rows) > 0 {
				lastID = rows[len(rows)-1].Id
			}
		}
	}()

	return ch
}

func (l sqlDashboardLoader) LoadDashboards(ctx context.Context, orgID int64, dashboardUID string) ([]dashboard, error) {
	ctx, span := l.tracer.Start(ctx, "sqlDashboardLoader LoadDashboards")
	span.SetAttributes("orgID", orgID, attribute.Key("orgID").Int64(orgID))

	defer span.End()

	var dashboards []dashboard

	limit := 1

	if dashboardUID == "" {
		limit = l.settings.DashboardLoadingBatchSize
		dashboards = make([]dashboard, 0, limit)
	}

	loadDatasourceCtx, loadDatasourceSpan := l.tracer.Start(ctx, "sqlDashboardLoader LoadDatasourceLookup")
	loadDatasourceSpan.SetAttributes("orgID", orgID, attribute.Key("orgID").Int64(orgID))

	// The lookup key may be either a datasource name or a UID.
	lookup, err := kdash.LoadDatasourceLookup(loadDatasourceCtx, orgID, l.sql)
	if err != nil {
		loadDatasourceSpan.End()
		return dashboards, err
	}
	loadDatasourceSpan.End()

	loadingDashboardCtx, cancelLoadingDashboardCtx := context.WithCancel(ctx)
	defer cancelLoadingDashboardCtx()

	dashboardsChannel := l.loadAllDashboards(loadingDashboardCtx, limit, orgID, dashboardUID)

	for {
		res, ok := <-dashboardsChannel
		if res != nil && res.err != nil {
			l.logger.Error("Error when loading dashboards", "error", res.err, "orgID", orgID, "dashboardUID", dashboardUID)
			break
		}

		if res == nil || !ok {
			break
		}

		rows := res.dashboards

		_, readDashboardSpan := l.tracer.Start(ctx, "sqlDashboardLoader readDashboard")
		readDashboardSpan.SetAttributes("orgID", orgID, attribute.Key("orgID").Int64(orgID))
		readDashboardSpan.SetAttributes("dashboardCount", len(rows), attribute.Key("dashboardCount").Int(len(rows)))

		reader := kdash.NewStaticDashboardSummaryBuilder(lookup, false)

		for _, row := range rows {
			summary, _, err := reader(ctx, row.Uid, row.Data)
			if err != nil {
				l.logger.Warn("Error indexing dashboard data", "error", err, "dashboardId", row.Id, "dashboardSlug", row.Slug)
				// But append the info anyway for now, since we possibly extracted useful information.
			}
			dashboards = append(dashboards, dashboard{
				id:       row.Id,
				uid:      row.Uid,
				isFolder: row.IsFolder,
				folderID: row.FolderID,
				slug:     row.Slug,
				created:  row.Created,
				updated:  row.Updated,
				summary:  summary,
			})
		}
		readDashboardSpan.End()
	}

	return dashboards, err
}

func newFolderIDLookup(sql db.DB) folderUIDLookup {
	return func(ctx context.Context, folderID int64) (string, error) {
		uid := ""
		err := sql.WithDbSession(ctx, func(sess *db.Session) error {
			res, err := sess.Query("SELECT uid FROM dashboard WHERE id=?", folderID)
			if err != nil {
				return err
			}
			if len(res) > 0 {
				uid = string(res[0]["uid"])
			}
			return nil
		})
		return uid, err
	}
}

type dashboardQueryResult struct {
	Id       int64
	Uid      string
	IsFolder bool   `xorm:"is_folder"`
	FolderID int64  `xorm:"folder_id"`
	Slug     string `xorm:"slug"`
	Data     []byte
	Created  time.Time
	Updated  time.Time
}