Search: remove internal ID from bluge index (#49205)
Co-authored-by: Alexander Emelin <frvzmb@gmail.com>
This commit is contained in:
parent fe16680c6d
commit 9a59ccd644
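This change drops the legacy __internal_id field from the bluge search index and removes getDashboardFolderUID, which resolved a folder's UID by querying that field. Folder UIDs are now resolved through a folderUIDLookup callback that newFolderIDLookup backs with a SQL query against the dashboard table; the callback is injected via newDashboardIndex, and removeDashboard/updateDashboard gain a context.Context parameter, presumably so the SQL-backed lookup runs under the caller's context. Along the way, documentFieldName_ngram is renamed to documentFieldNameNgram and the ds_uids local becomes dsUIDs.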
@@ -25,14 +25,13 @@ const (
 	documentFieldTag = "tag"
 	documentFieldURL = "url"
 	documentFieldName = "name"
-	documentFieldName_ngram = "name_ngram"
+	documentFieldNameNgram = "name_ngram"
 	documentFieldDescription = "description"
 	documentFieldLocation = "location" // parent path
 	documentFieldPanelType = "panel_type"
 	documentFieldTransformer = "transformer"
 	documentFieldDSUID = "ds_uid"
 	documentFieldDSType = "ds_type"
-	documentFieldInternalID = "__internal_id" // only for migrations! (indexed as a string)
 )
 
 func initIndex(dashboards []dashboard, logger log.Logger, extendDoc ExtendDashboardFunc) (*bluge.Reader, *bluge.Writer, error) {
@@ -149,10 +148,9 @@ func getFolderDashboardDoc(dash dashboard) *bluge.Document {
 		AddField(bluge.NewKeywordField(documentFieldKind, string(entityKindFolder)).Aggregatable().StoreValue()).
 		AddField(bluge.NewKeywordField(documentFieldURL, url).StoreValue()).
 		AddField(bluge.NewTextField(documentFieldName, dash.info.Title).StoreValue().SearchTermPositions()).
-		AddField(getNameNGramField(dash.info.Title)).
-		AddField(bluge.NewTextField(documentFieldDescription, dash.info.Description).SearchTermPositions()).
-		// Add legacy ID (for lookup by internal ID)
-		AddField(bluge.NewKeywordField(documentFieldInternalID, fmt.Sprintf("%d", dash.id)).Aggregatable().StoreValue())
+		AddField(getNameNGramField(dash.info.Title)).
+		AddField(bluge.NewTextField(documentFieldDescription, dash.info.Description).SearchTermPositions())
 }
 
 func getNonFolderDashboardDoc(dash dashboard, location string) *bluge.Document {
@@ -167,9 +165,6 @@ func getNonFolderDashboardDoc(dash dashboard, location string) *bluge.Document {
 		AddField(getNameNGramField(dash.info.Title)).
 		AddField(bluge.NewTextField(documentFieldDescription, dash.info.Description).SearchTermPositions())
 
-	// Add legacy ID (for lookup by internal ID)
-	doc.AddField(bluge.NewKeywordField(documentFieldInternalID, fmt.Sprintf("%d", dash.id)))
-
 	for _, tag := range dash.info.Tags {
 		doc.AddField(bluge.NewKeywordField(documentFieldTag, tag).
 			StoreValue().
@@ -255,36 +250,7 @@ var ngramQueryAnalyzer = &analysis.Analyzer{
 }
 
 func getNameNGramField(name string) bluge.Field {
-	return bluge.NewTextField(documentFieldName_ngram, name).WithAnalyzer(ngramIndexAnalyzer)
-}
-
-func getDashboardFolderUID(reader *bluge.Reader, folderID int64) (string, error) {
-	fullQuery := bluge.NewBooleanQuery()
-	fullQuery.AddMust(bluge.NewTermQuery(strconv.FormatInt(folderID, 10)).SetField(documentFieldInternalID))
-	fullQuery.AddMust(bluge.NewTermQuery(string(entityKindFolder)).SetField(documentFieldKind))
-	req := bluge.NewAllMatches(fullQuery)
-	req.WithStandardAggregations()
-	documentMatchIterator, err := reader.Search(context.Background(), req)
-	if err != nil {
-		return "", err
-	}
-	var uid string
-	match, err := documentMatchIterator.Next()
-	for err == nil && match != nil {
-		// load the identifier for this match
-		err = match.VisitStoredFields(func(field string, value []byte) bool {
-			if field == documentFieldUID {
-				uid = string(value)
-			}
-			return true
-		})
-		if err != nil {
-			return "", err
-		}
-		// load the next document match
-		match, err = documentMatchIterator.Next()
-	}
-	return uid, err
+	return bluge.NewTextField(documentFieldNameNgram, name).WithAnalyzer(ngramIndexAnalyzer)
 }
 
 func getDashboardPanelIDs(reader *bluge.Reader, dashboardUID string) ([]string, error) {
@@ -293,7 +259,6 @@ func getDashboardPanelIDs(reader *bluge.Reader, dashboardUID string) ([]string,
 	fullQuery.AddMust(bluge.NewTermQuery(dashboardUID).SetField(documentFieldDSUID))
 	fullQuery.AddMust(bluge.NewTermQuery(string(entityKindPanel)).SetField(documentFieldKind))
 	req := bluge.NewAllMatches(fullQuery)
-	req.WithStandardAggregations()
 	documentMatchIterator, err := reader.Search(context.Background(), req)
 	if err != nil {
 		return nil, err
@@ -320,7 +285,7 @@ func getDashboardPanelIDs(reader *bluge.Reader, dashboardUID string) ([]string,
 func doSearchQuery(ctx context.Context, logger log.Logger, reader *bluge.Reader, filter ResourceFilter, q DashboardQuery, extender QueryExtender) *backend.DataResponse {
 	response := &backend.DataResponse{}
 
-	// Folder listing structure
+	// Folder listing structure.
 	idx := strings.Index(q.Query, ":")
 	if idx > 0 {
 		key := q.Query[0:idx]
@@ -345,7 +310,7 @@ func doSearchQuery(ctx context.Context, logger log.Logger, reader *bluge.Reader,
 	fullQuery := bluge.NewBooleanQuery()
 	fullQuery.AddMust(newPermissionFilter(filter, logger))
 
-	// Only show dashboard / folders
+	// Only show dashboard / folders / panels.
 	if len(q.Kind) > 0 {
 		bq := bluge.NewBooleanQuery()
 		for _, k := range q.Kind {
@@ -400,7 +365,7 @@ func doSearchQuery(ctx context.Context, logger log.Logger, reader *bluge.Reader,
 		AddShould(bluge.NewMatchPhraseQuery(q.Query).SetField(documentFieldName).SetBoost(6)).
 		AddShould(bluge.NewMatchPhraseQuery(q.Query).SetField(documentFieldDescription).SetBoost(3)).
 		AddShould(bluge.NewMatchQuery(q.Query).
-			SetField(documentFieldName_ngram).
+			SetField(documentFieldNameNgram).
 			SetAnalyzer(ngramQueryAnalyzer).SetBoost(1))
 
 	if len(q.Query) > 4 {
@@ -447,8 +412,6 @@ func doSearchQuery(ctx context.Context, logger log.Logger, reader *bluge.Reader,
 	dvfieldNames := []string{"type"}
 	sctx := search.NewSearchContext(0, 0)
 
-	// numericFields := map[string]bool{"schemaVersion": true, "panelCount": true}
-
 	fScore := data.NewFieldFromFieldType(data.FieldTypeFloat64, 0)
 	fUID := data.NewFieldFromFieldType(data.FieldTypeString, 0)
 	fKind := data.NewFieldFromFieldType(data.FieldTypeString, 0)
@@ -500,19 +463,10 @@ func doSearchQuery(ctx context.Context, logger log.Logger, reader *bluge.Reader,
 		name := ""
 		url := ""
 		loc := ""
-		var ds_uids []string
+		var dsUIDs []string
 		var tags []string
 
 		err = match.VisitStoredFields(func(field string, value []byte) bool {
-			// if numericFields[field] {
-			// num, err2 := bluge.DecodeNumericFloat64(value)
-			// if err2 != nil {
-			// vals[field] = num
-			// }
-			// } else {
-			// vals[field] = string(value)
-			// }
-
			switch field {
			case documentFieldUID:
				uid = string(value)
@@ -527,7 +481,7 @@ func doSearchQuery(ctx context.Context, logger log.Logger, reader *bluge.Reader,
 			case documentFieldLocation:
 				loc = string(value)
 			case documentFieldDSUID:
-				ds_uids = append(ds_uids, string(value))
+				dsUIDs = append(dsUIDs, string(value))
 			case documentFieldTag:
 				tags = append(tags, string(value))
 			default:
@@ -563,8 +517,8 @@ func doSearchQuery(ctx context.Context, logger log.Logger, reader *bluge.Reader,
 			fTags.Append(nil)
 		}
 
-		if len(ds_uids) > 0 {
-			js, _ := json.Marshal(ds_uids)
+		if len(dsUIDs) > 0 {
+			js, _ := json.Marshal(dsUIDs)
 			jsb := json.RawMessage(js)
 			fDSUIDs.Append(&jsb)
 		} else {
@@ -598,7 +552,7 @@ func doSearchQuery(ctx context.Context, logger log.Logger, reader *bluge.Reader,
 	aggs := documentMatchIterator.Aggregations()
 
 	header := &customMeta{
-		Count: aggs.Count(), // Total cound
+		Count: aggs.Count(), // Total count.
 	}
 	if q.Explain {
 		header.MaxScore = aggs.Metric("max_score")
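Besides removing documentFieldInternalID and getDashboardFolderUID, the hunks above rename documentFieldName_ngram to documentFieldNameNgram, rename the ds_uids local to dsUIDs, drop a WithStandardAggregations call from getDashboardPanelIDs, and delete a block of commented-out numericFields handling. The hunks below add the replacement lookup; an illustrative sketch of the injection pattern follows them.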
@@ -34,6 +34,10 @@ type eventStore interface {
 	GetAllEventsAfter(ctx context.Context, id int64) ([]*store.EntityEvent, error)
 }
 
+// While we migrate away from internal IDs... this lets us lookup values in SQL
+// NOTE: folderId is unique across all orgs
+type folderUIDLookup = func(ctx context.Context, folderId int64) (string, error)
+
 type dashboard struct {
 	id  int64
 	uid string
@@ -46,25 +50,27 @@ type dashboard struct {
 }
 
 type dashboardIndex struct {
-	mu           sync.RWMutex
-	loader       dashboardLoader
-	perOrgReader map[int64]*bluge.Reader // orgId -> bluge reader
-	perOrgWriter map[int64]*bluge.Writer // orgId -> bluge writer
-	eventStore   eventStore
-	logger       log.Logger
-	buildSignals chan int64
-	extender     DocumentExtender
+	mu             sync.RWMutex
+	loader         dashboardLoader
+	perOrgReader   map[int64]*bluge.Reader // orgId -> bluge reader
+	perOrgWriter   map[int64]*bluge.Writer // orgId -> bluge writer
+	eventStore     eventStore
+	logger         log.Logger
+	buildSignals   chan int64
+	extender       DocumentExtender
+	folderIdLookup folderUIDLookup
 }
 
-func newDashboardIndex(dashLoader dashboardLoader, evStore eventStore, extender DocumentExtender) *dashboardIndex {
+func newDashboardIndex(dashLoader dashboardLoader, evStore eventStore, extender DocumentExtender, folderIDs folderUIDLookup) *dashboardIndex {
 	return &dashboardIndex{
-		loader:       dashLoader,
-		eventStore:   evStore,
-		perOrgReader: map[int64]*bluge.Reader{},
-		perOrgWriter: map[int64]*bluge.Writer{},
-		logger:       log.New("dashboardIndex"),
-		buildSignals: make(chan int64),
-		extender:     extender,
+		loader:         dashLoader,
+		eventStore:     evStore,
+		perOrgReader:   map[int64]*bluge.Reader{},
+		perOrgWriter:   map[int64]*bluge.Writer{},
+		logger:         log.New("dashboardIndex"),
+		buildSignals:   make(chan int64),
+		extender:       extender,
+		folderIdLookup: folderIDs,
 	}
 }
@@ -351,9 +357,9 @@ func (i *dashboardIndex) applyDashboardEvent(ctx context.Context, orgID int64, d
 
 	// In the future we can rely on operation types to reduce work here.
 	if len(dbDashboards) == 0 {
-		newReader, err = i.removeDashboard(writer, reader, dashboardUID)
+		newReader, err = i.removeDashboard(ctx, writer, reader, dashboardUID)
 	} else {
-		newReader, err = i.updateDashboard(orgID, writer, reader, dbDashboards[0])
+		newReader, err = i.updateDashboard(ctx, orgID, writer, reader, dbDashboards[0])
 	}
 	if err != nil {
 		return err
@@ -362,7 +368,7 @@ func (i *dashboardIndex) applyDashboardEvent(ctx context.Context, orgID int64, d
 	return nil
 }
 
-func (i *dashboardIndex) removeDashboard(writer *bluge.Writer, reader *bluge.Reader, dashboardUID string) (*bluge.Reader, error) {
+func (i *dashboardIndex) removeDashboard(_ context.Context, writer *bluge.Writer, reader *bluge.Reader, dashboardUID string) (*bluge.Reader, error) {
 	// Find all panel docs to remove with dashboard.
 	panelIDs, err := getDashboardPanelIDs(reader, dashboardUID)
 	if err != nil {
@@ -392,7 +398,7 @@ func stringInSlice(str string, slice []string) bool {
 	return false
 }
 
-func (i *dashboardIndex) updateDashboard(orgID int64, writer *bluge.Writer, reader *bluge.Reader, dash dashboard) (*bluge.Reader, error) {
+func (i *dashboardIndex) updateDashboard(ctx context.Context, orgID int64, writer *bluge.Writer, reader *bluge.Reader, dash dashboard) (*bluge.Reader, error) {
 	batch := bluge.NewBatch()
 
 	extendDoc := i.extender.GetDashboardExtender(orgID, dash.uid)
@@ -409,7 +415,7 @@ func (i *dashboardIndex) updateDashboard(orgID int64, writer *bluge.Writer, read
 		folderUID = "general"
 	} else {
 		var err error
-		folderUID, err = getDashboardFolderUID(reader, dash.folderID)
+		folderUID, err = i.folderIdLookup(ctx, dash.folderID)
 		if err != nil {
 			return nil, err
 		}
@@ -547,6 +553,27 @@ func (l sqlDashboardLoader) LoadDashboards(ctx context.Context, orgID int64, das
 	return dashboards, err
 }
 
+func newFolderIDLookup(sql *sqlstore.SQLStore) folderUIDLookup {
+	return func(ctx context.Context, folderID int64) (string, error) {
+		uid := ""
+		err := sql.WithDbSession(ctx, func(sess *sqlstore.DBSession) error {
+			sess.Table("dashboard").
+				Where("id = ?", folderID).
+				Cols("uid")
+
+			res, err := sess.Query("SELECT uid FROM dashboard WHERE id=?", folderID)
+			if err != nil {
+				return err
+			}
+			if len(res) > 0 {
+				uid = string(res[0]["uid"])
+			}
+			return nil
+		})
+		return uid, err
+	}
+}
+
 type dashboardQueryResult struct {
 	Id  int64
 	Uid string
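The net effect is a small dependency-injection seam: dashboardIndex no longer reads folder UIDs out of its own bluge index, it calls whatever folderUIDLookup it was given. Production wiring uses newFolderIDLookup(sql), and the tests below pass a fixed stub. Here is a minimal, self-contained sketch of that pattern; indexStub, resolveFolderUID, and the stub lookup are illustrative names invented for the sketch, not part of the commit.

package main

import (
	"context"
	"fmt"
)

// folderUIDLookup mirrors the type alias added in this commit: a callback
// that resolves a folder's UID from its legacy internal ID.
type folderUIDLookup = func(ctx context.Context, folderID int64) (string, error)

// indexStub stands in for dashboardIndex purely to illustrate how the
// lookup is injected and called; it is not part of the Grafana code.
type indexStub struct {
	folderIdLookup folderUIDLookup
}

// resolveFolderUID mimics the flow of updateDashboard in the diff: some
// dashboards fall back to the "general" folder, otherwise the injected
// lookup is consulted and its error propagated. Treating folderID == 0 as
// the fallback case is an assumption of this sketch; the exact condition
// lies outside the shown hunk.
func (i *indexStub) resolveFolderUID(ctx context.Context, folderID int64) (string, error) {
	if folderID == 0 {
		return "general", nil
	}
	return i.folderIdLookup(ctx, folderID)
}

func main() {
	idx := &indexStub{
		// Production wiring would use newFolderIDLookup(sql); tests pass a
		// fixed stub, much like the updated initTestIndexFromDashesExtended.
		folderIdLookup: func(ctx context.Context, folderID int64) (string, error) {
			return fmt.Sprintf("folder-uid-%d", folderID), nil
		},
	}
	uid, err := idx.resolveFolderUID(context.Background(), 7)
	fmt.Println(uid, err)
}

Passing the lookup as a plain function type keeps the index decoupled from sqlstore and makes it trivial for tests to stub.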
@@ -48,7 +48,11 @@ func initTestIndexFromDashesExtended(t *testing.T, dashboards []dashboard, exten
 	dashboardLoader := &testDashboardLoader{
 		dashboards: dashboards,
 	}
-	index := newDashboardIndex(dashboardLoader, &store.MockEntityEventsService{}, extender)
+	index := newDashboardIndex(
+		dashboardLoader,
+		&store.MockEntityEventsService{},
+		extender,
+		func(ctx context.Context, folderId int64) (string, error) { return "x", nil })
 	require.NotNil(t, index)
 	numDashboards, err := index.buildOrgIndex(context.Background(), testOrgID)
 	require.NoError(t, err)
@@ -110,7 +114,7 @@ func TestDashboardIndexUpdates(t *testing.T) {
 	t.Run("dashboard-delete", func(t *testing.T) {
 		index, reader, writer := initTestIndexFromDashes(t, testDashboards)
 
-		newReader, err := index.removeDashboard(writer, reader, "2")
+		newReader, err := index.removeDashboard(context.Background(), writer, reader, "2")
 		require.NoError(t, err)
 
 		checkSearchResponse(t, filepath.Base(t.Name())+".txt", newReader, testAllowAllFilter,
@@ -121,7 +125,7 @@ func TestDashboardIndexUpdates(t *testing.T) {
 	t.Run("dashboard-create", func(t *testing.T) {
 		index, reader, writer := initTestIndexFromDashes(t, testDashboards)
 
-		newReader, err := index.updateDashboard(testOrgID, writer, reader, dashboard{
+		newReader, err := index.updateDashboard(context.Background(), testOrgID, writer, reader, dashboard{
 			id:  3,
 			uid: "3",
 			info: &extract.DashboardInfo{
@@ -138,7 +142,7 @@ func TestDashboardIndexUpdates(t *testing.T) {
 	t.Run("dashboard-update", func(t *testing.T) {
 		index, reader, writer := initTestIndexFromDashes(t, testDashboards)
 
-		newReader, err := index.updateDashboard(testOrgID, writer, reader, dashboard{
+		newReader, err := index.updateDashboard(context.Background(), testOrgID, writer, reader, dashboard{
 			id:  2,
 			uid: "2",
 			info: &extract.DashboardInfo{
@@ -39,9 +39,14 @@ func ProvideService(cfg *setting.Cfg, sql *sqlstore.SQLStore, entityEventStore s
 			sql: sql,
 			ac:  ac,
 		},
-		dashboardIndex: newDashboardIndex(newSQLDashboardLoader(sql), entityEventStore, extender.GetDocumentExtender()),
-		logger:         log.New("searchV2"),
-		extender:       extender,
+		dashboardIndex: newDashboardIndex(
+			newSQLDashboardLoader(sql),
+			entityEventStore,
+			extender.GetDocumentExtender(),
+			newFolderIDLookup(sql),
+		),
+		logger:   log.New("searchV2"),
+		extender: extender,
 	}
 	return s
 }