Storage: Remove entity store (#91233)

This commit is contained in:
Ryan McKinley 2024-07-31 09:25:39 +03:00 committed by GitHub
parent 10170cb839
commit 160fe2a3a4
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
92 changed files with 65 additions and 13340 deletions

View File

@ -385,7 +385,6 @@ protobuf: ## Compile protobuf definitions
go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@v1.4.0
buf generate pkg/plugins/backendplugin/pluginextensionv2 --template pkg/plugins/backendplugin/pluginextensionv2/buf.gen.yaml
buf generate pkg/plugins/backendplugin/secretsmanagerplugin --template pkg/plugins/backendplugin/secretsmanagerplugin/buf.gen.yaml
buf generate pkg/services/store/entity --template pkg/services/store/entity/buf.gen.yaml
buf generate pkg/storage/unified/resource --template pkg/storage/unified/resource/buf.gen.yaml
.PHONY: clean

View File

@ -15,7 +15,6 @@ import (
"github.com/grafana/grafana/pkg/apimachinery/utils"
"github.com/grafana/grafana/pkg/apis/folder/v0alpha1"
"github.com/grafana/grafana/pkg/services/apiserver/endpoints/request"
"github.com/grafana/grafana/pkg/services/apiserver/storage/entity"
"github.com/grafana/grafana/pkg/services/dashboards"
"github.com/grafana/grafana/pkg/services/folder"
"github.com/grafana/grafana/pkg/util"
@ -67,16 +66,16 @@ func (s *legacyStorage) List(ctx context.Context, options *internalversion.ListO
}
parentUID := ""
// translate grafana.app/* label selectors into field requirements
requirements, newSelector, err := entity.ReadLabelSelectors(options.LabelSelector)
if err != nil {
return nil, err
}
if requirements.Folder != nil {
parentUID = *requirements.Folder
}
// Update the selector to remove the unneeded requirements
options.LabelSelector = newSelector
// // translate grafana.app/* label selectors into field requirements
// requirements, newSelector, err := entity.ReadLabelSelectors(options.LabelSelector)
// if err != nil {
// return nil, err
// }
// if requirements.Folder != nil {
// parentUID = *requirements.Folder
// }
// // Update the selector to remove the unneeded requirements
// options.LabelSelector = newSelector
paging, err := readContinueToken(options)
if err != nil {

View File

@ -41,7 +41,6 @@ import (
"github.com/grafana/grafana/pkg/services/ssosettings"
"github.com/grafana/grafana/pkg/services/ssosettings/ssosettingsimpl"
"github.com/grafana/grafana/pkg/services/store"
"github.com/grafana/grafana/pkg/services/store/entity"
"github.com/grafana/grafana/pkg/services/store/sanitizer"
"github.com/grafana/grafana/pkg/services/supportbundles/supportbundlesimpl"
"github.com/grafana/grafana/pkg/services/team/teamapi"
@ -68,7 +67,7 @@ func ProvideBackgroundServiceRegistry(
_ dashboardsnapshots.Service,
_ serviceaccounts.Service, _ *guardian.Provider,
_ *plugindashboardsservice.DashboardUpdater, _ *sanitizer.Provider,
_ *grpcserver.HealthService, _ entity.EntityStoreServer, _ authz.Client, _ *grpcserver.ReflectionService,
_ *grpcserver.HealthService, _ authz.Client, _ *grpcserver.ReflectionService,
_ *ldapapi.Service, _ *apiregistry.Service, _ auth.IDService, _ *teamapi.TeamAPI, _ ssosettings.Service,
_ cloudmigration.Service, _ authnimpl.Registration,
) *BackgroundServiceRegistry {

View File

@ -137,10 +137,6 @@ import (
"github.com/grafana/grafana/pkg/services/star/starimpl"
"github.com/grafana/grafana/pkg/services/stats/statsimpl"
"github.com/grafana/grafana/pkg/services/store"
entityStore "github.com/grafana/grafana/pkg/services/store/entity"
entityDB "github.com/grafana/grafana/pkg/services/store/entity/db"
"github.com/grafana/grafana/pkg/services/store/entity/db/dbimpl"
"github.com/grafana/grafana/pkg/services/store/entity/sqlstash"
"github.com/grafana/grafana/pkg/services/store/resolver"
"github.com/grafana/grafana/pkg/services/store/sanitizer"
"github.com/grafana/grafana/pkg/services/supportbundles"
@ -335,10 +331,6 @@ var wireBasicSet = wire.NewSet(
grpcserver.ProvideHealthService,
grpcserver.ProvideReflectionService,
interceptors.ProvideAuthenticator,
dbimpl.ProvideEntityDB,
wire.Bind(new(entityDB.EntityDBInterface), new(*dbimpl.EntityDB)),
sqlstash.ProvideSQLEntityServer,
wire.Bind(new(entityStore.EntityStoreServer), new(sqlstash.SqlEntityServer)),
resolver.ProvideEntityReferenceResolver,
teamimpl.ProvideService,
teamapi.ProvideTeamAPI,

View File

@ -1,90 +0,0 @@
// SPDX-License-Identifier: AGPL-3.0-only
package entity
import (
"encoding/json"
"path"
"time"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apiserver/pkg/registry/generic"
"k8s.io/apiserver/pkg/storage"
"k8s.io/apiserver/pkg/storage/storagebackend"
"k8s.io/apiserver/pkg/storage/storagebackend/factory"
flowcontrolrequest "k8s.io/apiserver/pkg/util/flowcontrol/request"
"k8s.io/client-go/tools/cache"
entityStore "github.com/grafana/grafana/pkg/services/store/entity"
"github.com/grafana/grafana/pkg/setting"
)
// Compile-time assertion that RESTOptionsGetter implements generic.RESTOptionsGetter.
var _ generic.RESTOptionsGetter = (*RESTOptionsGetter)(nil)

// RESTOptionsGetter builds generic.RESTOptions whose storage decorator routes
// a resource's reads and writes through the entity store client.
type RESTOptionsGetter struct {
	cfg   *setting.Cfg                  // Grafana settings; entity_api section is hashed into the connection string
	store entityStore.EntityStoreClient // client handed to each Storage created by the Decorator
	Codec runtime.Codec                 // codec used to (de)serialize stored objects
}
// NewRESTOptionsGetter constructs a RESTOptionsGetter from the given Grafana
// configuration, entity store client, and object codec.
func NewRESTOptionsGetter(cfg *setting.Cfg, store entityStore.EntityStoreClient, codec runtime.Codec) *RESTOptionsGetter {
	getter := RESTOptionsGetter{Codec: codec}
	getter.cfg = cfg
	getter.store = store
	return &getter
}
// GetRESTOptions implements generic.RESTOptionsGetter. The returned options
// carry a Decorator that ignores the usual etcd-backed factory inputs
// (trigger, indexers) and instead builds an entity-store-backed
// storage.Interface via NewStorage.
func (f *RESTOptionsGetter) GetRESTOptions(resource schema.GroupResource) (generic.RESTOptions, error) {
	// build connection string to uniquely identify the storage backend
	connectionInfo, err := json.Marshal(f.cfg.SectionWithEnvOverrides("entity_api").KeysHash())
	if err != nil {
		return generic.RESTOptions{}, err
	}

	storageConfig := &storagebackend.ConfigForResource{
		Config: storagebackend.Config{
			// Type "custom": the transport fields below are not used to dial
			// anything; ServerList only carries the hashed entity_api settings
			// so the backend identity is unique per configuration.
			Type:   "custom",
			Prefix: "",
			Transport: storagebackend.TransportConfig{
				ServerList: []string{
					string(connectionInfo),
				},
			},
			Codec:                     f.Codec,
			EncodeVersioner:           nil,
			Transformer:               nil,
			CompactionInterval:        0,
			CountMetricPollPeriod:     0,
			DBMetricPollInterval:      0,
			HealthcheckTimeout:        0,
			ReadycheckTimeout:         0,
			StorageObjectCountTracker: nil,
		},
		GroupResource: resource,
	}

	ret := generic.RESTOptions{
		StorageConfig: storageConfig,
		// Decorator wires every resource using these options to the entity
		// store; trigger and indexers are intentionally unused here.
		Decorator: func(
			config *storagebackend.ConfigForResource,
			resourcePrefix string,
			keyFunc func(obj runtime.Object) (string, error),
			newFunc func() runtime.Object,
			newListFunc func() runtime.Object,
			getAttrsFunc storage.AttrFunc,
			trigger storage.IndexerFuncs,
			indexers *cache.Indexers,
		) (storage.Interface, factory.DestroyFunc, error) {
			return NewStorage(config, resource, f.store, f.Codec, keyFunc, newFunc, newListFunc, getAttrsFunc)
		},
		DeleteCollectionWorkers:   0,
		EnableGarbageCollection:   false,
		ResourcePrefix:            path.Join(storageConfig.Prefix, resource.Group, resource.Resource),
		CountMetricPollPeriod:     1 * time.Second,
		StorageObjectCountTracker: flowcontrolrequest.NewStorageObjectCountTracker(),
	}

	return ret, nil
}

View File

@ -1,82 +0,0 @@
package entity
import (
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/selection"
"github.com/grafana/grafana/pkg/apimachinery/utils"
)
// Label keys in the grafana.app namespace that are interpreted by
// ReadLabelSelectors as query options rather than real label selectors.
const SortByKey = "grafana.app/sortBy"
const ListDeletedKey = "grafana.app/listDeleted"
const ListHistoryKey = "grafana.app/listHistory"

// Requirements holds the query options extracted from grafana.app/* label
// selectors by ReadLabelSelectors.
type Requirements struct {
	// Equals folder
	Folder *string
	// SortBy is a list of fields to sort by
	SortBy []string
	// ListDeleted is a flag to list deleted entities
	ListDeleted bool
	// ListHistory is a resource name to list the history of
	ListHistory string
}
// ReadLabelSelectors splits a label selector into (a) Requirements recognized
// as grafana.app/* query options and (b) a new selector holding every
// remaining requirement, to be processed by the entity store as ordinary
// labels. It returns a BadRequest error when a recognized key is used with an
// unsupported operator or value, or when listDeleted and listHistory are
// combined. A nil selector yields empty Requirements and an empty selector.
func ReadLabelSelectors(selector labels.Selector) (Requirements, labels.Selector, error) {
	requirements := Requirements{}
	newSelector := labels.NewSelector()

	if selector == nil {
		return requirements, newSelector, nil
	}

	labelSelectors, _ := selector.Requirements()

	for _, r := range labelSelectors {
		switch r.Key() {
		case utils.AnnoKeyFolder:
			// folder matching supports only (double-)equality; Equals
			// guarantees exactly one value, so the index below is safe
			if (r.Operator() != selection.Equals) && (r.Operator() != selection.DoubleEquals) {
				return requirements, newSelector, apierrors.NewBadRequest(utils.AnnoKeyFolder + " label selector only supports equality")
			}
			folder := r.Values().List()[0]
			requirements.Folder = &folder
		case SortByKey:
			if r.Operator() != selection.In {
				return requirements, newSelector, apierrors.NewBadRequest(SortByKey + " label selector only supports in")
			}
			requirements.SortBy = r.Values().List()
		case ListDeletedKey:
			if r.Operator() != selection.Equals {
				return requirements, newSelector, apierrors.NewBadRequest(ListDeletedKey + " label selector only supports equality")
			}
			if len(r.Values().List()) != 1 {
				return requirements, newSelector, apierrors.NewBadRequest(ListDeletedKey + " label selector only supports one value")
			}
			if r.Values().List()[0] != "true" && r.Values().List()[0] != "false" {
				return requirements, newSelector, apierrors.NewBadRequest(ListDeletedKey + " label selector only supports true or false")
			}
			requirements.ListDeleted = r.Values().List()[0] == "true"
		case ListHistoryKey:
			if r.Operator() != selection.Equals {
				return requirements, newSelector, apierrors.NewBadRequest(ListHistoryKey + " label selector only supports equality")
			}
			if len(r.Values().List()) != 1 {
				return requirements, newSelector, apierrors.NewBadRequest(ListHistoryKey + " label selector only supports one value")
			}
			if r.Values().List()[0] == "" {
				return requirements, newSelector, apierrors.NewBadRequest(ListHistoryKey + " label selector must not be empty")
			}
			requirements.ListHistory = r.Values().List()[0]
		// add all unrecognized label selectors to the new selector list, these will be processed by the entity store
		default:
			newSelector = newSelector.Add(r)
		}
	}

	// history listing and deleted listing are mutually exclusive modes
	if requirements.ListDeleted && requirements.ListHistory != "" {
		return requirements, newSelector, apierrors.NewBadRequest("cannot list deleted and history at the same time")
	}

	return requirements, newSelector, nil
}

View File

@ -1,753 +0,0 @@
// SPDX-License-Identifier: AGPL-3.0-only
// Provenance-includes-location: https://github.com/kubernetes-sigs/apiserver-runtime/blob/main/pkg/experimental/storage/filepath/jsonfile_rest.go
// Provenance-includes-license: Apache-2.0
// Provenance-includes-copyright: The Kubernetes Authors.
package entity
import (
"context"
"errors"
"fmt"
"io"
"reflect"
"strconv"
grpcCodes "google.golang.org/grpc/codes"
grpcStatus "google.golang.org/grpc/status"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/conversion"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/selection"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/apiserver/pkg/storage"
"k8s.io/apiserver/pkg/storage/storagebackend"
"k8s.io/apiserver/pkg/storage/storagebackend/factory"
"k8s.io/klog/v2"
grafanaregistry "github.com/grafana/grafana/pkg/apiserver/registry/generic"
entityStore "github.com/grafana/grafana/pkg/services/store/entity"
)
// Compile-time assertion that Storage implements storage.Interface.
var _ storage.Interface = (*Storage)(nil)

// Storage implements storage.Interface and stores resources in unified storage
type Storage struct {
	config       *storagebackend.ConfigForResource
	store        entityStore.EntityStoreClient // gRPC client used for all reads, writes, lists and watches
	gr           schema.GroupResource          // resource identity, used for NotFound errors
	codec        runtime.Codec                 // (de)serializes objects to/from entity bodies
	keyFunc      func(obj runtime.Object) (string, error) // stored but not referenced in this file -- TODO confirm needed
	newFunc      func() runtime.Object                    // constructs an empty single object
	newListFunc  func() runtime.Object                    // constructs an empty list object
	getAttrsFunc storage.AttrFunc
	// trigger      storage.IndexerFuncs
	// indexers     *cache.Indexers
}
// NewStorage returns a storage.Interface backed by the entity store client.
// The returned DestroyFunc is nil: there is nothing to tear down.
func NewStorage(
	config *storagebackend.ConfigForResource,
	gr schema.GroupResource,
	store entityStore.EntityStoreClient,
	codec runtime.Codec,
	keyFunc func(obj runtime.Object) (string, error),
	newFunc func() runtime.Object,
	newListFunc func() runtime.Object,
	getAttrsFunc storage.AttrFunc,
) (storage.Interface, factory.DestroyFunc, error) {
	s := &Storage{}
	s.config = config
	s.gr = gr
	s.store = store
	s.codec = codec
	s.keyFunc = keyFunc
	s.newFunc = newFunc
	s.newListFunc = newListFunc
	s.getAttrsFunc = getAttrsFunc
	return s, nil, nil
}
// Create adds a new object at a key unless it already exists. 'ttl' is time-to-live
// in seconds (0 means forever). If no error is returned and out is not nil, out will be
// set to the read value from database.
//
// NOTE(review): ttl is currently ignored by this implementation.
func (s *Storage) Create(ctx context.Context, key string, obj runtime.Object, out runtime.Object, ttl uint64) error {
	k, err := grafanaregistry.ParseKey(key)
	if err != nil {
		return err
	}

	// normalize the object for persistence (the versioner strips fields such
	// as resourceVersion before storage)
	if err := s.Versioner().PrepareObjectForStorage(obj); err != nil {
		return err
	}

	e, err := resourceToEntity(obj, *k, s.codec)
	if err != nil {
		return err
	}

	req := &entityStore.CreateEntityRequest{
		Entity: e,
	}

	rsp, err := s.store.Create(ctx, req)
	if err != nil {
		return err
	}
	// anything other than CREATED is treated as a failure
	if rsp.Status != entityStore.CreateEntityResponse_CREATED {
		return fmt.Errorf("this was not a create operation... (%s)", rsp.Status.String())
	}

	// echo the stored entity back to the caller via out
	err = EntityToRuntimeObject(rsp.Entity, out, s.codec)
	if err != nil {
		return apierrors.NewInternalError(err)
	}

	return nil
}
// Delete removes the specified key and returns the value that existed at that spot.
// If key didn't exist, it will return NotFound storage error.
// If 'cachedExistingObject' is non-nil, it can be used as a suggestion about the
// current version of the object to avoid read operation from storage to get it.
// However, the implementations have to retry in case suggestion is stale.
//
// NOTE(review): validateDeletion and cachedExistingObject are ignored here;
// version precondition checking is delegated to the entity store via
// PreviousVersion.
func (s *Storage) Delete(ctx context.Context, key string, out runtime.Object, preconditions *storage.Preconditions, validateDeletion storage.ValidateObjectFunc, cachedExistingObject runtime.Object) error {
	k, err := grafanaregistry.ParseKey(key)
	if err != nil {
		return err
	}

	previousVersion := int64(0)
	if preconditions != nil && preconditions.ResourceVersion != nil {
		// a malformed precondition version silently falls back to 0
		previousVersion, _ = strconv.ParseInt(*preconditions.ResourceVersion, 10, 64)
	}

	rsp, err := s.store.Delete(ctx, &entityStore.DeleteEntityRequest{
		Key:             k.String(),
		PreviousVersion: previousVersion,
	})
	if err != nil {
		return err
	}

	// return the deleted entity to the caller via out
	err = EntityToRuntimeObject(rsp.Entity, out, s.codec)
	if err != nil {
		return apierrors.NewInternalError(err)
	}

	return nil
}
// Watch begins watching the specified key. Events are decoded into API objects,
// and any items selected by 'p' are sent down to returned watch.Interface.
// resourceVersion may be used to specify what version to begin watching,
// which should be the current resourceVersion, and no longer rv+1
// (e.g. reconnecting without missing any updates).
// If resource version is "0", this interface will get current object at given key
// and send it in an "ADDED" event, before watch starts.
func (s *Storage) Watch(ctx context.Context, key string, opts storage.ListOptions) (watch.Interface, error) {
	k, err := grafanaregistry.ParseKey(key)
	if err != nil {
		return nil, err
	}

	// narrow the watch key from field selectors when the key itself does not
	// already pin a name/namespace
	if opts.Predicate.Field != nil {
		// check for metadata.name field selector
		if v, ok := opts.Predicate.Field.RequiresExactMatch("metadata.name"); ok && k.Name == "" {
			// just watch the specific key if we have a name field selector
			k.Name = v
		}

		// check for metadata.namespace field selector
		if v, ok := opts.Predicate.Field.RequiresExactMatch("metadata.namespace"); ok && k.Namespace == "" {
			// just watch the specific namespace if we have a namespace field selector
			k.Namespace = v
		}
	}

	// translate grafana.app/* label selectors into field requirements
	requirements, newSelector, err := ReadLabelSelectors(opts.Predicate.Label)
	if err != nil {
		return nil, err
	}

	// Update the selector to remove the unneeded requirements
	opts.Predicate.Label = newSelector

	// if we got a listHistory label selector, watch the specified resource
	if requirements.ListHistory != "" {
		if k.Name != "" && k.Name != requirements.ListHistory {
			return nil, apierrors.NewBadRequest("name field selector does not match listHistory")
		}
		k.Name = requirements.ListHistory
	}

	req := &entityStore.EntityWatchRequest{
		Action: entityStore.EntityWatchRequest_START,
		Key: []string{
			k.String(),
		},
		Labels:              map[string]string{},
		WithBody:            true,
		WithStatus:          true,
		SendInitialEvents:   false,
		AllowWatchBookmarks: opts.Predicate.AllowWatchBookmarks,
	}

	if opts.ResourceVersion != "" {
		rv, err := strconv.ParseInt(opts.ResourceVersion, 10, 64)
		if err != nil {
			return nil, apierrors.NewBadRequest(fmt.Sprintf("invalid resource version: %s", opts.ResourceVersion))
		}

		req.Since = rv
	}

	// default to sending initial events when starting from the beginning and
	// the caller expressed no preference; otherwise honor the caller's choice
	if opts.SendInitialEvents == nil && req.Since == 0 {
		req.SendInitialEvents = true
	} else if opts.SendInitialEvents != nil {
		req.SendInitialEvents = *opts.SendInitialEvents
	}

	if requirements.Folder != nil {
		req.Folder = *requirements.Folder
	}

	// translate "equals" label selectors to storage label conditions
	labelRequirements, selectable := opts.Predicate.Label.Requirements()
	if !selectable {
		return nil, apierrors.NewBadRequest("label selector is not selectable")
	}

	for _, r := range labelRequirements {
		if r.Operator() == selection.Equals {
			req.Labels[r.Key()] = r.Values().List()[0]
		}
	}

	client, err := s.store.Watch(ctx)
	if err != nil {
		// if the context was canceled, just return a new empty watch
		if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) || errors.Is(err, io.EOF) {
			return watch.NewEmptyWatch(), nil
		}
		return nil, err
	}

	err = client.Send(req)
	if err != nil {
		// best-effort close of the send side before reporting the error
		err2 := client.CloseSend()
		if err2 != nil {
			klog.Errorf("watch close failed: %s\n", err2)
		}

		// if the context was canceled, just return a new empty watch
		if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) || errors.Is(err, io.EOF) {
			return watch.NewEmptyWatch(), nil
		}

		return nil, err
	}

	reporter := apierrors.NewClientErrorReporter(500, "WATCH", "")
	decoder := &Decoder{
		client:  client,
		newFunc: s.newFunc,
		opts:    opts,
		codec:   s.codec,
	}

	// the stream watcher owns the decoder and surfaces decode failures
	// through the reporter
	w := watch.NewStreamWatcher(decoder, reporter)

	return w, nil
}
// Get unmarshals object found at key into objPtr. On a not found error, will either
// return a zero object of the requested type, or an error, depending on 'opts.ignoreNotFound'.
// Treats empty responses and nil response nodes exactly like a not found error.
// The returned contents may be delayed, but it is guaranteed that they will
// match 'opts.ResourceVersion' according 'opts.ResourceVersionMatch'.
func (s *Storage) Get(ctx context.Context, key string, opts storage.GetOptions, objPtr runtime.Object) error {
	k, err := grafanaregistry.ParseKey(key)
	if err != nil {
		return err
	}

	// resourceVersion 0 means "latest" to the store
	resourceVersion := int64(0)
	if opts.ResourceVersion != "" {
		resourceVersion, err = strconv.ParseInt(opts.ResourceVersion, 10, 64)
		if err != nil {
			return apierrors.NewBadRequest(fmt.Sprintf("invalid resource version: %s", opts.ResourceVersion))
		}
	}

	rsp, err := s.store.Read(ctx, &entityStore.ReadEntityRequest{
		Key:             k.String(),
		WithBody:        true,
		WithStatus:      true,
		ResourceVersion: resourceVersion,
	})
	if err != nil {
		return err
	}
	// an empty key in the response signals "not found"
	if rsp.Key == "" {
		if opts.IgnoreNotFound {
			return nil
		}
		return apierrors.NewNotFound(s.gr, k.Name)
	}

	err = EntityToRuntimeObject(rsp, objPtr, s.codec)
	if err != nil {
		return apierrors.NewInternalError(err)
	}

	return nil
}
// GetList unmarshalls objects found at key into a *List api object (an object
// that satisfies runtime.IsList definition).
// If 'opts.Recursive' is false, 'key' is used as an exact match. If `opts.Recursive'
// is true, 'key' is used as a prefix.
// The returned contents may be delayed, but it is guaranteed that they will
// match 'opts.ResourceVersion' according 'opts.ResourceVersionMatch'.
func (s *Storage) GetList(ctx context.Context, key string, opts storage.ListOptions, listObj runtime.Object) error {
	k, err := grafanaregistry.ParseKey(key)
	if err != nil {
		return err
	}

	// obtain a reflect handle onto the list's Items slice so results can be
	// appended below
	listPtr, err := meta.GetItemsPtr(listObj)
	if err != nil {
		return err
	}
	v, err := conversion.EnforcePtr(listPtr)
	if err != nil {
		return err
	}

	// translate grafana.app/* label selectors into field requirements
	requirements, newSelector, err := ReadLabelSelectors(opts.Predicate.Label)
	if err != nil {
		return err
	}

	// Update the selector to remove the unneeded requirements
	opts.Predicate.Label = newSelector

	// history mode: list prior versions of one named resource instead of the
	// current versions of many
	if requirements.ListHistory != "" {
		k.Name = requirements.ListHistory

		req := &entityStore.EntityHistoryRequest{
			Key:           k.String(),
			WithBody:      true,
			WithStatus:    true,
			NextPageToken: opts.Predicate.Continue,
			Limit:         opts.Predicate.Limit,
			Sort:          requirements.SortBy,
		}

		rsp, err := s.store.History(ctx, req)
		if err != nil {
			return apierrors.NewInternalError(err)
		}

		for _, r := range rsp.Versions {
			res := s.newFunc()

			err := EntityToRuntimeObject(r, res, s.codec)
			if err != nil {
				return apierrors.NewInternalError(err)
			}

			// apply any predicates not handled in storage
			matches, err := opts.Predicate.Matches(res)
			if err != nil {
				return apierrors.NewInternalError(err)
			}
			if !matches {
				continue
			}

			v.Set(reflect.Append(v, reflect.ValueOf(res).Elem()))
		}

		listAccessor, err := meta.ListAccessor(listObj)
		if err != nil {
			return err
		}

		// propagate pagination and version metadata to the list object
		if rsp.NextPageToken != "" {
			listAccessor.SetContinue(rsp.NextPageToken)
		}

		listAccessor.SetResourceVersion(strconv.FormatInt(rsp.ResourceVersion, 10))

		return nil
	}

	req := &entityStore.EntityListRequest{
		Key: []string{
			k.String(),
		},
		WithBody:      true,
		WithStatus:    true,
		NextPageToken: opts.Predicate.Continue,
		Limit:         opts.Predicate.Limit,
		Labels:        map[string]string{},
	}

	if requirements.Folder != nil {
		req.Folder = *requirements.Folder
	}
	if len(requirements.SortBy) > 0 {
		req.Sort = requirements.SortBy
	}
	if requirements.ListDeleted {
		req.Deleted = true
	}

	// translate "equals" label selectors to storage label conditions
	labelRequirements, selectable := opts.Predicate.Label.Requirements()
	if !selectable {
		return apierrors.NewBadRequest("label selector is not selectable")
	}

	for _, r := range labelRequirements {
		if r.Operator() == selection.Equals {
			req.Labels[r.Key()] = r.Values().List()[0]
		}
	}

	rsp, err := s.store.List(ctx, req)
	if err != nil {
		return apierrors.NewInternalError(err)
	}

	for _, r := range rsp.Results {
		res := s.newFunc()

		err := EntityToRuntimeObject(r, res, s.codec)
		if err != nil {
			return apierrors.NewInternalError(err)
		}

		// apply any predicates not handled in storage
		matches, err := opts.Predicate.Matches(res)
		if err != nil {
			return apierrors.NewInternalError(err)
		}
		if !matches {
			continue
		}

		v.Set(reflect.Append(v, reflect.ValueOf(res).Elem()))
	}

	listAccessor, err := meta.ListAccessor(listObj)
	if err != nil {
		return err
	}

	// propagate pagination and version metadata to the list object
	if rsp.NextPageToken != "" {
		listAccessor.SetContinue(rsp.NextPageToken)
	}

	listAccessor.SetResourceVersion(strconv.FormatInt(rsp.ResourceVersion, 10))

	return nil
}
// GuaranteedUpdate keeps calling 'tryUpdate()' to update key 'key' (of type 'destination')
// retrying the update until success if there is index conflict.
// Note that object passed to tryUpdate may change across invocations of tryUpdate() if
// other writers are simultaneously updating it, so tryUpdate() needs to take into account
// the current contents of the object when deciding how the update object should look.
// If the key doesn't exist, it will return NotFound storage error if ignoreNotFound=false
// else `destination` will be set to the zero value of it's type.
// If the eventual successful invocation of `tryUpdate` returns an output with the same serialized
// contents as the input, it won't perform any update, but instead set `destination` to an object with those
// contents.
// If 'cachedExistingObject' is non-nil, it can be used as a suggestion about the
// current version of the object to avoid read operation from storage to get it.
// However, the implementations have to retry in case suggestion is stale.
//
// NOTE(review): despite the contract above, this implementation calls
// tryUpdate exactly once and does not retry on conflict;
// cachedExistingObject is ignored.
func (s *Storage) GuaranteedUpdate(
	ctx context.Context,
	key string,
	destination runtime.Object,
	ignoreNotFound bool,
	preconditions *storage.Preconditions,
	tryUpdate storage.UpdateFunc,
	cachedExistingObject runtime.Object,
) error {
	k, err := grafanaregistry.ParseKey(key)
	if err != nil {
		return err
	}

	// load the current object into destination; when not found and
	// ignoreNotFound is set, destination stays at its zero value and the
	// object is created further below
	getErr := s.Get(ctx, k.String(), storage.GetOptions{}, destination)
	if getErr != nil {
		if ignoreNotFound && apierrors.IsNotFound(getErr) {
			// destination is already set to zero value
			// we'll create the resource
		} else {
			return getErr
		}
	}

	accessor, err := meta.Accessor(destination)
	if err != nil {
		return err
	}

	// optimistic concurrency: caller-supplied precondition version overrides
	// the version read from the current object
	previousVersion, _ := strconv.ParseInt(accessor.GetResourceVersion(), 10, 64)
	if preconditions != nil && preconditions.ResourceVersion != nil {
		previousVersion, _ = strconv.ParseInt(*preconditions.ResourceVersion, 10, 64)
	}

	res := &storage.ResponseMeta{}
	updatedObj, _, err := tryUpdate(destination, *res)
	if err != nil {
		var statusErr *apierrors.StatusError
		if errors.As(err, &statusErr) {
			// For now, forbidden may come from a mutation handler
			if statusErr.ErrStatus.Reason == metav1.StatusReasonForbidden {
				return statusErr
			}
		}
		return apierrors.NewInternalError(fmt.Errorf("could not successfully update object. key=%s, err=%s", k.String(), err.Error()))
	}

	e, err := resourceToEntity(updatedObj, *k, s.codec)
	if err != nil {
		return err
	}

	// if we have a non-nil getErr, then we've ignored a not found error
	if getErr != nil {
		// object does not exist, create it
		req := &entityStore.CreateEntityRequest{
			Entity: e,
		}

		rsp, err := s.store.Create(ctx, req)
		if err != nil {
			return err
		}

		err = EntityToRuntimeObject(rsp.Entity, destination, s.codec)
		if err != nil {
			return apierrors.NewInternalError(err)
		}

		return nil
	}

	// update the existing object
	req := &entityStore.UpdateEntityRequest{
		Entity:          e,
		PreviousVersion: previousVersion,
	}

	rsp, err := s.store.Update(ctx, req)
	if err != nil {
		return err // continue???
	}

	if rsp.Status == entityStore.UpdateEntityResponse_UNCHANGED {
		return nil // destination is already set
	}

	err = EntityToRuntimeObject(rsp.Entity, destination, s.codec)
	if err != nil {
		return apierrors.NewInternalError(err)
	}

	return nil
}
// Count returns number of different entries under the key (generally being path prefix).
// Not implemented for this backend: it always reports zero entries and no error.
func (s *Storage) Count(key string) (int64, error) {
	return 0, nil
}
// Versioner returns the standard k8s APIObjectVersioner, which encodes
// resource versions as unsigned integers.
func (s *Storage) Versioner() storage.Versioner {
	return &storage.APIObjectVersioner{}
}
// RequestWatchProgress is a no-op; watch progress notification is not
// supported by this backend.
func (s *Storage) RequestWatchProgress(ctx context.Context) error {
	return nil
}
// Decoder adapts an entity store watch stream to the watch.Decoder interface
// consumed by watch.NewStreamWatcher (see Storage.Watch).
type Decoder struct {
	client  entityStore.EntityStore_WatchClient // open gRPC watch stream
	newFunc func() runtime.Object               // constructs empty objects to decode into
	opts    storage.ListOptions                 // predicate used for client-side filtering
	codec   runtime.Codec
}
// Decode blocks until the next event arrives on the watch stream and maps
// entity store actions onto k8s watch event types (CREATED->Added,
// UPDATED->Modified, DELETED->Deleted, BOOKMARK->Bookmark), applying any
// predicate filtering that the store did not handle. Events filtered out by
// the predicate are skipped and the loop continues; stream or context errors
// are returned as watch.Error.
func (d *Decoder) Decode() (action watch.EventType, object runtime.Object, err error) {
decode:
	for {
		// bail out as soon as the stream's context is done
		err := d.client.Context().Err()
		if err != nil {
			klog.Errorf("client: context error: %s\n", err)
			return watch.Error, nil, err
		}

		resp, err := d.client.Recv()
		// EOF and gRPC cancellation end the stream without extra logging
		if errors.Is(err, io.EOF) {
			return watch.Error, nil, err
		}
		if grpcStatus.Code(err) == grpcCodes.Canceled {
			return watch.Error, nil, err
		}
		if err != nil {
			klog.Errorf("client: error receiving result: %s", err)
			return watch.Error, nil, err
		}

		if resp.Entity == nil {
			klog.Errorf("client: received nil entity\n")
			continue decode
		}

		obj := d.newFunc()

		if resp.Entity.Action == entityStore.Entity_BOOKMARK {
			// here k8s expects an empty object with just resource version and k8s.io/initial-events-end annotation
			accessor, err := meta.Accessor(obj)
			if err != nil {
				klog.Errorf("error getting object accessor: %s", err)
				return watch.Error, nil, err
			}

			accessor.SetResourceVersion(fmt.Sprintf("%d", resp.Entity.ResourceVersion))
			accessor.SetAnnotations(map[string]string{"k8s.io/initial-events-end": "true"})
			return watch.Bookmark, obj, nil
		}

		err = EntityToRuntimeObject(resp.Entity, obj, d.codec)
		if err != nil {
			klog.Errorf("error decoding entity: %s", err)
			return watch.Error, nil, err
		}

		var watchAction watch.EventType
		switch resp.Entity.Action {
		case entityStore.Entity_CREATED:
			// apply any predicates not handled in storage
			matches, err := d.opts.Predicate.Matches(obj)
			if err != nil {
				klog.Errorf("error matching object: %s", err)
				return watch.Error, nil, err
			}
			if !matches {
				continue decode
			}

			watchAction = watch.Added
		case entityStore.Entity_UPDATED:
			watchAction = watch.Modified

			// apply any predicates not handled in storage
			matches, err := d.opts.Predicate.Matches(obj)
			if err != nil {
				klog.Errorf("error matching object: %s", err)
				return watch.Error, nil, err
			}

			// if we have a previous object, check if it matches
			prevMatches := false
			prevObj := d.newFunc()
			if resp.Previous != nil {
				err = EntityToRuntimeObject(resp.Previous, prevObj, d.codec)
				if err != nil {
					klog.Errorf("error decoding entity: %s", err)
					return watch.Error, nil, err
				}

				// apply any predicates not handled in storage
				prevMatches, err = d.opts.Predicate.Matches(prevObj)
				if err != nil {
					klog.Errorf("error matching object: %s", err)
					return watch.Error, nil, err
				}
			}

			if !matches {
				if !prevMatches {
					continue decode
				}

				// if the object didn't match, send a Deleted event
				watchAction = watch.Deleted

				// here k8s expects the previous object but with the new resource version
				obj = prevObj

				accessor, err := meta.Accessor(obj)
				if err != nil {
					klog.Errorf("error getting object accessor: %s", err)
					return watch.Error, nil, err
				}

				accessor.SetResourceVersion(fmt.Sprintf("%d", resp.Entity.ResourceVersion))
			} else if !prevMatches {
				// if the object didn't previously match, send an Added event
				watchAction = watch.Added
			}
		case entityStore.Entity_DELETED:
			watchAction = watch.Deleted

			// if we have a previous object, return that in the deleted event
			if resp.Previous != nil {
				err = EntityToRuntimeObject(resp.Previous, obj, d.codec)
				if err != nil {
					klog.Errorf("error decoding entity: %s", err)
					return watch.Error, nil, err
				}

				// here k8s expects the previous object but with the new resource version
				accessor, err := meta.Accessor(obj)
				if err != nil {
					klog.Errorf("error getting object accessor: %s", err)
					return watch.Error, nil, err
				}

				accessor.SetResourceVersion(fmt.Sprintf("%d", resp.Entity.ResourceVersion))
			}

			// apply any predicates not handled in storage
			matches, err := d.opts.Predicate.Matches(obj)
			if err != nil {
				klog.Errorf("error matching object: %s", err)
				return watch.Error, nil, err
			}
			if !matches {
				continue decode
			}
		default:
			watchAction = watch.Error
		}

		return watchAction, obj, nil
	}
}
// Close terminates the send side of the underlying watch stream, logging
// (but otherwise ignoring) any error from the gRPC client.
func (d *Decoder) Close() {
	if err := d.client.CloseSend(); err != nil {
		klog.Errorf("error closing watch stream: %s", err)
	}
}

// Compile-time assertion that Decoder implements watch.Decoder.
var _ watch.Decoder = (*Decoder)(nil)

View File

@ -1,376 +0,0 @@
// SPDX-License-Identifier: AGPL-3.0-only
// Provenance-includes-location: https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apiserver/pkg/storage/etcd3/watcher_test.go
// Provenance-includes-license: Apache-2.0
// Provenance-includes-copyright: The Kubernetes Authors.
package test
import (
"context"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"k8s.io/apimachinery/pkg/api/apitesting"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/runtime/serializer"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apiserver/pkg/apis/example"
examplev1 "k8s.io/apiserver/pkg/apis/example/v1"
"k8s.io/apiserver/pkg/storage"
"k8s.io/apiserver/pkg/storage/storagebackend"
"k8s.io/apiserver/pkg/storage/storagebackend/factory"
"github.com/grafana/grafana/pkg/apimachinery/identity"
storagetesting "github.com/grafana/grafana/pkg/apiserver/storage/testing"
"github.com/grafana/grafana/pkg/infra/tracing"
"github.com/grafana/grafana/pkg/services/apiserver/storage/entity"
"github.com/grafana/grafana/pkg/services/featuremgmt"
"github.com/grafana/grafana/pkg/services/sqlstore"
entityStore "github.com/grafana/grafana/pkg/services/store/entity"
"github.com/grafana/grafana/pkg/services/store/entity/db/dbimpl"
"github.com/grafana/grafana/pkg/services/store/entity/sqlstash"
"github.com/grafana/grafana/pkg/setting"
"github.com/grafana/grafana/pkg/tests/testinfra"
"github.com/grafana/grafana/pkg/tests/testsuite"
)
// scheme and codecs hold the runtime types and serializers for the example
// test API registered in init below.
var scheme = runtime.NewScheme()
var codecs = serializer.NewCodecFactory(scheme)
// TestMain delegates to the shared test-suite runner.
func TestMain(m *testing.M) {
	testsuite.Run(m)
}
// createTestContext spins up a Grafana test environment with the gRPC server
// and unified-storage feature toggles enabled, builds an entity DB on top of
// the migrated test SQL store, and returns an in-process entity-store client
// plus a destroy function that stops the server.
func createTestContext(t *testing.T) (entityStore.EntityStoreClient, factory.DestroyFunc) {
	t.Helper()

	grafDir, cfgPath := testinfra.CreateGrafDir(t, testinfra.GrafanaOpts{
		EnableFeatureToggles: []string{
			featuremgmt.FlagGrpcServer,
			featuremgmt.FlagUnifiedStorage,
		},
		AppModeProduction: false,         // required for migrations to run
		GRPCServerAddress: "127.0.0.1:0", // :0 for choosing the port automatically
	})

	// Use require (not assert) for setup: continuing with a nil cfg or
	// feature manager would only surface later as a confusing panic.
	cfg, err := setting.NewCfgFromArgs(setting.CommandLineArgs{Config: cfgPath, HomePath: grafDir})
	require.NoError(t, err)

	featureManager, err := featuremgmt.ProvideManagerService(cfg)
	require.NoError(t, err)
	featureToggles := featuremgmt.ProvideToggles(featureManager)

	db := sqlstore.InitTestDBWithMigration(t, nil, sqlstore.InitTestDBOpt{EnsureDefaultOrgAndUser: false})

	eDB, err := dbimpl.ProvideEntityDB(db, cfg, featureToggles, nil)
	require.NoError(t, err)

	err = eDB.Init()
	require.NoError(t, err)

	traceConfig, err := tracing.ParseTracingConfig(cfg)
	require.NoError(t, err)
	tracer, err := tracing.ProvideService(traceConfig)
	require.NoError(t, err)

	store, err := sqlstash.ProvideSQLEntityServer(eDB, tracer)
	require.NoError(t, err)

	client := entityStore.NewEntityStoreClientLocal(store)

	return client, func() { store.Stop() }
}
// init registers the k8s example API group (Pod/PodList fixtures) with the
// package-level scheme used by the storage tests.
func init() {
	metav1.AddToGroupVersion(scheme, metav1.SchemeGroupVersion)
	utilruntime.Must(example.AddToScheme(scheme))
	utilruntime.Must(examplev1.AddToScheme(scheme))
}
// setupOptions collects the knobs used to build a test storage.Interface.
type setupOptions struct {
	codec          runtime.Codec         // serializer for stored objects
	newFunc        func() runtime.Object // constructor for a single object
	newListFunc    func() runtime.Object // constructor for a list object
	prefix         string                // base path for the storage backend config
	resourcePrefix string                // key prefix for the resource under test
	groupResource  schema.GroupResource  // group/resource being stored
}

// setupOption mutates setupOptions; testSetup applies options in order,
// after withDefaults.
type setupOption func(*setupOptions, *testing.T)
// withDefaults fills options with the standard pod-based test fixture:
// the example v1 codec, pod constructors, a temp-dir prefix, and the
// "pods" group/resource.
func withDefaults(options *setupOptions, t *testing.T) {
	options.groupResource = schema.GroupResource{Resource: "pods"}
	options.resourcePrefix = "/resource/pods"
	options.prefix = t.TempDir()
	options.codec = apitesting.TestCodec(codecs, examplev1.SchemeGroupVersion)
	options.newFunc = newPod
	options.newListFunc = newPodList
}

// Ensure withDefaults matches the setupOption signature.
var _ setupOption = withDefaults
// testSetup builds a storage.Interface backed by the entity store, applying
// withDefaults first and then any caller-supplied options. The returned
// context carries an admin identity so requests pass authorization. The
// DestroyFunc tears down the backing test server.
func testSetup(t *testing.T, opts ...setupOption) (context.Context, storage.Interface, factory.DestroyFunc, error) {
	setupOpts := setupOptions{}
	opts = append([]setupOption{withDefaults}, opts...)
	for _, opt := range opts {
		opt(&setupOpts, t)
	}

	config := storagebackend.NewDefaultConfig(setupOpts.prefix, setupOpts.codec)

	client, destroyFunc := createTestContext(t)

	store, _, err := entity.NewStorage(
		config.ForResource(setupOpts.groupResource),
		setupOpts.groupResource,
		client,
		setupOpts.codec,
		// Key function: derive the storage key from namespace + name.
		func(obj runtime.Object) (string, error) {
			accessor, err := meta.Accessor(obj)
			if err != nil {
				return "", err
			}
			return storagetesting.KeyFunc(accessor.GetNamespace(), accessor.GetName()), nil
		},
		setupOpts.newFunc,
		setupOpts.newListFunc,
		storage.DefaultNamespaceScopedAttr,
	)
	if err != nil {
		// NOTE: destroyFunc is not returned on error; callers must check err
		// before deferring the destroy function.
		return nil, nil, nil, err
	}

	// Test with an admin identity
	ctx := identity.WithRequester(context.Background(), &identity.StaticRequester{
		Type:           identity.TypeUser,
		Login:          "testuser",
		UserID:         123,
		UserUID:        "u123",
		OrgRole:        identity.RoleAdmin,
		IsGrafanaAdmin: true, // can do anything
	})

	return ctx, store, destroyFunc, nil
}
// TestIntegrationWatch exercises basic watch semantics against the
// entity-backed storage. Currently skipped ("In maintenance").
func TestIntegrationWatch(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test")
	}
	t.Skip("In maintenance")

	ctx, store, destroyFunc, err := testSetup(t)
	// Check the error before deferring: testSetup returns a nil destroy
	// function on failure, and deferring a nil func would panic.
	require.NoError(t, err)
	defer destroyFunc()

	storagetesting.RunTestWatch(ctx, t, store)
}
// TestIntegrationClusterScopedWatch exercises watches on cluster-scoped
// resources. Currently skipped ("In maintenance").
func TestIntegrationClusterScopedWatch(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test")
	}
	t.Skip("In maintenance")

	ctx, store, destroyFunc, err := testSetup(t)
	// Check the error before deferring: testSetup returns a nil destroy
	// function on failure, and deferring a nil func would panic.
	require.NoError(t, err)
	defer destroyFunc()

	storagetesting.RunTestClusterScopedWatch(ctx, t, store)
}
// TestIntegrationNamespaceScopedWatch exercises watches on namespaced
// resources. Currently skipped ("In maintenance").
func TestIntegrationNamespaceScopedWatch(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test")
	}
	t.Skip("In maintenance")

	ctx, store, destroyFunc, err := testSetup(t)
	// Check the error before deferring: testSetup returns a nil destroy
	// function on failure, and deferring a nil func would panic.
	require.NoError(t, err)
	defer destroyFunc()

	storagetesting.RunTestNamespaceScopedWatch(ctx, t, store)
}
// TestIntegrationDeleteTriggerWatch verifies deletes produce watch events.
// Currently skipped ("In maintenance").
func TestIntegrationDeleteTriggerWatch(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test")
	}
	t.Skip("In maintenance")

	ctx, store, destroyFunc, err := testSetup(t)
	// Check the error before deferring: testSetup returns a nil destroy
	// function on failure, and deferring a nil func would panic.
	require.NoError(t, err)
	defer destroyFunc()

	storagetesting.RunTestDeleteTriggerWatch(ctx, t, store)
}
// TestIntegrationWatchFromZero verifies watching from resourceVersion 0.
// Currently skipped ("In maintenance").
func TestIntegrationWatchFromZero(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test")
	}
	t.Skip("In maintenance")

	ctx, store, destroyFunc, err := testSetup(t)
	// Check the error before deferring: testSetup returns a nil destroy
	// function on failure, and deferring a nil func would panic.
	require.NoError(t, err)
	defer destroyFunc()

	storagetesting.RunTestWatchFromZero(ctx, t, store, nil)
}
// TestWatchFromNonZero tests that
// - watch from non-0 should just watch changes after given version
func TestIntegrationWatchFromNonZero(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test")
	}
	t.Skip("In maintenance")

	ctx, store, destroyFunc, err := testSetup(t)
	// Check the error before deferring: testSetup returns a nil destroy
	// function on failure, and deferring a nil func would panic.
	require.NoError(t, err)
	defer destroyFunc()

	storagetesting.RunTestWatchFromNonZero(ctx, t, store)
}
/*
// TODO this times out, we need to buffer events
func TestIntegrationDelayedWatchDelivery(t *testing.T) {
if testing.Short() {
t.Skip("skipping integration test")
}
ctx, store, destroyFunc, err := testSetup(t)
defer destroyFunc()
assert.NoError(t, err)
storagetesting.RunTestDelayedWatchDelivery(ctx, t, store)
}
*/
/* func TestIntegrationWatchError(t *testing.T) {
if testing.Short() {
t.Skip("skipping integration test")
}
ctx, store, _ := testSetup(t)
storagetesting.RunTestWatchError(ctx, t, &storeWithPrefixTransformer{store})
} */
// TestIntegrationWatchContextCancel verifies watch termination on context
// cancellation. Currently skipped ("In maintenance").
func TestIntegrationWatchContextCancel(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test")
	}
	t.Skip("In maintenance")

	ctx, store, destroyFunc, err := testSetup(t)
	// Check the error before deferring: testSetup returns a nil destroy
	// function on failure, and deferring a nil func would panic.
	require.NoError(t, err)
	defer destroyFunc()

	storagetesting.RunTestWatchContextCancel(ctx, t, store)
}
// TestIntegrationWatcherTimeout verifies watcher timeout behavior.
// Currently skipped ("In maintenance").
func TestIntegrationWatcherTimeout(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test")
	}
	t.Skip("In maintenance")

	ctx, store, destroyFunc, err := testSetup(t)
	// Check the error before deferring: testSetup returns a nil destroy
	// function on failure, and deferring a nil func would panic.
	require.NoError(t, err)
	defer destroyFunc()

	storagetesting.RunTestWatcherTimeout(ctx, t, store)
}
// TestIntegrationWatchDeleteEventObjectHaveLatestRV verifies that delete
// events carry the latest resource version. Currently skipped
// ("In maintenance").
func TestIntegrationWatchDeleteEventObjectHaveLatestRV(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test")
	}
	t.Skip("In maintenance")

	ctx, store, destroyFunc, err := testSetup(t)
	// Check the error before deferring: testSetup returns a nil destroy
	// function on failure, and deferring a nil func would panic.
	require.NoError(t, err)
	defer destroyFunc()

	storagetesting.RunTestWatchDeleteEventObjectHaveLatestRV(ctx, t, store)
}
// TODO: enable when we support flow control and priority fairness
/* func TestIntegrationWatchInitializationSignal(t *testing.T) {
if testing.Short() {
t.Skip("skipping integration test")
}
ctx, store, destroyFunc, err := testSetup(t)
defer destroyFunc()
assert.NoError(t, err)
storagetesting.RunTestWatchInitializationSignal(ctx, t, store)
} */
/* func TestIntegrationProgressNotify(t *testing.T) {
if testing.Short() {
t.Skip("skipping integration test")
}
ctx, store, destroyFunc, err := testSetup(t)
defer destroyFunc()
assert.NoError(t, err)
storagetesting.RunOptionalTestProgressNotify(ctx, t, store)
} */
// TestWatchDispatchBookmarkEvents makes sure that
// setting allowWatchBookmarks query param against
// etcd implementation doesn't have any effect.
func TestIntegrationWatchDispatchBookmarkEvents(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test")
	}
	t.Skip("In maintenance")

	ctx, store, destroyFunc, err := testSetup(t)
	// Check the error before deferring: testSetup returns a nil destroy
	// function on failure, and deferring a nil func would panic.
	require.NoError(t, err)
	defer destroyFunc()

	storagetesting.RunTestWatchDispatchBookmarkEvents(ctx, t, store, false)
}
// TestIntegrationSendInitialEventsBackwardCompatibility verifies the
// sendInitialEvents watch option's backward-compatible behavior. This test
// is NOT skipped, so the error-before-defer ordering matters here.
func TestIntegrationSendInitialEventsBackwardCompatibility(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test")
	}

	ctx, store, destroyFunc, err := testSetup(t)
	// Check the error before deferring: testSetup returns a nil destroy
	// function on failure, and deferring a nil func would panic.
	require.NoError(t, err)
	defer destroyFunc()

	storagetesting.RunSendInitialEventsBackwardCompatibility(ctx, t, store)
}
// TODO this test times out
// TestIntegrationEtcdWatchSemantics runs the etcd watch-semantics suite.
// Currently skipped ("In maintenance").
func TestIntegrationEtcdWatchSemantics(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test")
	}
	t.Skip("In maintenance")

	ctx, store, destroyFunc, err := testSetup(t)
	// Check the error before deferring: testSetup returns a nil destroy
	// function on failure, and deferring a nil func would panic.
	require.NoError(t, err)
	defer destroyFunc()

	storagetesting.RunWatchSemantics(ctx, t, store)
}
/*
// TODO this test times out
func TestIntegrationEtcdWatchSemanticInitialEventsExtended(t *testing.T) {
if testing.Short() {
t.Skip("skipping integration test")
}
ctx, store, destroyFunc, err := testSetup(t)
defer destroyFunc()
assert.NoError(t, err)
storagetesting.RunWatchSemanticInitialEventsExtended(ctx, t, store)
}
*/
// newPod returns a fresh, empty example Pod for the storage tests.
func newPod() runtime.Object { return &example.Pod{} }

// newPodList returns a fresh, empty example PodList for the storage tests.
func newPodList() runtime.Object { return &example.PodList{} }

View File

@ -1,176 +0,0 @@
package entity
import (
"bytes"
"encoding/json"
"fmt"
"reflect"
"strconv"
"time"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"github.com/grafana/grafana/pkg/apimachinery/utils"
grafanaregistry "github.com/grafana/grafana/pkg/apiserver/registry/generic"
entityStore "github.com/grafana/grafana/pkg/services/store/entity"
)
// EntityToRuntimeObject hydrates res from a stored Entity: it decodes the
// stored body (if any), overlays the stored partial ObjectMeta JSON, and then
// applies the entity's denormalized columns (name, namespace, guid, version,
// timestamps, folder, created/updated-by, slug, origin, labels, status) on
// top, so the entity fields win over whatever the body carried.
func EntityToRuntimeObject(rsp *entityStore.Entity, res runtime.Object, codec runtime.Codec) error {
	var err error

	// Read the body first -- it includes old resourceVersion!
	if len(rsp.Body) > 0 {
		decoded, _, err := codec.Decode(rsp.Body, &schema.GroupVersionKind{Group: rsp.Group, Version: rsp.GroupVersion}, res)
		if err != nil {
			return err
		}
		// NOTE(review): codec.Decode may return an object distinct from the
		// `into` argument; this reassignment only updates the local variable,
		// so the caller's object is assumed to be the decode target — confirm
		// with the codec in use.
		res = decoded
	}

	metaAccessor, err := meta.Accessor(res)
	if err != nil {
		return err
	}

	// Overlay the stored ObjectMeta (a PartialObjectMetadata JSON dump).
	if len(rsp.Meta) > 0 {
		err = json.Unmarshal(rsp.Meta, res)
		if err != nil {
			return err
		}
	}

	// Denormalized entity columns override the decoded body/meta.
	metaAccessor.SetName(rsp.Name)
	metaAccessor.SetNamespace(rsp.Namespace)
	metaAccessor.SetUID(types.UID(rsp.Guid))
	metaAccessor.SetResourceVersion(fmt.Sprintf("%d", rsp.ResourceVersion))
	// CreatedAt is unix-milliseconds; split into seconds + nanoseconds.
	metaAccessor.SetCreationTimestamp(metav1.Unix(rsp.CreatedAt/1000, rsp.CreatedAt%1000*1000000))

	grafanaAccessor, err := utils.MetaAccessor(metaAccessor)
	if err != nil {
		return err
	}

	if rsp.Folder != "" {
		grafanaAccessor.SetFolder(rsp.Folder)
	}
	if rsp.CreatedBy != "" {
		grafanaAccessor.SetCreatedBy(rsp.CreatedBy)
	}
	if rsp.UpdatedBy != "" {
		grafanaAccessor.SetUpdatedBy(rsp.UpdatedBy)
	}
	if rsp.UpdatedAt != 0 {
		updatedAt := time.UnixMilli(rsp.UpdatedAt).UTC()
		grafanaAccessor.SetUpdatedTimestamp(&updatedAt)
	}
	grafanaAccessor.SetSlug(rsp.Slug)

	if rsp.Origin != nil {
		originTime := time.UnixMilli(rsp.Origin.Time).UTC()
		grafanaAccessor.SetOriginInfo(&utils.ResourceOriginInfo{
			Name: rsp.Origin.Source,
			Path: rsp.Origin.Key, // Using "key" in the
			// Path: rsp.Origin.Path,
			Timestamp: &originTime,
		})
	}

	if len(rsp.Labels) > 0 {
		metaAccessor.SetLabels(rsp.Labels)
	}

	// TODO fields?

	// If the concrete type has a settable Status field, unmarshal the stored
	// status JSON into it via reflection; types without Status are skipped.
	if len(rsp.Status) > 0 {
		status := reflect.ValueOf(res).Elem().FieldByName("Status")
		if status != (reflect.Value{}) && status.CanSet() {
			err = json.Unmarshal(rsp.Status, status.Addr().Interface())
			if err != nil {
				return err
			}
		}
	}

	return nil
}
// resourceToEntity converts a runtime object into its Entity representation:
// it copies identifying metadata and grafana annotations into the entity's
// denormalized columns, serializes ObjectMeta into Meta, encodes the full
// object into Body, and snapshots any Status field into Status.
func resourceToEntity(res runtime.Object, k grafanaregistry.Key, codec runtime.Codec) (*entityStore.Entity, error) {
	metaAccessor, err := meta.Accessor(res)
	if err != nil {
		return nil, err
	}
	grafanaAccessor, err := utils.MetaAccessor(metaAccessor)
	if err != nil {
		return nil, err
	}

	// Parse error deliberately ignored: a missing/non-numeric resourceVersion
	// yields rv == 0.
	rv, _ := strconv.ParseInt(metaAccessor.GetResourceVersion(), 10, 64)

	// add the object's name to the provided key
	k.Name = metaAccessor.GetName()

	rsp := &entityStore.Entity{
		Group:           k.Group,
		GroupVersion:    res.GetObjectKind().GroupVersionKind().Version,
		Resource:        k.Resource,
		Namespace:       k.Namespace,
		Key:             k.String(),
		Name:            k.Name,
		Guid:            string(metaAccessor.GetUID()),
		ResourceVersion: rv,
		Folder:          grafanaAccessor.GetFolder(),
		CreatedAt:       metaAccessor.GetCreationTimestamp().Time.UnixMilli(),
		CreatedBy:       grafanaAccessor.GetCreatedBy(),
		UpdatedBy:       grafanaAccessor.GetUpdatedBy(),
		Slug:            grafanaAccessor.GetSlug(),
		Title:           grafanaAccessor.FindTitle(metaAccessor.GetName()),
		Origin: &entityStore.EntityOriginInfo{
			Source: grafanaAccessor.GetOriginName(),
			// Deprecated: Keeping "key" in the protobuf to avoid migrations while a bigger one is in place
			Key: grafanaAccessor.GetOriginPath(),
		},
		Labels: metaAccessor.GetLabels(),
	}

	// Optional timestamps: only set when present on the object.
	t, err := grafanaAccessor.GetUpdatedTimestamp()
	if err != nil {
		return nil, err
	}
	if t != nil {
		rsp.UpdatedAt = t.UnixMilli()
	}

	t, err = grafanaAccessor.GetOriginTimestamp()
	if err != nil {
		return nil, err
	}
	if t != nil {
		rsp.Origin.Time = t.UnixMilli()
	}

	// Meta holds just the ObjectMeta portion as JSON.
	rsp.Meta, err = json.Marshal(meta.AsPartialObjectMetadata(metaAccessor))
	if err != nil {
		return nil, err
	}

	// Body holds the full object encoded with the provided codec.
	var buf bytes.Buffer
	err = codec.Encode(res, &buf)
	if err != nil {
		return nil, err
	}
	rsp.Body = buf.Bytes()

	// Snapshot the Status field (if the concrete type has one) as JSON.
	status := reflect.ValueOf(res).Elem().FieldByName("Status")
	if status != (reflect.Value{}) {
		rsp.Status, err = json.Marshal(status.Interface())
		if err != nil {
			return nil, err
		}
	}

	return rsp, nil
}

View File

@ -1,219 +0,0 @@
package entity
import (
"fmt"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/serializer"
"github.com/grafana/grafana/pkg/apis/playlist/v0alpha1"
grafanaregistry "github.com/grafana/grafana/pkg/apiserver/registry/generic"
entityStore "github.com/grafana/grafana/pkg/services/store/entity"
)
// TestResourceToEntity checks that resourceToEntity maps a Playlist object's
// metadata, grafana.app annotations, and codec-encoded body onto the
// corresponding Entity fields.
func TestResourceToEntity(t *testing.T) {
	createdAt := metav1.Now()
	createdAtStr := createdAt.UTC().Format(time.RFC3339)

	// truncate here because RFC3339 doesn't support millisecond precision
	// consider updating accessor to use RFC3339Nano to encode timestamps
	updatedAt := createdAt.Add(time.Hour).Truncate(time.Second)
	updatedAtStr := updatedAt.UTC().Format(time.RFC3339)

	// A scheme/codec knowing only the Playlist type is sufficient here.
	Scheme := runtime.NewScheme()
	Scheme.AddKnownTypes(v0alpha1.PlaylistResourceInfo.GroupVersion(), &v0alpha1.Playlist{})
	Codecs := serializer.NewCodecFactory(Scheme)

	testCases := []struct {
		key                  grafanaregistry.Key
		resource             runtime.Object
		codec                runtime.Codec
		expectedKey          string
		expectedGroupVersion string
		expectedName         string
		expectedNamespace    string
		expectedTitle        string
		expectedGuid         string
		expectedVersion      string
		expectedFolder       string
		expectedCreatedAt    int64
		expectedUpdatedAt    int64
		expectedCreatedBy    string
		expectedUpdatedBy    string
		expectedSlug         string
		expectedOrigin       *entityStore.EntityOriginInfo
		expectedLabels       map[string]string
		expectedMeta         []byte
		expectedBody         []byte
	}{
		{
			key: grafanaregistry.Key{
				Group:     "playlist.grafana.app",
				Resource:  "playlists",
				Namespace: "default",
				Name:      "test-name",
			},
			resource: &v0alpha1.Playlist{
				TypeMeta: metav1.TypeMeta{
					APIVersion: "playlist.grafana.app/v0alpha1",
					Kind:       "Playlist",
				},
				ObjectMeta: metav1.ObjectMeta{
					CreationTimestamp: createdAt,
					Labels:            map[string]string{"label1": "value1", "label2": "value2"},
					Name:              "test-name",
					ResourceVersion:   "1",
					UID:               "test-uid",
					Annotations: map[string]string{
						"grafana.app/createdBy":        "test-created-by",
						"grafana.app/updatedBy":        "test-updated-by",
						"grafana.app/updatedTimestamp": updatedAtStr,
						"grafana.app/folder":           "test-folder",
						"grafana.app/slug":             "test-slug",
					},
				},
				Spec: v0alpha1.Spec{
					Title:    "A playlist",
					Interval: "5m",
					Items: []v0alpha1.Item{
						{Type: v0alpha1.ItemTypeDashboardByTag, Value: "panel-tests"},
						{Type: v0alpha1.ItemTypeDashboardByUid, Value: "vmie2cmWz"},
					},
				},
			},
			expectedKey:          "/group/playlist.grafana.app/resource/playlists/namespace/default/name/test-name",
			expectedGroupVersion: "v0alpha1",
			expectedName:         "test-name",
			expectedNamespace:    "default",
			expectedTitle:        "A playlist",
			expectedGuid:         "test-uid",
			expectedVersion:      "1",
			expectedFolder:       "test-folder",
			expectedCreatedAt:    createdAt.UnixMilli(),
			expectedUpdatedAt:    updatedAt.UnixMilli(),
			expectedCreatedBy:    "test-created-by",
			expectedUpdatedBy:    "test-updated-by",
			expectedSlug:         "test-slug",
			expectedOrigin:       &entityStore.EntityOriginInfo{Source: "", Key: ""},
			expectedLabels:       map[string]string{"label1": "value1", "label2": "value2"},
			expectedMeta:         []byte(fmt.Sprintf(`{"metadata":{"name":"test-name","uid":"test-uid","resourceVersion":"1","creationTimestamp":%q,"labels":{"label1":"value1","label2":"value2"},"annotations":{"grafana.app/createdBy":"test-created-by","grafana.app/folder":"test-folder","grafana.app/slug":"test-slug","grafana.app/updatedBy":"test-updated-by","grafana.app/updatedTimestamp":%q}}}`, createdAtStr, updatedAtStr)),
			expectedBody:         []byte(fmt.Sprintf(`{"kind":"Playlist","apiVersion":"playlist.grafana.app/v0alpha1","metadata":{"name":"test-name","uid":"test-uid","resourceVersion":"1","creationTimestamp":%q,"labels":{"label1":"value1","label2":"value2"},"annotations":{"grafana.app/createdBy":"test-created-by","grafana.app/folder":"test-folder","grafana.app/slug":"test-slug","grafana.app/updatedBy":"test-updated-by","grafana.app/updatedTimestamp":%q}},"spec":{"title":"A playlist","interval":"5m","items":[{"type":"dashboard_by_tag","value":"panel-tests"},{"type":"dashboard_by_uid","value":"vmie2cmWz"}]}}`, createdAtStr, updatedAtStr)),
		},
	}

	for _, tc := range testCases {
		t.Run(tc.resource.GetObjectKind().GroupVersionKind().Kind+" to entity conversion should succeed", func(t *testing.T) {
			entity, err := resourceToEntity(tc.resource, tc.key, Codecs.LegacyCodec(v0alpha1.PlaylistResourceInfo.GroupVersion()))
			require.NoError(t, err)
			assert.Equal(t, tc.expectedKey, entity.Key)
			assert.Equal(t, tc.expectedName, entity.Name)
			assert.Equal(t, tc.expectedNamespace, entity.Namespace)
			assert.Equal(t, tc.expectedTitle, entity.Title)
			assert.Equal(t, tc.expectedGroupVersion, entity.GroupVersion)
			assert.Equal(t, tc.expectedName, entity.Name)
			assert.Equal(t, tc.expectedGuid, entity.Guid)
			assert.Equal(t, tc.expectedFolder, entity.Folder)
			assert.Equal(t, tc.expectedCreatedAt, entity.CreatedAt)
			assert.Equal(t, tc.expectedUpdatedAt, entity.UpdatedAt)
			assert.Equal(t, tc.expectedCreatedBy, entity.CreatedBy)
			assert.Equal(t, tc.expectedUpdatedBy, entity.UpdatedBy)
			assert.Equal(t, tc.expectedSlug, entity.Slug)
			assert.Equal(t, tc.expectedOrigin, entity.Origin)
			assert.Equal(t, tc.expectedLabels, entity.Labels)
			assert.Equal(t, tc.expectedMeta, entity.Meta)
			assert.Equal(t, tc.expectedBody, entity.Body[:len(entity.Body)-1]) // remove trailing newline
		})
	}
}
// TestEntityToResource checks the reverse mapping: EntityToRuntimeObject
// hydrates a Playlist from a stored Entity, with denormalized entity columns
// (name, guid, resourceVersion) taking precedence over the encoded body.
func TestEntityToResource(t *testing.T) {
	createdAt := metav1.Now()
	createdAtStr := createdAt.UTC().Format(time.RFC3339)

	updatedAt := createdAt.Add(time.Hour)
	updatedAtStr := updatedAt.UTC().Format(time.RFC3339)

	Scheme := runtime.NewScheme()
	Scheme.AddKnownTypes(v0alpha1.PlaylistResourceInfo.GroupVersion(), &v0alpha1.Playlist{})
	Codecs := serializer.NewCodecFactory(Scheme)

	testCases := []struct {
		entity                    *entityStore.Entity
		codec                     runtime.Codec
		expectedApiVersion        string
		expectedCreationTimestamp metav1.Time
		expectedLabels            map[string]string
		expectedName              string
		expectedResourceVersion   string
		expectedUid               string
		expectedTitle             string
		expectedAnnotations       map[string]string
		expectedSpec              any
	}{
		{
			entity: &entityStore.Entity{
				Key:          "/group/playlist.grafana.app/resource/playlists/namespaces/default/name/test-uid",
				GroupVersion: "v0alpha1",
				Name:         "test-uid",
				Title:        "A playlist",
				Guid:         "test-guid",
				Folder:       "test-folder",
				CreatedBy:    "test-created-by",
				CreatedAt:    createdAt.UnixMilli(),
				UpdatedAt:    updatedAt.UnixMilli(),
				UpdatedBy:    "test-updated-by",
				Slug:         "test-slug",
				Origin:       &entityStore.EntityOriginInfo{},
				Labels:       map[string]string{"label1": "value1", "label2": "value2"},
				Meta:         []byte(fmt.Sprintf(`{"metadata":{"name":"test-name","uid":"test-uid","resourceVersion":"1","creationTimestamp":%q,"labels":{"label1":"value1","label2":"value2"},"annotations":{"grafana.app/createdBy":"test-created-by","grafana.app/folder":"test-folder","grafana.app/slug":"test-slug","grafana.app/updatedTimestamp":%q,"grafana.app/updatedBy":"test-updated-by"}}}`, createdAtStr, updatedAtStr)),
				Body:         []byte(fmt.Sprintf(`{"kind":"Playlist","apiVersion":"playlist.grafana.app/v0alpha1","metadata":{"name":"test-name","uid":"test-uid","resourceVersion":"1","creationTimestamp":%q,"labels":{"label1":"value1","label2":"value2"},"annotations":{"grafana.app/createdBy":"test-created-by","grafana.app/folder":"test-folder","grafana.app/slug":"test-slug","grafana.app/updatedBy":"test-updated-by","grafana.app/updatedTimestamp":%q}},"spec":{"title":"A playlist","interval":"5m","items":[{"type":"dashboard_by_tag","value":"panel-tests"},{"type":"dashboard_by_uid","value":"vmie2cmWz"}]}}`, createdAtStr, updatedAtStr)),
				ResourceVersion: 1,
				Action:          entityStore.Entity_CREATED,
			},
			codec:                     runtime.Codec(nil),
			expectedApiVersion:        "playlist.grafana.app/v0alpha1",
			expectedCreationTimestamp: createdAt,
			expectedLabels:            map[string]string{"label1": "value1", "label2": "value2"},
			// entity.Name wins over the "test-name" embedded in the body.
			expectedName:            "test-uid",
			expectedTitle:           "test-name",
			expectedResourceVersion: "1",
			// entity.Guid wins over the "test-uid" UID embedded in the body.
			expectedUid: "test-guid",
			expectedAnnotations: map[string]string{
				"grafana.app/createdBy":        "test-created-by",
				"grafana.app/folder":           "test-folder",
				"grafana.app/slug":             "test-slug",
				"grafana.app/updatedBy":        "test-updated-by",
				"grafana.app/updatedTimestamp": updatedAtStr,
			},
			expectedSpec: v0alpha1.Spec{
				Title:    "A playlist",
				Interval: "5m",
				Items: []v0alpha1.Item{
					{Type: v0alpha1.ItemTypeDashboardByTag, Value: "panel-tests"},
					{Type: v0alpha1.ItemTypeDashboardByUid, Value: "vmie2cmWz"},
				},
			},
		},
	}

	for _, tc := range testCases {
		t.Run(tc.entity.Key+" to resource conversion should succeed", func(t *testing.T) {
			var p v0alpha1.Playlist
			err := EntityToRuntimeObject(tc.entity, &p, Codecs.LegacyCodec(v0alpha1.PlaylistResourceInfo.GroupVersion()))
			require.NoError(t, err)
			assert.Equal(t, tc.expectedApiVersion, p.TypeMeta.APIVersion)
			assert.Equal(t, tc.expectedCreationTimestamp.Unix(), p.ObjectMeta.CreationTimestamp.Unix())
			assert.Equal(t, tc.expectedLabels, p.ObjectMeta.Labels)
			assert.Equal(t, tc.expectedName, p.ObjectMeta.Name)
			assert.Equal(t, tc.expectedResourceVersion, p.ObjectMeta.ResourceVersion)
			assert.Equal(t, tc.expectedUid, string(p.ObjectMeta.UID))
			assert.Equal(t, tc.expectedAnnotations, p.ObjectMeta.Annotations)
			assert.Equal(t, tc.expectedSpec, p.Spec)
		})
	}
}

View File

@ -1,204 +0,0 @@
# Unified Storage
The unified storage project aims to provide a simple and extensible backend to unify the way we store different objects within the Grafana app platform.
It provides generic storage for k8s objects, and can store data either within dedicated tables in the main Grafana database, or in separate storage.
By default it runs in-process within Grafana, but it can also be run as a standalone GRPC service (`storage-server`).
## Storage Overview
There are 2 main tables: the `entity` table stores a "current" view of the objects, and the `entity_history` table stores a record of each revision of a given object.
## Running Unified Storage
### Baseline configuration
The minimum config settings required are:
```ini
; need to specify target here for override to work later
target = all
[server]
; https is required for kubectl
protocol = https
[feature_toggles]
; enable unified storage
unifiedStorage = true
; enable k8s apiserver
grafanaAPIServer = true
; store playlists in k8s
kubernetesPlaylists = true
; store json id token in context
idForwarding = true
[grafana-apiserver]
; use unified storage for k8s apiserver
storage_type = unified
```
With this configuration, you can run everything in-process. Run the Grafana backend with:
```sh
bra run
```
or
```sh
make run
```
The default kubeconfig sends requests directly to the apiserver, to authenticate as a grafana user, create `grafana.kubeconfig`:
```yaml
apiVersion: v1
clusters:
- cluster:
insecure-skip-tls-verify: true
server: https://127.0.0.1:3000
name: default-cluster
contexts:
- context:
cluster: default-cluster
namespace: default
user: default
name: default-context
current-context: default-context
kind: Config
preferences: {}
users:
- name: default
user:
username: <username>
password: <password>
```
Where `<username>` and `<password>` are credentials for basic auth against Grafana. For example, with the [default credentials](https://github.com/grafana/grafana/blob/HEAD/contribute/developer-guide.md#backend):
```yaml
username: admin
password: admin
```
In this mode, you can interact with the k8s api. Make sure you are in the directory where you created `grafana.kubeconfig`. Then run:
```sh
kubectl --kubeconfig=./grafana.kubeconfig get playlist
```
If this is your first time running the command, a successful response would be:
```sh
No resources found in default namespace.
```
To create a playlist, create a file `playlist-generate.yaml`:
```yaml
apiVersion: playlist.grafana.app/v0alpha1
kind: Playlist
metadata:
generateName: x # anything is ok here... except yes or true -- they become boolean!
labels:
foo: bar
annotations:
grafana.app/slug: "slugger"
grafana.app/updatedBy: "updater"
spec:
title: Playlist with auto generated UID
interval: 5m
items:
- type: dashboard_by_tag
value: panel-tests
- type: dashboard_by_uid
value: vmie2cmWz # dashboard from devenv
```
then run:
```sh
kubectl --kubeconfig=./grafana.kubeconfig create -f playlist-generate.yaml
```
For example, a successful response would be:
```sh
playlist.playlist.grafana.app/u394j4d3-s63j-2d74-g8hf-958773jtybf2 created
```
When running
```sh
kubectl --kubeconfig=./grafana.kubeconfig get playlist
```
you should now see something like:
```sh
NAME TITLE INTERVAL CREATED AT
u394j4d3-s63j-2d74-g8hf-958773jtybf2 Playlist with auto generated UID 5m 2023-12-14T13:53:35Z
```
To update the playlist, update the `playlist-generate.yaml` file then run:
```sh
kubectl --kubeconfig=./grafana.kubeconfig patch playlist <NAME> --patch-file playlist-generate.yaml
```
In the example, `<NAME>` would be `u394j4d3-s63j-2d74-g8hf-958773jtybf2`.
### Use a separate database
By default Unified Storage uses the Grafana database. To run against a separate database, update `custom.ini` by adding the following section to it:
```
[entity_api]
db_type = mysql
db_host = localhost:3306
db_name = grafana
db_user = <username>
db_pass = <password>
```
MySQL and Postgres are both supported. The `<username>` and `<password>` values can be found in the following devenv docker compose files: [MySQL](https://github.com/grafana/grafana/blob/main/devenv/docker/blocks/mysql/docker-compose.yaml#L6-L7) and [Postgres](https://github.com/grafana/grafana/blob/main/devenv/docker/blocks/postgres/docker-compose.yaml#L4-L5).
Then, run
```sh
make devenv sources=<source>
```
where source is either `mysql` or `postgres`.
Finally, run the Grafana backend with
```sh
bra run
```
or
```sh
make run
```
### Run as a GRPC service
#### Start GRPC storage-server
This currently only works with a separate database configuration (see previous section).
Start the storage-server with:
```sh
GF_DEFAULT_TARGET=storage-server ./bin/grafana server target
```
The GRPC service will listen on port 10000
#### Use GRPC server
To run grafana against the storage-server, override the `storage_type` setting:
```sh
GF_GRAFANA_APISERVER_STORAGE_TYPE=unified-grpc ./bin/grafana server
```
You can then list the previously-created playlists with:
```sh
kubectl --kubeconfig=./grafana.kubeconfig get playlist
```
## Changing protobuf interface
- install [protoc](https://grpc.io/docs/protoc-installation/)
- install the protocol compiler plugin for Go
```sh
go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@latest
```
- make changes in `.proto` file
- to compile all protobuf files in the repository run `make protobuf` at its top level

View File

@ -1,10 +0,0 @@
# buf code-generation config for the entity-store protobufs.
version: v1
plugins:
  # Generate Go message types next to the .proto sources.
  - plugin: go
    out: pkg/services/store/entity
    opt: paths=source_relative
  # Generate gRPC client/server stubs; servers are not required to embed
  # the Unimplemented*Server forward-compatibility type.
  - plugin: go-grpc
    out: pkg/services/store/entity
    opt:
      - paths=source_relative
      - require_unimplemented_servers=false

View File

@ -1,7 +0,0 @@
# buf lint and breaking-change-detection config for the entity-store protobufs.
version: v1
breaking:
  use:
    # Detect wire/source breaking changes at per-file granularity.
    - FILE
lint:
  use:
    # Apply buf's standard (DEFAULT) lint rule set.
    - DEFAULT

View File

@ -1,30 +0,0 @@
package entity
import (
"github.com/fullstorydev/grpchan"
"github.com/fullstorydev/grpchan/inprocgrpc"
grpcAuth "github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/auth"
"google.golang.org/grpc"
grpcUtils "github.com/grafana/grafana/pkg/storage/unified/resource/grpc"
)
// NewEntityStoreClientLocal wires an EntityStoreClient directly to an
// in-process server over an inprocgrpc channel, applying the same auth
// interceptors used on the networked path.
func NewEntityStoreClientLocal(server EntityStoreServer) EntityStoreClient {
	auth := &grpcUtils.Authenticator{}
	wrappedDesc := grpchan.InterceptServer(
		&EntityStore_ServiceDesc,
		grpcAuth.UnaryServerInterceptor(auth.Authenticate),
		grpcAuth.StreamServerInterceptor(auth.Authenticate),
	)

	channel := &inprocgrpc.Channel{}
	channel.RegisterService(wrappedDesc, server)

	conn := grpchan.InterceptClientConn(channel, grpcUtils.UnaryClientInterceptor, grpcUtils.StreamClientInterceptor)
	return NewEntityStoreClient(conn)
}
// NewEntityStoreClientGRPC builds an EntityStoreClient over an established
// gRPC connection, attaching the shared client interceptors.
func NewEntityStoreClientGRPC(channel *grpc.ClientConn) EntityStoreClient {
	conn := grpchan.InterceptClientConn(channel, grpcUtils.UnaryClientInterceptor, grpcUtils.StreamClientInterceptor)
	return NewEntityStoreClient(conn)
}

View File

@ -1,59 +0,0 @@
package dbimpl
import (
"context"
"database/sql"
"fmt"
entitydb "github.com/grafana/grafana/pkg/services/store/entity/db"
)
// NewDB adapts a *sql.DB into the entitydb.DB interface, retaining the
// driver name so callers can make dialect-specific decisions later.
func NewDB(d *sql.DB, driverName string) entitydb.DB {
	return sqldb{driverName: driverName, DB: d}
}
// sqldb wraps *sql.DB to implement entitydb.DB, carrying the driver name
// alongside the embedded connection pool.
type sqldb struct {
	*sql.DB
	driverName string // e.g. "mysql" or "postgres"; see entitydb driver constants
}

// DriverName reports the database driver this pool was opened with.
func (d sqldb) DriverName() string {
	return d.driverName
}
// BeginTx starts a transaction on the underlying pool and wraps it in the
// entitydb.Tx adapter.
func (d sqldb) BeginTx(ctx context.Context, opts *sql.TxOptions) (entitydb.Tx, error) {
	underlying, err := d.DB.BeginTx(ctx, opts)
	if err != nil {
		return nil, err
	}
	return tx{Tx: underlying}, nil
}
// WithTx runs f inside a transaction: it begins a tx, rolls back if f fails
// (reporting both errors when rollback itself fails), and commits otherwise.
func (d sqldb) WithTx(ctx context.Context, opts *sql.TxOptions, f entitydb.TxFunc) error {
	txn, err := d.BeginTx(ctx, opts)
	if err != nil {
		return fmt.Errorf("begin tx: %w", err)
	}

	if fErr := f(ctx, txn); fErr != nil {
		if rollbackErr := txn.Rollback(); rollbackErr != nil {
			return fmt.Errorf("tx err: %w; rollback err: %w", fErr, rollbackErr)
		}
		return fmt.Errorf("tx err: %w", fErr)
	}

	if commitErr := txn.Commit(); commitErr != nil {
		return fmt.Errorf("commit err: %w", commitErr)
	}
	return nil
}
// tx wraps *sql.Tx so it satisfies the entitydb.Tx interface via the
// embedded methods.
type tx struct {
	*sql.Tx
}

View File

@ -1,105 +0,0 @@
package dbimpl
import (
"cmp"
"fmt"
"strings"
"time"
"github.com/go-sql-driver/mysql"
"xorm.io/xorm"
"github.com/grafana/grafana/pkg/infra/tracing"
"github.com/grafana/grafana/pkg/services/store/entity/db"
)
// getEngineMySQL builds a MySQL xorm engine from [entity_api]-style section
// config. Individual getter.String calls accumulate errors internally; the
// single getter.Err() check below surfaces the first failure after all reads.
func getEngineMySQL(getter *sectionGetter, _ tracing.Tracer) (*xorm.Engine, error) {
	config := mysql.NewConfig()
	config.User = getter.String("db_user")
	config.Passwd = getter.String("db_pass")
	config.Net = "tcp"
	config.Addr = getter.String("db_host")
	config.DBName = getter.String("db_name")
	config.Params = map[string]string{
		// See: https://dev.mysql.com/doc/refman/en/sql-mode.html
		"@@SESSION.sql_mode": "ANSI",
	}
	config.Collation = "utf8mb4_unicode_ci"
	config.Loc = time.UTC
	config.AllowNativePasswords = true
	config.ClientFoundRows = true

	// TODO: do we want to support these?
	// config.ServerPubKey = getter.String("db_server_pub_key")
	// config.TLSConfig = getter.String("db_tls_config_name")

	// Deferred error check covering every getter.String read above.
	if err := getter.Err(); err != nil {
		return nil, fmt.Errorf("config error: %w", err)
	}

	// A path-like host (leading "/") means a unix socket, not TCP.
	if strings.HasPrefix(config.Addr, "/") {
		config.Net = "unix"
	}

	// FIXME: get rid of xorm
	engine, err := xorm.NewEngine(db.DriverMySQL, config.FormatDSN())
	if err != nil {
		return nil, fmt.Errorf("open database: %w", err)
	}

	engine.SetMaxOpenConns(0)
	engine.SetMaxIdleConns(2)
	engine.SetConnMaxLifetime(4 * time.Hour)

	return engine, nil
}
// getEnginePostgres builds a Postgres xorm engine from [entity_api]-style
// section config, assembling a key/value DSN. sslmode defaults to "disable"
// when unset. Like getEngineMySQL, getter errors are checked once after the
// config reads.
func getEnginePostgres(getter *sectionGetter, _ tracing.Tracer) (*xorm.Engine, error) {
	dsnKV := map[string]string{
		"user":     getter.String("db_user"),
		"password": getter.String("db_pass"),
		"dbname":   getter.String("db_name"),
		"sslmode":  cmp.Or(getter.String("db_sslmode"), "disable"),
	}

	// TODO: probably interesting:
	// "passfile", "statement_timeout", "lock_timeout", "connect_timeout"

	// TODO: for CockroachDB, we probably need to use the following:
	// dsnKV["options"] = "-c enable_experimental_alter_column_type_general=true"
	// Or otherwise specify it as:
	// dsnKV["enable_experimental_alter_column_type_general"] = "true"

	// TODO: do we want to support these options in the DSN as well?
	// "sslkey", "sslcert", "sslrootcert", "sslpassword", "sslsni", "krbspn",
	// "krbsrvname", "target_session_attrs", "service", "servicefile"

	// More on Postgres connection string parameters:
	// https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING

	hostport := getter.String("db_host")

	// Deferred error check covering every getter.String read above.
	if err := getter.Err(); err != nil {
		return nil, fmt.Errorf("config error: %w", err)
	}

	// Split host:port, defaulting to localhost:5432 when parts are missing.
	host, port, err := splitHostPortDefault(hostport, "127.0.0.1", "5432")
	if err != nil {
		return nil, fmt.Errorf("invalid db_host: %w", err)
	}
	dsnKV["host"] = host
	dsnKV["port"] = port

	dsn, err := MakeDSN(dsnKV)
	if err != nil {
		return nil, fmt.Errorf("error building DSN: %w", err)
	}

	// FIXME: get rid of xorm
	engine, err := xorm.NewEngine(db.DriverPostgres, dsn)
	if err != nil {
		return nil, fmt.Errorf("open database: %w", err)
	}

	return engine, nil
}

View File

@ -1,92 +0,0 @@
package dbimpl
import (
"testing"
"github.com/stretchr/testify/assert"
)
// TestGetEngineMySQLFromConfig covers engine construction from section
// settings, including rejection of values that are not valid UTF-8.
func TestGetEngineMySQLFromConfig(t *testing.T) {
	t.Parallel()

	t.Run("happy path", func(t *testing.T) {
		t.Parallel()

		getter := newTestSectionGetter(map[string]string{
			"db_type": "mysql",
			"db_host": "/var/run/mysql.socket",
			"db_name": "grafana",
			"db_user": "user",
			// FIX: getEngineMySQL reads "db_pass", not "db_password"; the
			// previous key was never read, so the password was silently empty.
			"db_pass": "password",
		})
		engine, err := getEngineMySQL(getter, nil)
		assert.NotNil(t, engine)
		assert.NoError(t, err)
	})

	t.Run("invalid string", func(t *testing.T) {
		t.Parallel()

		getter := newTestSectionGetter(map[string]string{
			"db_type": "mysql",
			"db_host": "/var/run/mysql.socket",
			// a non-UTF-8 db_name must surface ErrInvalidUTF8Sequence
			"db_name": string(invalidUTF8ByteSequence),
			"db_user": "user",
			"db_pass": "password",
		})
		engine, err := getEngineMySQL(getter, nil)
		assert.Nil(t, engine)
		assert.Error(t, err)
		assert.ErrorIs(t, err, ErrInvalidUTF8Sequence)
	})
}
// TestGetEnginePostgresFromConfig covers Postgres engine construction,
// invalid UTF-8 rejection, and malformed host:port rejection.
func TestGetEnginePostgresFromConfig(t *testing.T) {
	t.Parallel()

	t.Run("happy path", func(t *testing.T) {
		t.Parallel()

		getter := newTestSectionGetter(map[string]string{
			// FIX: this is the Postgres test; the value was copy-pasted as
			// "mysql" from the MySQL test above.
			"db_type": "postgres",
			"db_host": "localhost",
			"db_name": "grafana",
			"db_user": "user",
			// FIX: getEnginePostgres reads "db_pass", not "db_password".
			"db_pass": "password",
		})
		engine, err := getEnginePostgres(getter, nil)
		assert.NotNil(t, engine)
		assert.NoError(t, err)
	})

	t.Run("invalid string", func(t *testing.T) {
		t.Parallel()

		getter := newTestSectionGetter(map[string]string{
			"db_type": "postgres",
			// a non-UTF-8 db_host must surface ErrInvalidUTF8Sequence
			"db_host": string(invalidUTF8ByteSequence),
			"db_name": "grafana",
			"db_user": "user",
			"db_pass": "password",
		})
		engine, err := getEnginePostgres(getter, nil)
		assert.Nil(t, engine)
		assert.Error(t, err)
		assert.ErrorIs(t, err, ErrInvalidUTF8Sequence)
	})

	t.Run("invalid hostport", func(t *testing.T) {
		t.Parallel()

		getter := newTestSectionGetter(map[string]string{
			"db_type": "postgres",
			// "1:1:1" cannot be parsed as host:port, with or without a
			// default port appended
			"db_host": "1:1:1",
			"db_name": "grafana",
			"db_user": "user",
			"db_pass": "password",
		})
		engine, err := getEnginePostgres(getter, nil)
		assert.Nil(t, engine)
		assert.Error(t, err)
	})
}

View File

@ -1,154 +0,0 @@
package dbimpl
import (
"context"
"errors"
"testing"
"time"
sqlmock "github.com/DATA-DOG/go-sqlmock"
"github.com/stretchr/testify/require"
entitydb "github.com/grafana/grafana/pkg/services/store/entity/db"
)
// newCtx returns a context bounded by the test's own deadline when one is
// set, or by a default 5 second timeout otherwise. Cancellation is tied to
// test cleanup so nothing leaks.
func newCtx(t *testing.T) context.Context {
	t.Helper()

	deadline, ok := t.Deadline()
	if !ok {
		// no -timeout deadline on this test run: fall back to a fixed budget
		// (context.WithTimeout is exactly WithDeadline(now+d))
		deadline = time.Now().Add(5 * time.Second)
	}
	ctx, cancel := context.WithDeadline(context.Background(), deadline)
	t.Cleanup(cancel)
	return ctx
}
// errTest is a sentinel error returned by mocked calls so tests can verify
// that errors propagate unchanged through the DB wrapper.
var errTest = errors.New("because of reasons")
// driverName is the driver name registered by go-sqlmock.
const driverName = "sqlmock"
// TestDB_BeginTx verifies that DB.BeginTx delegates to the underlying
// *sql.DB: a successful BEGIN yields a transaction, a failed BEGIN surfaces
// the driver error unchanged.
func TestDB_BeginTx(t *testing.T) {
	t.Parallel()
	t.Run("happy path", func(t *testing.T) {
		t.Parallel()
		// sqlmock provides an in-memory stand-in for a real database handle
		sqldb, mock, err := sqlmock.New()
		require.NoError(t, err)
		db := NewDB(sqldb, driverName)
		require.Equal(t, driverName, db.DriverName())
		mock.ExpectBegin()
		tx, err := db.BeginTx(newCtx(t), nil)
		require.NoError(t, err)
		require.NotNil(t, tx)
	})
	t.Run("fail begin", func(t *testing.T) {
		t.Parallel()
		sqldb, mock, err := sqlmock.New()
		require.NoError(t, err)
		db := NewDB(sqldb, "sqlmock")
		// the mocked BEGIN fails; BeginTx must return that exact error
		mock.ExpectBegin().WillReturnError(errTest)
		tx, err := db.BeginTx(newCtx(t), nil)
		require.Nil(t, tx)
		require.Error(t, err)
		require.ErrorIs(t, err, errTest)
	})
}
// TestDB_WithTx verifies the transaction wrapper contract: commit on callback
// success, rollback on callback failure, and joining of errors when the
// rollback or commit itself fails.
func TestDB_WithTx(t *testing.T) {
	t.Parallel()
	// newTxFunc builds a TxFunc that does no work and simply returns err.
	newTxFunc := func(err error) entitydb.TxFunc {
		return func(context.Context, entitydb.Tx) error {
			return err
		}
	}
	t.Run("happy path", func(t *testing.T) {
		t.Parallel()
		sqldb, mock, err := sqlmock.New()
		require.NoError(t, err)
		db := NewDB(sqldb, "sqlmock")
		// success path: BEGIN followed by COMMIT, no rollback
		mock.ExpectBegin()
		mock.ExpectCommit()
		err = db.WithTx(newCtx(t), nil, newTxFunc(nil))
		require.NoError(t, err)
	})
	t.Run("fail begin", func(t *testing.T) {
		t.Parallel()
		sqldb, mock, err := sqlmock.New()
		require.NoError(t, err)
		db := NewDB(sqldb, "sqlmock")
		mock.ExpectBegin().WillReturnError(errTest)
		err = db.WithTx(newCtx(t), nil, newTxFunc(nil))
		require.Error(t, err)
		require.ErrorIs(t, err, errTest)
	})
	t.Run("fail tx", func(t *testing.T) {
		t.Parallel()
		sqldb, mock, err := sqlmock.New()
		require.NoError(t, err)
		db := NewDB(sqldb, "sqlmock")
		// a callback error must trigger ROLLBACK and be returned to the caller
		mock.ExpectBegin()
		mock.ExpectRollback()
		err = db.WithTx(newCtx(t), nil, newTxFunc(errTest))
		require.Error(t, err)
		require.ErrorIs(t, err, errTest)
	})
	t.Run("fail tx; fail rollback", func(t *testing.T) {
		t.Parallel()
		sqldb, mock, err := sqlmock.New()
		require.NoError(t, err)
		db := NewDB(sqldb, "sqlmock")
		errTest2 := errors.New("yet another err")
		// when rollback also fails, both the callback error and the rollback
		// error must be observable via errors.Is
		mock.ExpectBegin()
		mock.ExpectRollback().WillReturnError(errTest)
		err = db.WithTx(newCtx(t), nil, newTxFunc(errTest2))
		require.Error(t, err)
		require.ErrorIs(t, err, errTest)
		require.ErrorIs(t, err, errTest2)
	})
	t.Run("fail commit", func(t *testing.T) {
		t.Parallel()
		sqldb, mock, err := sqlmock.New()
		require.NoError(t, err)
		db := NewDB(sqldb, "sqlmock")
		mock.ExpectBegin()
		mock.ExpectCommit().WillReturnError(errTest)
		err = db.WithTx(newCtx(t), nil, newTxFunc(nil))
		require.Error(t, err)
		require.ErrorIs(t, err, errTest)
	})
}

View File

@ -1,165 +0,0 @@
package dbimpl
import (
"fmt"
"sync"
"github.com/dlmiddlecote/sqlstats"
"github.com/jmoiron/sqlx"
"github.com/prometheus/client_golang/prometheus"
"xorm.io/xorm"
"github.com/grafana/grafana/pkg/infra/db"
"github.com/grafana/grafana/pkg/infra/log"
"github.com/grafana/grafana/pkg/infra/tracing"
"github.com/grafana/grafana/pkg/services/featuremgmt"
"github.com/grafana/grafana/pkg/services/sqlstore/session"
entitydb "github.com/grafana/grafana/pkg/services/store/entity/db"
"github.com/grafana/grafana/pkg/services/store/entity/db/migrations"
"github.com/grafana/grafana/pkg/setting"
)
// Compile-time assertion that *EntityDB satisfies EntityDBInterface.
var _ entitydb.EntityDBInterface = (*EntityDB)(nil)
// ProvideEntityDB is the dependency-injection provider for EntityDB. It only
// wires dependencies; no database connection is opened until Init is called.
func ProvideEntityDB(db db.DB, cfg *setting.Cfg, features featuremgmt.FeatureToggles, tracer tracing.Tracer) (*EntityDB, error) {
	return &EntityDB{
		db:       db,
		cfg:      cfg,
		features: features,
		log:      log.New("entity-db"),
		tracer:   tracer,
	}, nil
}
// EntityDB lazily constructs and caches the database engine used by the
// entity API, either from explicit [entity_api] settings or by reusing the
// main Grafana database connection.
type EntityDB struct {
	once    sync.Once // guards the one-time engine initialization in init()
	onceErr error     // result of that single init() run, returned by every Init()
	db       db.DB                      // fallback Grafana DB connection (used when no db_type is configured)
	features featuremgmt.FeatureToggles // consulted by migrations (feature flag gating)
	engine   *xorm.Engine               // cached engine, set by init()
	cfg      *setting.Cfg
	log      log.Logger
	tracer   tracing.Tracer
}
// Init runs the one-time engine setup. It is safe to call repeatedly and
// concurrently; every call returns the result of the single init() execution.
func (db *EntityDB) Init() error {
	db.once.Do(func() {
		db.onceErr = db.init()
	})
	return db.onceErr
}
// GetEngine returns the initialized xorm engine, triggering Init on first use.
func (db *EntityDB) GetEngine() (*xorm.Engine, error) {
	if err := db.Init(); err != nil {
		return nil, err
	}
	// onceErr is necessarily nil here since Init just succeeded
	return db.engine, db.onceErr
}
// init performs the actual engine construction (run exactly once via Init).
// If [entity_api].db_type is set, a dedicated postgres/mysql connection is
// opened from those settings; otherwise the main Grafana engine is reused.
// Entity store migrations are applied before the engine is considered ready.
func (db *EntityDB) init() error {
	if db.engine != nil {
		return nil
	}
	var engine *xorm.Engine
	var err error
	getter := &sectionGetter{
		DynamicSection: db.cfg.SectionWithEnvOverrides("entity_api"),
	}
	dbType := getter.Key("db_type").MustString("")
	// if explicit connection settings are provided, use them
	if dbType != "" {
		if dbType == "postgres" {
			engine, err = getEnginePostgres(getter, db.tracer)
			if err != nil {
				return err
			}
			// FIXME: this config option is cockroachdb-specific, it's not supported by postgres
			// FIXME: this only sets this option for the session that we get
			// from the pool right now. A *sql.DB is a pool of connections,
			// there is no guarantee that the session where this is run will be
			// the same where we need to change the type of a column
			_, err = engine.Exec("SET SESSION enable_experimental_alter_column_type_general=true")
			if err != nil {
				// deliberately non-fatal: plain postgres rejects this option
				db.log.Error("error connecting to postgres", "msg", err.Error())
				// FIXME: return nil, err
			}
		} else if dbType == "mysql" {
			engine, err = getEngineMySQL(getter, db.tracer)
			if err != nil {
				return err
			}
			if err = engine.Ping(); err != nil {
				return err
			}
		} else {
			// TODO: sqlite support
			return fmt.Errorf("invalid db type specified: %s", dbType)
		}
		// register sql stat metrics
		if err := prometheus.Register(sqlstats.NewStatsCollector("unified_storage", engine.DB().DB)); err != nil {
			// non-fatal: collector may already be registered
			db.log.Warn("Failed to register unified storage sql stats collector", "error", err)
		}
		// configure sql logging
		debugSQL := getter.Key("log_queries").MustBool(false)
		if !debugSQL {
			engine.SetLogger(&xorm.DiscardLogger{})
		} else {
			// add stack to database calls to be able to see what repository initiated queries. Top 7 items from the stack as they are likely in the xorm library.
			// engine.SetLogger(sqlstore.NewXormLogger(log.LvlInfo, log.WithSuffix(log.New("sqlstore.xorm"), log.CallerContextKey, log.StackCaller(log.DefaultCallerDepth))))
			engine.ShowSQL(true)
			engine.ShowExecTime(true)
		}
		// otherwise, try to use the grafana db connection
	} else {
		if db.db == nil {
			return fmt.Errorf("no db connection provided")
		}
		engine = db.db.GetEngine()
	}
	db.engine = engine
	// drop the cached engine on migration failure so the struct is not left
	// pointing at a half-migrated database
	if err := migrations.MigrateEntityStore(engine, db.cfg, db.features); err != nil {
		db.engine = nil
		return fmt.Errorf("run migrations: %w", err)
	}
	return nil
}
// GetSession wraps the engine's *sql.DB in a legacy session helper.
// TODO: deprecated (see EntityDBInterface); prefer GetDB.
func (db *EntityDB) GetSession() (*session.SessionDB, error) {
	engine, err := db.GetEngine()
	if err != nil {
		return nil, err
	}
	return session.GetSession(sqlx.NewDb(engine.DB().DB, engine.DriverName())), nil
}
// GetCfg returns the Grafana configuration this EntityDB was built with.
func (db *EntityDB) GetCfg() *setting.Cfg {
	return db.cfg
}
// GetDB returns the thin entitydb.DB abstraction over the engine's *sql.DB,
// initializing the engine on first use.
func (db *EntityDB) GetDB() (entitydb.DB, error) {
	engine, err := db.GetEngine()
	if err != nil {
		return nil, err
	}
	return NewDB(engine.DB().DB, engine.Dialect().DriverName()), nil
}

View File

@ -1,111 +0,0 @@
package dbimpl
import (
"cmp"
"errors"
"fmt"
"net"
"sort"
"strings"
"unicode/utf8"
"github.com/grafana/grafana/pkg/setting"
)
var (
	// ErrInvalidUTF8Sequence is returned (wrapped) whenever a configuration
	// value or DSN component is not a valid UTF-8 string.
	ErrInvalidUTF8Sequence = errors.New("invalid UTF-8 sequence")
)
// sectionGetter wraps a DynamicSection and records the first validation error
// seen while reading values, so callers can read many keys and check once.
type sectionGetter struct {
	*setting.DynamicSection
	err error // first validation error, if any; see String and Err
}
// Err returns the first error recorded while reading section values, or nil.
func (g *sectionGetter) Err() error {
	return g.err
}
// String reads key from the section. If the value is not valid UTF-8 it
// returns "" and records a wrapped ErrInvalidUTF8Sequence (see Err).
func (g *sectionGetter) String(key string) string {
	value := g.DynamicSection.Key(key).MustString("")
	if utf8.ValidString(value) {
		return value
	}
	g.err = fmt.Errorf("value for key %q: %w", key, ErrInvalidUTF8Sequence)
	return ""
}
// MakeDSN creates a DSN from the given key/value pairs. Keys are emitted in
// sorted order for deterministic output, values are validated as UTF-8 and
// quoted/escaped when necessary; pairs with empty values are omitted.
func MakeDSN(m map[string]string) (string, error) {
	names := keys(m)
	sort.Strings(names) // provide deterministic behaviour

	var b strings.Builder
	for _, name := range names {
		value := m[name]
		if !utf8.ValidString(value) {
			return "", fmt.Errorf("value for DSN key %q: %w", name,
				ErrInvalidUTF8Sequence)
		}
		if value == "" {
			// empty values are left out of the DSN entirely
			continue
		}
		if b.Len() > 0 {
			_ = b.WriteByte(' ')
		}
		_, _ = b.WriteString(name)
		_ = b.WriteByte('=')
		writeDSNValue(&b, value)
	}

	return b.String(), nil
}
// keys returns the keys of m in unspecified order.
func keys(m map[string]string) []string {
	out := make([]string, 0, len(m))
	for name := range m {
		out = append(out, name)
	}
	return out
}
// writeDSNValue appends v to b. Values containing single quotes or
// backslashes (or empty values) are single-quoted with those characters
// backslash-escaped; everything else is written verbatim.
func writeDSNValue(b *strings.Builder, v string) {
	quotes := strings.Count(v, `'`)
	slashes := strings.Count(v, `\`)
	if quotes == 0 && slashes == 0 && v != "" {
		// fast path: nothing needs escaping
		_, _ = b.WriteString(v)
		return
	}

	// worst case: two enclosing quotes plus one escape per special character
	b.Grow(2 + quotes + slashes + len(v))
	_ = b.WriteByte('\'')
	for _, r := range v {
		switch r {
		case '\\', '\'':
			_ = b.WriteByte('\\')
		}
		_, _ = b.WriteRune(r)
	}
	_ = b.WriteByte('\'')
}
// splitHostPortDefault is similar to net.SplitHostPort, but will also accept a
// specification with no port and apply the default port instead. It also
// applies the given defaults if the results are empty strings.
func splitHostPortDefault(hostport, defaultHost, defaultPort string) (string, string, error) {
host, port, err := net.SplitHostPort(hostport)
if err != nil {
// try appending the port
host, port, err = net.SplitHostPort(hostport + ":" + defaultPort)
if err != nil {
return "", "", fmt.Errorf("invalid hostport: %q", hostport)
}
}
host = cmp.Or(host, defaultHost)
port = cmp.Or(port, defaultPort)
return host, port, nil
}

View File

@ -1,108 +0,0 @@
package dbimpl
import (
"fmt"
"testing"
"github.com/grafana/grafana/pkg/setting"
"github.com/stretchr/testify/require"
)
// invalidUTF8ByteSequence is a byte string that cannot be decoded as UTF-8,
// used to exercise validation failures.
var invalidUTF8ByteSequence = []byte{0xff, 0xfe, 0xfd}
// setSectionKeyValues copies every key/value pair from m into the section.
func setSectionKeyValues(section *setting.DynamicSection, m map[string]string) {
	for k, v := range m {
		section.Key(k).SetValue(v)
	}
}
// newTestSectionGetter builds a sectionGetter over an [entity_api] section
// pre-populated with the given key/value pairs.
func newTestSectionGetter(m map[string]string) *sectionGetter {
	section := setting.NewCfg().SectionWithEnvOverrides("entity_api")
	setSectionKeyValues(section, m)
	return &sectionGetter{
		DynamicSection: section,
	}
}
// TestSectionGetter checks that String returns "" without error for missing
// keys, and records a sticky ErrInvalidUTF8Sequence for invalid values.
func TestSectionGetter(t *testing.T) {
	t.Parallel()
	var (
		key = "the key"
		val = string(invalidUTF8ByteSequence)
	)
	g := newTestSectionGetter(map[string]string{
		key: val,
	})
	// unknown key: empty value, no error recorded
	v := g.String("whatever")
	require.Empty(t, v)
	require.NoError(t, g.Err())
	// invalid UTF-8 value: empty result and wrapped sentinel error
	v = g.String(key)
	require.Empty(t, v)
	require.Error(t, g.Err())
	require.ErrorIs(t, g.Err(), ErrInvalidUTF8Sequence)
}
// TestMakeDSN checks UTF-8 validation, omission of empty values, sorted key
// order, and quoting/escaping of special characters.
func TestMakeDSN(t *testing.T) {
	t.Parallel()
	// invalid UTF-8 values must be rejected with the sentinel error
	s, err := MakeDSN(map[string]string{
		"db_name": string(invalidUTF8ByteSequence),
	})
	require.Empty(t, s)
	require.Error(t, err)
	require.ErrorIs(t, err, ErrInvalidUTF8Sequence)
	// "skip" is dropped (empty value); keys come out sorted; quotes and
	// backslashes are escaped inside single quotes
	s, err = MakeDSN(map[string]string{
		"skip": "",
		"user": `shou'ld esc\ape`,
		"pass": "noescape",
	})
	require.NoError(t, err)
	require.Equal(t, `pass=noescape user='shou\'ld esc\\ape'`, s)
}
// TestSplitHostPort is a table test over splitHostPortDefault covering
// host:port pairs, bare hosts, bare ports, IPv6 literals, empty input with
// defaults, and unparseable input.
func TestSplitHostPort(t *testing.T) {
	t.Parallel()
	testCases := []struct {
		hostport    string // input under test
		defaultHost string // applied when the parsed host is empty
		defaultPort string // applied/appended when the port is missing
		fails       bool   // expect an error
		host        string // expected host on success
		port        string // expected port on success
	}{
		{hostport: "192.168.0.140:456", defaultHost: "", defaultPort: "", host: "192.168.0.140", port: "456"},
		{hostport: "192.168.0.140", defaultHost: "", defaultPort: "123", host: "192.168.0.140", port: "123"},
		{hostport: "[::1]:456", defaultHost: "", defaultPort: "", host: "::1", port: "456"},
		{hostport: "[::1]", defaultHost: "", defaultPort: "123", host: "::1", port: "123"},
		{hostport: ":456", defaultHost: "1.2.3.4", defaultPort: "", host: "1.2.3.4", port: "456"},
		{hostport: "xyz.rds.amazonaws.com", defaultHost: "", defaultPort: "123", host: "xyz.rds.amazonaws.com", port: "123"},
		{hostport: "xyz.rds.amazonaws.com:123", defaultHost: "", defaultPort: "", host: "xyz.rds.amazonaws.com", port: "123"},
		{hostport: "", defaultHost: "localhost", defaultPort: "1433", host: "localhost", port: "1433"},
		{hostport: "1:1:1", fails: true},
	}
	for i, tc := range testCases {
		t.Run(fmt.Sprintf("test index #%d", i), func(t *testing.T) {
			t.Parallel()
			host, port, err := splitHostPortDefault(tc.hostport, tc.defaultHost, tc.defaultPort)
			if tc.fails {
				require.Error(t, err)
				require.Empty(t, host)
				require.Empty(t, port)
			} else {
				require.NoError(t, err)
				require.Equal(t, tc.host, host)
				require.Equal(t, tc.port, port)
			}
		})
	}
}

View File

@ -1,214 +0,0 @@
package migrations
import (
"fmt"
"github.com/grafana/grafana/pkg/services/sqlstore/migrator"
)
// initEntityTables registers the schema migrations for every entity store
// table (entity, entity_history, entity_folder, entity_labels, entity_ref,
// kind_version) and returns the marker string used as the migration key.
// Each table is dropped and recreated, so changing the marker rewrites the
// whole schema from scratch.
func initEntityTables(mg *migrator.Migrator) string {
	marker := "Initialize entity tables (v15)" // changing this key wipe+rewrite everything
	mg.AddMigration(marker, &migrator.RawSQLMigration{})
	tables := []migrator.Table{}
	// "entity" stores the latest version of every object.
	tables = append(tables, migrator.Table{
		Name: "entity",
		Columns: []*migrator.Column{
			// primary identifier
			{Name: "guid", Type: migrator.DB_NVarchar, Length: 36, Nullable: false, IsPrimaryKey: true},
			{Name: "resource_version", Type: migrator.DB_BigInt, Nullable: false},
			// The entity identifier (TODO: remove -- this is a duplicate)
			{Name: "key", Type: migrator.DB_Text, Nullable: false},
			// K8s Identity group+(version)+namespace+resource+name
			{Name: "group", Type: migrator.DB_NVarchar, Length: 190, Nullable: false},
			{Name: "group_version", Type: migrator.DB_NVarchar, Length: 190, Nullable: false},
			{Name: "resource", Type: migrator.DB_NVarchar, Length: 190, Nullable: false},
			{Name: "namespace", Type: migrator.DB_NVarchar, Length: 63, Nullable: false},
			{Name: "name", Type: migrator.DB_NVarchar, Length: 190, Nullable: false},
			{Name: "folder", Type: migrator.DB_NVarchar, Length: 190, Nullable: false}, // uid of folder
			// The raw entity body (any byte array)
			{Name: "meta", Type: migrator.DB_Text, Nullable: true},     // raw meta object from k8s (with standard stuff removed)
			{Name: "body", Type: migrator.DB_LongText, Nullable: true}, // null when nested or remote
			{Name: "status", Type: migrator.DB_Text, Nullable: true},   // raw status object
			{Name: "size", Type: migrator.DB_BigInt, Nullable: false},
			{Name: "etag", Type: migrator.DB_NVarchar, Length: 32, Nullable: false, IsLatin: true}, // md5(body)
			// Who changed what when
			{Name: "created_at", Type: migrator.DB_BigInt, Nullable: false},
			{Name: "created_by", Type: migrator.DB_NVarchar, Length: 190, Nullable: false},
			{Name: "updated_at", Type: migrator.DB_BigInt, Nullable: false},
			{Name: "updated_by", Type: migrator.DB_NVarchar, Length: 190, Nullable: false},
			// Mark objects with origin metadata
			{Name: "origin", Type: migrator.DB_NVarchar, Length: 40, Nullable: false},
			{Name: "origin_key", Type: migrator.DB_Text, Nullable: false},
			{Name: "origin_ts", Type: migrator.DB_BigInt, Nullable: false},
			// Metadata
			{Name: "title", Type: migrator.DB_NVarchar, Length: 190, Nullable: false},
			{Name: "slug", Type: migrator.DB_NVarchar, Length: 190, Nullable: false}, // from title
			{Name: "description", Type: migrator.DB_Text, Nullable: true},
			// Commit message
			{Name: "message", Type: migrator.DB_Text, Nullable: false}, // defaults to empty string
			{Name: "labels", Type: migrator.DB_Text, Nullable: true},   // JSON object
			{Name: "fields", Type: migrator.DB_Text, Nullable: true},   // JSON object
			{Name: "errors", Type: migrator.DB_Text, Nullable: true},   // JSON object
			{Name: "action", Type: migrator.DB_Int, Nullable: false},   // 1: create, 2: update, 3: delete
		},
		Indices: []*migrator.Index{
			// The keys are ordered for efficiency in mysql queries, not URL consistency
			{Cols: []string{"namespace", "group", "resource", "name"}, Type: migrator.UniqueIndex}, // == key
			{Cols: []string{"folder"}, Type: migrator.IndexType},
		},
	})
	// "entity_history" stores every version of every entity.
	tables = append(tables, migrator.Table{
		Name: "entity_history",
		Columns: []*migrator.Column{
			// only difference from entity table is that we store multiple versions of the same entity
			// so we have a unique index on guid+version instead of guid as primary key
			{Name: "guid", Type: migrator.DB_NVarchar, Length: 36, Nullable: false},
			{Name: "resource_version", Type: migrator.DB_BigInt, Nullable: false},
			// The entity identifier (TODO: remove -- this is a duplicate)
			{Name: "key", Type: migrator.DB_Text, Nullable: false},
			// K8s Identity group+(version)+namespace+resource+name
			{Name: "group", Type: migrator.DB_NVarchar, Length: 190, Nullable: false},
			{Name: "group_version", Type: migrator.DB_NVarchar, Length: 190, Nullable: false},
			{Name: "resource", Type: migrator.DB_NVarchar, Length: 190, Nullable: false},
			{Name: "namespace", Type: migrator.DB_NVarchar, Length: 63, Nullable: false},
			{Name: "name", Type: migrator.DB_NVarchar, Length: 190, Nullable: false},
			{Name: "folder", Type: migrator.DB_NVarchar, Length: 190, Nullable: false}, // uid of folder
			{Name: "access", Type: migrator.DB_Text, Nullable: true},                   // JSON object
			// The raw entity body (any byte array)
			{Name: "meta", Type: migrator.DB_Text, Nullable: true},     // raw meta object from k8s (with standard stuff removed)
			{Name: "body", Type: migrator.DB_LongText, Nullable: true}, // null when nested or remote
			{Name: "status", Type: migrator.DB_Text, Nullable: true},   // raw status object
			{Name: "size", Type: migrator.DB_BigInt, Nullable: false},
			{Name: "etag", Type: migrator.DB_NVarchar, Length: 32, Nullable: false, IsLatin: true}, // md5(body)
			// Who changed what when
			{Name: "created_at", Type: migrator.DB_BigInt, Nullable: false},
			{Name: "created_by", Type: migrator.DB_NVarchar, Length: 190, Nullable: false},
			{Name: "updated_at", Type: migrator.DB_BigInt, Nullable: false},
			{Name: "updated_by", Type: migrator.DB_NVarchar, Length: 190, Nullable: false},
			// Mark objects with origin metadata
			{Name: "origin", Type: migrator.DB_NVarchar, Length: 40, Nullable: false},
			{Name: "origin_key", Type: migrator.DB_Text, Nullable: false},
			{Name: "origin_ts", Type: migrator.DB_BigInt, Nullable: false},
			// Metadata
			{Name: "title", Type: migrator.DB_NVarchar, Length: 190, Nullable: false},
			{Name: "slug", Type: migrator.DB_NVarchar, Length: 190, Nullable: false}, // from title
			{Name: "description", Type: migrator.DB_Text, Nullable: true},
			// Commit message
			{Name: "message", Type: migrator.DB_Text, Nullable: false}, // defaults to empty string
			{Name: "labels", Type: migrator.DB_Text, Nullable: true},   // JSON object
			{Name: "fields", Type: migrator.DB_Text, Nullable: true},   // JSON object
			{Name: "errors", Type: migrator.DB_Text, Nullable: true},   // JSON object
			{Name: "action", Type: migrator.DB_Int, Nullable: false},   // 1: create, 2: update, 3: delete
		},
		Indices: []*migrator.Index{
			{Cols: []string{"guid", "resource_version"}, Type: migrator.UniqueIndex},
			{
				Cols: []string{"namespace", "group", "resource", "name", "resource_version"},
				Type: migrator.UniqueIndex,
				Name: "UQE_entity_history_namespace_group_name_version",
			},
			// index to support watch poller
			{Cols: []string{"resource_version"}, Type: migrator.IndexType},
		},
	})
	// when saving a folder, keep a path version cached (all info is derived from entity table)
	tables = append(tables, migrator.Table{
		Name: "entity_folder",
		Columns: []*migrator.Column{
			{Name: "guid", Type: migrator.DB_NVarchar, Length: 36, Nullable: false, IsPrimaryKey: true},
			{Name: "namespace", Type: migrator.DB_NVarchar, Length: 63, Nullable: false},
			{Name: "name", Type: migrator.DB_NVarchar, Length: 190, Nullable: false},
			{Name: "slug_path", Type: migrator.DB_Text, Nullable: false}, // /slug/slug/slug/
			{Name: "tree", Type: migrator.DB_Text, Nullable: false},      // JSON []{uid, title}
			{Name: "depth", Type: migrator.DB_Int, Nullable: false},      // starts at 1
			{Name: "lft", Type: migrator.DB_Int, Nullable: false},        // MPTT
			{Name: "rgt", Type: migrator.DB_Int, Nullable: false},        // MPTT
			{Name: "detached", Type: migrator.DB_Bool, Nullable: false},  // a parent folder was not found
		},
	})
	// label key/value pairs, one row per label, for label-selector queries
	tables = append(tables, migrator.Table{
		Name: "entity_labels",
		Columns: []*migrator.Column{
			{Name: "guid", Type: migrator.DB_NVarchar, Length: 36, Nullable: false},
			{Name: "label", Type: migrator.DB_NVarchar, Length: 190, Nullable: false},
			{Name: "value", Type: migrator.DB_Text, Nullable: false},
		},
		Indices: []*migrator.Index{
			{Cols: []string{"guid", "label"}, Type: migrator.UniqueIndex},
		},
	})
	// cross-entity references extracted from entity bodies
	tables = append(tables, migrator.Table{
		Name: "entity_ref",
		Columns: []*migrator.Column{
			// Source:
			{Name: "guid", Type: migrator.DB_NVarchar, Length: 36, Nullable: false},
			// Address (defined in the body, not resolved, may be invalid and change)
			{Name: "namespace", Type: migrator.DB_NVarchar, Length: 63, Nullable: false},
			{Name: "group", Type: migrator.DB_NVarchar, Length: 190, Nullable: false},
			{Name: "resource", Type: migrator.DB_NVarchar, Length: 190, Nullable: true},
			{Name: "name", Type: migrator.DB_NVarchar, Length: 190, Nullable: true},
			// Runtime calcs (will depend on the system state)
			{Name: "resolved_ok", Type: migrator.DB_Bool, Nullable: false},
			{Name: "resolved_to", Type: migrator.DB_NVarchar, Length: 36, Nullable: false},
			{Name: "resolved_warning", Type: migrator.DB_Text, Nullable: false},
			{Name: "resolved_time", Type: migrator.DB_DateTime, Nullable: false}, // resolution cache timestamp
		},
		Indices: []*migrator.Index{
			{Cols: []string{"guid"}, Type: migrator.IndexType},
			{Cols: []string{"namespace", "group", "resource", "name"}, Type: migrator.IndexType},
			{Cols: []string{"resolved_to"}, Type: migrator.IndexType},
		},
	})
	// per-kind resource version watermarks
	tables = append(tables, migrator.Table{
		Name: "kind_version",
		Columns: []*migrator.Column{
			{Name: "group", Type: migrator.DB_NVarchar, Length: 190, Nullable: false},
			{Name: "resource", Type: migrator.DB_NVarchar, Length: 190, Nullable: false},
			{Name: "resource_version", Type: migrator.DB_BigInt, Nullable: false},
			{Name: "created_at", Type: migrator.DB_BigInt, Nullable: false},
			{Name: "updated_at", Type: migrator.DB_BigInt, Nullable: false},
		},
		Indices: []*migrator.Index{
			{Cols: []string{"group", "resource"}, Type: migrator.UniqueIndex},
		},
	})
	// Initialize all tables
	for t := range tables {
		mg.AddMigration("drop table "+tables[t].Name, migrator.NewDropTableMigration(tables[t].Name))
		mg.AddMigration("create table "+tables[t].Name, migrator.NewAddTableMigration(tables[t]))
		for i := range tables[t].Indices {
			mg.AddMigration(fmt.Sprintf("create table %s, index: %d", tables[t].Name, i), migrator.NewAddIndexMigration(tables[t], tables[t].Indices[i]))
		}
	}
	return marker
}

View File

@ -1,24 +0,0 @@
package migrations
import (
"xorm.io/xorm"
"github.com/grafana/grafana/pkg/services/featuremgmt"
"github.com/grafana/grafana/pkg/services/sqlstore/migrator"
"github.com/grafana/grafana/pkg/setting"
)
// MigrateEntityStore runs the entity store schema migrations in their own
// "entity"-scoped migration log. It is a no-op unless the unified storage
// feature flag is enabled globally.
func MigrateEntityStore(engine *xorm.Engine, cfg *setting.Cfg, features featuremgmt.FeatureToggles) error {
	// Skip if feature flag is not enabled
	if !features.IsEnabledGlobally(featuremgmt.FlagUnifiedStorage) {
		return nil
	}
	mg := migrator.NewScopedMigrator(engine, cfg, "entity")
	mg.AddCreateMigration()
	initEntityTables(mg)
	// since it's a new feature enable migration locking by default
	return mg.Start(true, 0)
}

View File

@ -1,71 +0,0 @@
package db
import (
"context"
"database/sql"
"xorm.io/xorm"
"github.com/grafana/grafana/pkg/services/sqlstore/session"
"github.com/grafana/grafana/pkg/setting"
)
// Driver names accepted by the entity database layer.
const (
	DriverPostgres = "postgres"
	DriverMySQL    = "mysql"
	DriverSQLite   = "sqlite"
	DriverSQLite3  = "sqlite3"
)
// EntityDBInterface provides access to a database capable of supporting the
// Entity Server.
type EntityDBInterface interface {
	Init() error
	GetCfg() *setting.Cfg
	GetDB() (DB, error)
	// TODO: deprecate.
	GetSession() (*session.SessionDB, error)
	GetEngine() (*xorm.Engine, error)
}
// DB is a thin abstraction on *sql.DB to allow mocking to provide better unit
// testing. We purposefully hide database operation methods that would use
// context.Background().
type DB interface {
	ContextExecer
	BeginTx(context.Context, *sql.TxOptions) (Tx, error)
	WithTx(context.Context, *sql.TxOptions, TxFunc) error
	PingContext(context.Context) error
	Stats() sql.DBStats
	DriverName() string
}
// TxFunc is a function that executes with access to a transaction. The context
// it receives is the same context used to create the transaction, and is
// provided so that a general purpose TxFunc is able to retrieve information
// from that context, and derive other contexts that may be used to run database
// operation methods accepting a context. A derived context can be used to
// request a specific database operation to take no more than a specific
// fraction of the remaining timeout of the transaction context, or to enrich
// the downstream observability layer with relevant information regarding the
// specific operation being carried out.
type TxFunc = func(context.Context, Tx) error
// Tx is a thin abstraction on *sql.Tx to allow mocking to provide better unit
// testing. We allow database operation methods that do not take a
// context.Context here since a Tx can only be obtained with DB.BeginTx, which
// already takes a context.Context.
type Tx interface {
	ContextExecer
	Commit() error
	Rollback() error
}
// ContextExecer is a set of database operation methods that take
// context.Context.
type ContextExecer interface {
	ExecContext(ctx context.Context, query string, args ...any) (sql.Result, error)
	QueryContext(ctx context.Context, query string, args ...any) (*sql.Rows, error)
	QueryRowContext(ctx context.Context, query string, args ...any) *sql.Row
}

View File

@ -1,53 +0,0 @@
package dummy
import (
"context"
"fmt"
"github.com/grafana/grafana/pkg/services/store/entity"
)
// Make sure we implement both store + admin
var _ entity.EntityStoreServer = &fakeEntityStore{}
// ProvideFakeEntityServer returns an EntityStoreServer whose every method
// fails with "unimplemented". It exists as a dependency-injection placeholder.
func ProvideFakeEntityServer() entity.EntityStoreServer {
	return &fakeEntityStore{}
}
// fakeEntityStore is a stub implementation of entity.EntityStoreServer; all
// methods return an "unimplemented" error.
type fakeEntityStore struct{}
func (i fakeEntityStore) IsHealthy(ctx context.Context, r *entity.HealthCheckRequest) (*entity.HealthCheckResponse, error) {
	return nil, fmt.Errorf("unimplemented")
}
func (i fakeEntityStore) Create(ctx context.Context, r *entity.CreateEntityRequest) (*entity.CreateEntityResponse, error) {
	return nil, fmt.Errorf("unimplemented")
}
func (i fakeEntityStore) Update(ctx context.Context, r *entity.UpdateEntityRequest) (*entity.UpdateEntityResponse, error) {
	return nil, fmt.Errorf("unimplemented")
}
func (i fakeEntityStore) Read(ctx context.Context, r *entity.ReadEntityRequest) (*entity.Entity, error) {
	return nil, fmt.Errorf("unimplemented")
}
func (i fakeEntityStore) Delete(ctx context.Context, r *entity.DeleteEntityRequest) (*entity.DeleteEntityResponse, error) {
	return nil, fmt.Errorf("unimplemented")
}
func (i fakeEntityStore) History(ctx context.Context, r *entity.EntityHistoryRequest) (*entity.EntityHistoryResponse, error) {
	return nil, fmt.Errorf("unimplemented")
}
func (i fakeEntityStore) List(ctx context.Context, r *entity.EntityListRequest) (*entity.EntityListResponse, error) {
	return nil, fmt.Errorf("unimplemented")
}
func (i fakeEntityStore) Watch(entity.EntityStore_WatchServer) error {
	return fmt.Errorf("unimplemented")
}
func (i fakeEntityStore) FindReferences(ctx context.Context, r *entity.ReferenceRequest) (*entity.EntityListResponse, error) {
	return nil, fmt.Errorf("unimplemented")
}

File diff suppressed because it is too large Load Diff

View File

@ -1,474 +0,0 @@
syntax = "proto3";
package entity;
option go_package = "github.com/grafana/grafana/pkg/services/store/entity";
// The canonical entity/document data -- this represents the raw bytes and storage level metadata
message Entity {
  // Globally unique ID set by the system. This can not be set explicitly
  string guid = 1;

  // The resource version, this is a snowflake id controlled by storage
  int64 resource_version = 2;

  // group
  string group = 24;

  // kind resource
  string resource = 25;

  // namespace
  string namespace = 26;

  // k8s name
  string name = 27;

  // subresource
  string subresource = 28;

  // group version
  string group_version = 23;

  // k8s key value (TODO remove -- it is duplicate of group+resource+version)
  string key = 22;

  // The folder k8s name
  string folder = 4;

  // Raw meta from k8s
  bytes meta = 5;

  // Raw bytes of the storage entity. The kind will determine what is a valid payload
  bytes body = 6;

  // k8s style status (ignored for now)
  bytes status = 7;

  // the friendly name of the entity
  string title = 8;

  // Content Length
  int64 size = 9;

  // MD5 digest of the body
  string ETag = 10;

  // Time in epoch milliseconds that the entity was created
  int64 created_at = 11;

  // Who created the entity
  string created_by = 12;

  // Time in epoch milliseconds that the entity was updated
  int64 updated_at = 13;

  // Who updated the entity
  string updated_by = 14;

  // External location info
  EntityOriginInfo origin = 15;

  // human-readable description of the entity
  string description = 16;

  // URL safe version of the name. It will be unique within the folder
  string slug = 17;

  // Commit message (optional)
  string message = 18;

  // Key value pairs. Tags are represented as keys with empty values
  map<string,string> labels = 19;

  // Optional field values. The schema will define and document possible values for a given kind
  map<string, string> fields = 20;

  // When errors exist
  repeated EntityErrorInfo errors = 21;

  // Action code
  Action action = 3;

  // Status enumeration
  enum Action {
    UNKNOWN = 0;
    CREATED = 1;
    UPDATED = 2;
    DELETED = 3;
    ERROR = 4;
    BOOKMARK = 5;
  }
}
// This stores additional metadata for entities that were synced from external systems
message EntityOriginInfo {
  // identify the external source (plugin, git instance, etc)
  string source = 1;

  // Key in the upstream system (git hash, file path, etc)
  string key = 2;

  // Time in epoch milliseconds that the entity was last synced with an external system (provisioning/git)
  int64 time = 3;
}
// Report error while working with entities
// NOTE: real systems at scale will contain errors.
message EntityErrorInfo {
  // Match an error code registry?
  int64 code = 1;

  // Simple error display
  string message = 2;

  // Details encoded in JSON
  bytes details_json = 3;
}
//-----------------------------------------------
// Get request/response
//-----------------------------------------------

message ReadEntityRequest {
  // Entity identifier
  string key = 1;
  // Fetch an explicit version (default/zero is latest)
  int64 resource_version = 2;
  // NOTE(review): field number 3 is unused -- presumably removed at some point;
  // consider declaring `reserved 3;` so the tag cannot be accidentally reused.
  // Include the full body
  bool with_body = 4;
  // Include the status
  bool with_status = 5;
}
//-----------------------------------------------
// Create request/response
//-----------------------------------------------

message CreateEntityRequest {
  // Entity details (key, body, metadata) for the new entity
  Entity entity = 1;
}
message CreateEntityResponse {
  // Error info -- if set, the save did not happen
  EntityErrorInfo error = 1;
  // Entity details as stored
  Entity entity = 2;
  // Status code
  Status status = 3;
  // Status enumeration
  enum Status {
    ERROR = 0;
    CREATED = 1;
  }
}
//-----------------------------------------------
// Update request/response
//-----------------------------------------------

message UpdateEntityRequest {
  // Entity details
  Entity entity = 1;
  // Used for optimistic locking. If missing, the previous version will be replaced regardless
  int64 previous_version = 2;
}
message UpdateEntityResponse {
  // Error info -- if set, the save did not happen
  EntityErrorInfo error = 1;
  // Entity details as stored
  Entity entity = 2;
  // Status code
  Status status = 3;
  // Status enumeration (UNCHANGED means the write was a no-op)
  enum Status {
    ERROR = 0;
    UPDATED = 1;
    UNCHANGED = 2;
  }
}
//-----------------------------------------------
// Delete request/response
//-----------------------------------------------

message DeleteEntityRequest {
  // Entity identifier
  string key = 1;
  // Used for optimistic locking. If missing, the current version will be deleted regardless
  int64 previous_version = 2;
}
message DeleteEntityResponse {
  // Error info -- if set, the delete did not happen
  EntityErrorInfo error = 1;
  // Entity details as they were before deletion
  Entity entity = 2;
  // Status code
  Status status = 3;
  // Status enumeration
  enum Status {
    ERROR = 0;
    DELETED = 1;
    NOTFOUND = 2;
  }
}
//-----------------------------------------------
// History request/response
//-----------------------------------------------

message EntityHistoryRequest {
  // Entity identifier
  string key = 1;
  // return the history from before this version
  int64 before = 2;
  // Maximum number of items to return
  int64 limit = 3;
  // guid of the entity
  string guid = 4;
  // Starting from the requested page
  string next_page_token = 5;
  // NOTE(review): field numbers 6 and 9 are unused -- presumably removed;
  // consider `reserved 6, 9;` to prevent accidental reuse.
  // Sorting instructions `field ASC/DESC`
  repeated string sort = 7;
  // Return the full body in each payload
  bool with_body = 8;
  // Return the status in each payload
  bool with_status = 10;
}
message EntityHistoryResponse {
  // Entity identifier
  string key = 1;
  // Entity metadata without the raw bytes
  repeated Entity versions = 2;
  // More results exist... pass this in the next request
  string next_page_token = 3;
  // Resource version of the response
  int64 resource_version = 4;
}
//-----------------------------------------------
// List request/response
//-----------------------------------------------

message EntityListRequest {
  // Starting from the requested page (other query parameters must match!)
  string next_page_token = 1;
  // Maximum number of items to return
  int64 limit = 2;
  // Free text query string -- mileage may vary :)
  string query = 3;
  // limit to a specific group (empty is all)
  repeated string group = 9;
  // limit to a specific resource (empty is all)
  repeated string resource = 4;
  // limit to a specific key
  repeated string key = 11;
  // Limit results to items in a specific folder
  string folder = 5;
  // Must match all labels
  map<string,string> labels = 6;
  // Sorting instructions `field ASC/DESC`
  repeated string sort = 7;
  // Return the full body in each payload
  bool with_body = 8;
  // Return the status in each payload
  bool with_status = 10;
  // list deleted entities instead of active ones
  bool deleted = 12;
  // Deprecated: Limit to a set of origin keys (empty is all)
  repeated string origin_keys = 13;
}
// ReferenceRequest looks up entities that reference a given object.
// NOTE(review): several field comments below were copy-pasted from other
// messages ("free text query string") in the original; the field names
// suggest k8s-style object coordinates -- verify against callers.
message ReferenceRequest {
  // Starting from the requested page (other query parameters must match!)
  string next_page_token = 1;
  // Maximum number of items to return
  int64 limit = 2;
  // Namespace of the referenced object (assumption -- confirm)
  string namespace = 5;
  // Group of the referenced object (assumption -- confirm)
  string group = 6;
  // Resource (kind) of the referenced object (assumption -- confirm)
  string resource = 3;
  // Name of the referenced object (assumption -- confirm)
  string name = 4;
}
message EntityListResponse {
  // Matching entities for the current page
  repeated Entity results = 1;
  // More results exist... pass this in the next request
  string next_page_token = 2;
  // ResourceVersion of the list response
  int64 resource_version = 3;
}
//-----------------------------------------------
// Watch request/response
//-----------------------------------------------

message EntityWatchRequest {
  // Watch operation type
  enum WatchAction {
    START = 0;
    STOP = 1;
  }
  // Start or stop the watch
  WatchAction action = 8;
  // ResourceVersion of last changes. Empty will default to full history
  int64 since = 1;
  // Watch specific entities
  repeated string key = 2;
  // limit to a specific resource (empty is all)
  repeated string resource = 3;
  // Limit results to items in a specific folder
  string folder = 4;
  // Must match all labels
  map<string,string> labels = 5;
  // Return the full body in each payload
  bool with_body = 6;
  // Return the full status in each payload
  bool with_status = 7;
  // Return initial events
  bool send_initial_events = 9;
  // Allow server-generated BOOKMARK events on the stream
  bool allow_watch_bookmarks = 10;
}
message EntityWatchResponse {
  // Timestamp the event was sent
  int64 timestamp = 1;
  // Entity that was created, updated, or deleted
  Entity entity = 2;
  // previous version of the entity
  Entity previous = 3;
}
// EntitySummary is a kind-agnostic digest of an entity, suitable for
// listings and search results.
message EntitySummary {
  string UID = 1;
  string kind = 2;
  string name = 3;
  string description = 4;
  // Key value pairs. Tags are represented as keys with empty values
  map<string,string> labels = 5;
  // Parent folder UID
  string folder = 6;
  // URL safe version of the name. It will be unique within the folder
  string slug = 7;
  // When errors exist
  EntityErrorInfo error = 8;
  // Optional field values. The schema will define and document possible values for a given kind
  map<string, string> fields = 9;
  // eg: panels within dashboard
  repeated EntitySummary nested = 10;
  // Optional references to external things
  repeated EntityExternalReference references = 11;
}
// EntityExternalReference describes a dependency of an entity on
// something outside itself (a datasource, plugin, or runtime feature).
message EntityExternalReference {
  // Category of dependency
  // eg: datasource, plugin, runtime
  string family = 1;
  // datasource > prometheus|influx|...
  // plugin > panel | datasource
  // runtime > transformer
  string type = 2;
  // datasource > UID
  // plugin > plugin identifier
  // runtime > name lookup
  string identifier = 3;
}
//-----------------------------------------------
// Health
//-----------------------------------------------

// Mirrors grpc.health.v1.HealthCheckRequest
message HealthCheckRequest {
  // Service name to check (empty checks the overall server)
  string service = 1;
}
// Mirrors grpc.health.v1.HealthCheckResponse
message HealthCheckResponse {
  enum ServingStatus {
    UNKNOWN = 0;
    SERVING = 1;
    NOT_SERVING = 2;
    SERVICE_UNKNOWN = 3; // Used only by the Watch method.
  }
  ServingStatus status = 1;
}
//-----------------------------------------------
// Storage interface
//-----------------------------------------------

// The entity store provides a basic CRUD (+watch eventually) interface for generic entities
service EntityStore {
  // Fetch a single entity (optionally at an explicit version)
  rpc Read(ReadEntityRequest) returns (Entity);
  rpc Create(CreateEntityRequest) returns (CreateEntityResponse);
  rpc Update(UpdateEntityRequest) returns (UpdateEntityResponse);
  rpc Delete(DeleteEntityRequest) returns (DeleteEntityResponse);
  // Version history for one entity
  rpc History(EntityHistoryRequest) returns (EntityHistoryResponse);
  rpc List(EntityListRequest) returns (EntityListResponse);
  // Bidirectional stream: client sends watch requests, server pushes events
  rpc Watch(stream EntityWatchRequest) returns (stream EntityWatchResponse);
  rpc IsHealthy(HealthCheckRequest) returns (HealthCheckResponse);
}

View File

@ -1,411 +0,0 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.4.0
// - protoc (unknown)
// source: entity.proto
package entity
import (
context "context"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
)
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
// Requires gRPC-Go v1.62.0 or later.
const _ = grpc.SupportPackageIsVersion8

// Fully-qualified method names; used for Invoke/NewStream and in the
// UnaryServerInfo passed to interceptors.
const (
	EntityStore_Read_FullMethodName      = "/entity.EntityStore/Read"
	EntityStore_Create_FullMethodName    = "/entity.EntityStore/Create"
	EntityStore_Update_FullMethodName    = "/entity.EntityStore/Update"
	EntityStore_Delete_FullMethodName    = "/entity.EntityStore/Delete"
	EntityStore_History_FullMethodName   = "/entity.EntityStore/History"
	EntityStore_List_FullMethodName      = "/entity.EntityStore/List"
	EntityStore_Watch_FullMethodName     = "/entity.EntityStore/Watch"
	EntityStore_IsHealthy_FullMethodName = "/entity.EntityStore/IsHealthy"
)

// EntityStoreClient is the client API for EntityStore service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
//
// The entity store provides a basic CRUD (+watch eventually) interface for generic entities
type EntityStoreClient interface {
	Read(ctx context.Context, in *ReadEntityRequest, opts ...grpc.CallOption) (*Entity, error)
	Create(ctx context.Context, in *CreateEntityRequest, opts ...grpc.CallOption) (*CreateEntityResponse, error)
	Update(ctx context.Context, in *UpdateEntityRequest, opts ...grpc.CallOption) (*UpdateEntityResponse, error)
	Delete(ctx context.Context, in *DeleteEntityRequest, opts ...grpc.CallOption) (*DeleteEntityResponse, error)
	History(ctx context.Context, in *EntityHistoryRequest, opts ...grpc.CallOption) (*EntityHistoryResponse, error)
	List(ctx context.Context, in *EntityListRequest, opts ...grpc.CallOption) (*EntityListResponse, error)
	// Watch opens the bidirectional watch stream.
	Watch(ctx context.Context, opts ...grpc.CallOption) (EntityStore_WatchClient, error)
	IsHealthy(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error)
}

// entityStoreClient forwards every call through the underlying connection.
type entityStoreClient struct {
	cc grpc.ClientConnInterface
}

// NewEntityStoreClient wraps an existing client connection.
func NewEntityStoreClient(cc grpc.ClientConnInterface) EntityStoreClient {
	return &entityStoreClient{cc}
}
// Unary client wrappers. Each prepends grpc.StaticMethod() to the call
// options (the method name is a compile-time constant) and decodes the
// reply into a freshly allocated response message.

func (c *entityStoreClient) Read(ctx context.Context, in *ReadEntityRequest, opts ...grpc.CallOption) (*Entity, error) {
	cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
	out := new(Entity)
	err := c.cc.Invoke(ctx, EntityStore_Read_FullMethodName, in, out, cOpts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

func (c *entityStoreClient) Create(ctx context.Context, in *CreateEntityRequest, opts ...grpc.CallOption) (*CreateEntityResponse, error) {
	cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
	out := new(CreateEntityResponse)
	err := c.cc.Invoke(ctx, EntityStore_Create_FullMethodName, in, out, cOpts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

func (c *entityStoreClient) Update(ctx context.Context, in *UpdateEntityRequest, opts ...grpc.CallOption) (*UpdateEntityResponse, error) {
	cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
	out := new(UpdateEntityResponse)
	err := c.cc.Invoke(ctx, EntityStore_Update_FullMethodName, in, out, cOpts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

func (c *entityStoreClient) Delete(ctx context.Context, in *DeleteEntityRequest, opts ...grpc.CallOption) (*DeleteEntityResponse, error) {
	cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
	out := new(DeleteEntityResponse)
	err := c.cc.Invoke(ctx, EntityStore_Delete_FullMethodName, in, out, cOpts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

func (c *entityStoreClient) History(ctx context.Context, in *EntityHistoryRequest, opts ...grpc.CallOption) (*EntityHistoryResponse, error) {
	cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
	out := new(EntityHistoryResponse)
	err := c.cc.Invoke(ctx, EntityStore_History_FullMethodName, in, out, cOpts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

func (c *entityStoreClient) List(ctx context.Context, in *EntityListRequest, opts ...grpc.CallOption) (*EntityListResponse, error) {
	cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
	out := new(EntityListResponse)
	err := c.cc.Invoke(ctx, EntityStore_List_FullMethodName, in, out, cOpts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

// Watch opens the bidirectional watch stream (Streams[0] in the service desc).
func (c *entityStoreClient) Watch(ctx context.Context, opts ...grpc.CallOption) (EntityStore_WatchClient, error) {
	cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
	stream, err := c.cc.NewStream(ctx, &EntityStore_ServiceDesc.Streams[0], EntityStore_Watch_FullMethodName, cOpts...)
	if err != nil {
		return nil, err
	}
	x := &entityStoreWatchClient{ClientStream: stream}
	return x, nil
}

// EntityStore_WatchClient is the client-side view of the watch stream.
type EntityStore_WatchClient interface {
	Send(*EntityWatchRequest) error
	Recv() (*EntityWatchResponse, error)
	grpc.ClientStream
}

type entityStoreWatchClient struct {
	grpc.ClientStream
}

func (x *entityStoreWatchClient) Send(m *EntityWatchRequest) error {
	return x.ClientStream.SendMsg(m)
}

func (x *entityStoreWatchClient) Recv() (*EntityWatchResponse, error) {
	m := new(EntityWatchResponse)
	if err := x.ClientStream.RecvMsg(m); err != nil {
		return nil, err
	}
	return m, nil
}

func (c *entityStoreClient) IsHealthy(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) {
	cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
	out := new(HealthCheckResponse)
	err := c.cc.Invoke(ctx, EntityStore_IsHealthy_FullMethodName, in, out, cOpts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}
// EntityStoreServer is the server API for EntityStore service.
// All implementations should embed UnimplementedEntityStoreServer
// for forward compatibility
//
// The entity store provides a basic CRUD (+watch eventually) interface for generic entities
type EntityStoreServer interface {
	Read(context.Context, *ReadEntityRequest) (*Entity, error)
	Create(context.Context, *CreateEntityRequest) (*CreateEntityResponse, error)
	Update(context.Context, *UpdateEntityRequest) (*UpdateEntityResponse, error)
	Delete(context.Context, *DeleteEntityRequest) (*DeleteEntityResponse, error)
	History(context.Context, *EntityHistoryRequest) (*EntityHistoryResponse, error)
	List(context.Context, *EntityListRequest) (*EntityListResponse, error)
	Watch(EntityStore_WatchServer) error
	IsHealthy(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error)
}

// UnimplementedEntityStoreServer should be embedded to have forward compatible implementations.
// Every method returns codes.Unimplemented until overridden by the embedder.
type UnimplementedEntityStoreServer struct {
}

func (UnimplementedEntityStoreServer) Read(context.Context, *ReadEntityRequest) (*Entity, error) {
	return nil, status.Errorf(codes.Unimplemented, "method Read not implemented")
}
func (UnimplementedEntityStoreServer) Create(context.Context, *CreateEntityRequest) (*CreateEntityResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method Create not implemented")
}
func (UnimplementedEntityStoreServer) Update(context.Context, *UpdateEntityRequest) (*UpdateEntityResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method Update not implemented")
}
func (UnimplementedEntityStoreServer) Delete(context.Context, *DeleteEntityRequest) (*DeleteEntityResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method Delete not implemented")
}
func (UnimplementedEntityStoreServer) History(context.Context, *EntityHistoryRequest) (*EntityHistoryResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method History not implemented")
}
func (UnimplementedEntityStoreServer) List(context.Context, *EntityListRequest) (*EntityListResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method List not implemented")
}
func (UnimplementedEntityStoreServer) Watch(EntityStore_WatchServer) error {
	return status.Errorf(codes.Unimplemented, "method Watch not implemented")
}
func (UnimplementedEntityStoreServer) IsHealthy(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method IsHealthy not implemented")
}

// UnsafeEntityStoreServer may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to EntityStoreServer will
// result in compilation errors.
type UnsafeEntityStoreServer interface {
	mustEmbedUnimplementedEntityStoreServer()
}

// RegisterEntityStoreServer attaches an EntityStore implementation to a gRPC registrar.
func RegisterEntityStoreServer(s grpc.ServiceRegistrar, srv EntityStoreServer) {
	s.RegisterService(&EntityStore_ServiceDesc, srv)
}
// Server-side method handlers. Each decodes the request, then either calls
// the implementation directly or routes through the registered unary
// interceptor chain.

func _EntityStore_Read_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(ReadEntityRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(EntityStoreServer).Read(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: EntityStore_Read_FullMethodName,
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(EntityStoreServer).Read(ctx, req.(*ReadEntityRequest))
	}
	return interceptor(ctx, in, info, handler)
}

func _EntityStore_Create_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(CreateEntityRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(EntityStoreServer).Create(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: EntityStore_Create_FullMethodName,
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(EntityStoreServer).Create(ctx, req.(*CreateEntityRequest))
	}
	return interceptor(ctx, in, info, handler)
}

func _EntityStore_Update_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(UpdateEntityRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(EntityStoreServer).Update(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: EntityStore_Update_FullMethodName,
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(EntityStoreServer).Update(ctx, req.(*UpdateEntityRequest))
	}
	return interceptor(ctx, in, info, handler)
}

func _EntityStore_Delete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(DeleteEntityRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(EntityStoreServer).Delete(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: EntityStore_Delete_FullMethodName,
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(EntityStoreServer).Delete(ctx, req.(*DeleteEntityRequest))
	}
	return interceptor(ctx, in, info, handler)
}

func _EntityStore_History_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(EntityHistoryRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(EntityStoreServer).History(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: EntityStore_History_FullMethodName,
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(EntityStoreServer).History(ctx, req.(*EntityHistoryRequest))
	}
	return interceptor(ctx, in, info, handler)
}

func _EntityStore_List_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(EntityListRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(EntityStoreServer).List(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: EntityStore_List_FullMethodName,
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(EntityStoreServer).List(ctx, req.(*EntityListRequest))
	}
	return interceptor(ctx, in, info, handler)
}

// Watch is a bidi stream handler: it hands the raw server stream to the
// implementation wrapped in the typed Send/Recv adapter below.
func _EntityStore_Watch_Handler(srv interface{}, stream grpc.ServerStream) error {
	return srv.(EntityStoreServer).Watch(&entityStoreWatchServer{ServerStream: stream})
}

// EntityStore_WatchServer is the server-side view of the watch stream.
type EntityStore_WatchServer interface {
	Send(*EntityWatchResponse) error
	Recv() (*EntityWatchRequest, error)
	grpc.ServerStream
}

type entityStoreWatchServer struct {
	grpc.ServerStream
}

func (x *entityStoreWatchServer) Send(m *EntityWatchResponse) error {
	return x.ServerStream.SendMsg(m)
}

func (x *entityStoreWatchServer) Recv() (*EntityWatchRequest, error) {
	m := new(EntityWatchRequest)
	if err := x.ServerStream.RecvMsg(m); err != nil {
		return nil, err
	}
	return m, nil
}

func _EntityStore_IsHealthy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(HealthCheckRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(EntityStoreServer).IsHealthy(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: EntityStore_IsHealthy_FullMethodName,
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(EntityStoreServer).IsHealthy(ctx, req.(*HealthCheckRequest))
	}
	return interceptor(ctx, in, info, handler)
}
// EntityStore_ServiceDesc is the grpc.ServiceDesc for EntityStore service.
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy)
var EntityStore_ServiceDesc = grpc.ServiceDesc{
	ServiceName: "entity.EntityStore",
	HandlerType: (*EntityStoreServer)(nil),
	Methods: []grpc.MethodDesc{
		{
			MethodName: "Read",
			Handler:    _EntityStore_Read_Handler,
		},
		{
			MethodName: "Create",
			Handler:    _EntityStore_Create_Handler,
		},
		{
			MethodName: "Update",
			Handler:    _EntityStore_Update_Handler,
		},
		{
			MethodName: "Delete",
			Handler:    _EntityStore_Delete_Handler,
		},
		{
			MethodName: "History",
			Handler:    _EntityStore_History_Handler,
		},
		{
			MethodName: "List",
			Handler:    _EntityStore_List_Handler,
		},
		{
			MethodName: "IsHealthy",
			Handler:    _EntityStore_IsHealthy_Handler,
		},
	},
	Streams: []grpc.StreamDesc{
		{
			// Watch is the only streaming method: bidirectional.
			StreamName:    "Watch",
			Handler:       _EntityStore_Watch_Handler,
			ServerStreams: true,
			ClientStreams: true,
		},
	},
	Metadata: "entity.proto",
}

View File

@ -1,88 +0,0 @@
package entity
import (
"context"
"errors"
"time"
grpcAuth "github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/auth"
"google.golang.org/grpc/health/grpc_health_v1"
)
// Compile-time assertion that healthServer satisfies HealthService.
var _ HealthService = &healthServer{}

// HealthService is a gRPC health server that additionally opts out of
// service auth (see AuthFuncOverride below).
type HealthService interface {
	grpc_health_v1.HealthServer
	grpcAuth.ServiceAuthFuncOverride
}
// ProvideHealthService returns a gRPC health server that reports the
// health of the given entity store.
func ProvideHealthService(server EntityStoreServer) (grpc_health_v1.HealthServer, error) {
	return &healthServer{entityServer: server}, nil
}
// healthServer adapts an EntityStoreServer's IsHealthy RPC to the
// standard grpc.health.v1 Check/Watch protocol.
type healthServer struct {
	entityServer EntityStoreServer
}
// AuthFuncOverride for no auth for health service.
// Returning the context unchanged tells the auth middleware to skip
// authentication for every health RPC.
func (s *healthServer) AuthFuncOverride(ctx context.Context, _ string) (context.Context, error) {
	return ctx, nil
}
// Check implements grpc_health_v1.HealthServer by asking the entity
// server whether it is healthy and translating the status enum
// (the two enums share numeric values, so the conversion is direct).
func (s *healthServer) Check(ctx context.Context, req *grpc_health_v1.HealthCheckRequest) (*grpc_health_v1.HealthCheckResponse, error) {
	res, err := s.entityServer.IsHealthy(ctx, &HealthCheckRequest{})
	if err != nil {
		return nil, err
	}
	serving := grpc_health_v1.HealthCheckResponse_ServingStatus(res.Status.Number())
	return &grpc_health_v1.HealthCheckResponse{Status: serving}, nil
}
// Watch implements grpc_health_v1.HealthServer. It sends the current
// health status immediately, then polls the entity server every 5
// seconds and pushes a new message only when the status changes.
// It returns when polling fails, a send fails, or the stream's
// context is cancelled.
func (s *healthServer) Watch(req *grpc_health_v1.HealthCheckRequest, stream grpc_health_v1.Health_WatchServer) error {
	h, err := s.entityServer.IsHealthy(stream.Context(), &HealthCheckRequest{})
	if err != nil {
		return err
	}

	// send initial health status
	err = stream.Send(&grpc_health_v1.HealthCheckResponse{
		Status: grpc_health_v1.HealthCheckResponse_ServingStatus(h.Status.Number()),
	})
	if err != nil {
		return err
	}

	currHealth := h.Status.Number()
	ticker := time.NewTicker(5 * time.Second)
	defer ticker.Stop()

	for {
		select {
		case <-ticker.C:
			// get current health status
			h, err := s.entityServer.IsHealthy(stream.Context(), &HealthCheckRequest{})
			if err != nil {
				return err
			}

			// if health status has not changed, continue
			if h.Status.Number() == currHealth {
				continue
			}

			// send the new health status
			currHealth = h.Status.Number()
			err = stream.Send(&grpc_health_v1.HealthCheckResponse{
				Status: grpc_health_v1.HealthCheckResponse_ServingStatus(h.Status.Number()),
			})
			if err != nil {
				return err
			}
		case <-stream.Context().Done():
			return errors.New("stream closed, context cancelled")
		}
	}
}

View File

@ -1,156 +0,0 @@
package entity
import (
"context"
"errors"
sync "sync"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"google.golang.org/grpc"
"google.golang.org/grpc/health/grpc_health_v1"
)
func TestHealthCheck(t *testing.T) {
t.Run("will return serving response when healthy", func(t *testing.T) {
stub := &entityStoreStub{healthResponse: HealthCheckResponse_SERVING}
svc, err := ProvideHealthService(stub)
require.NoError(t, err)
req := &grpc_health_v1.HealthCheckRequest{}
res, err := svc.Check(context.Background(), req)
require.NoError(t, err)
assert.Equal(t, grpc_health_v1.HealthCheckResponse_SERVING, res.Status)
})
t.Run("will return not serving response when not healthy", func(t *testing.T) {
stub := &entityStoreStub{healthResponse: HealthCheckResponse_NOT_SERVING}
svc, err := ProvideHealthService(stub)
require.NoError(t, err)
req := &grpc_health_v1.HealthCheckRequest{}
res, err := svc.Check(context.Background(), req)
require.NoError(t, err)
assert.Equal(t, grpc_health_v1.HealthCheckResponse_NOT_SERVING, res.Status)
})
}
// TestHealthWatch verifies the streaming Watch behavior: an initial
// status message is sent promptly, and the stream terminates with an
// error once its context is cancelled.
//
// Fixes from the previous version: require.NoError was called inside a
// spawned goroutine (t.FailNow from a non-test goroutine is not allowed
// by the testing package), and the watcher goroutine was leaked because
// its stream context was never cancelled. The error is now delivered
// over a channel and asserted on the test goroutine, and the context is
// cancelled so the goroutine exits before the subtest returns.
func TestHealthWatch(t *testing.T) {
	t.Run("watch will return message when called", func(t *testing.T) {
		stub := &entityStoreStub{healthResponse: HealthCheckResponse_SERVING}
		svc, err := ProvideHealthService(stub)
		require.NoError(t, err)

		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()
		stream := &fakeHealthWatchServer{context: ctx}

		// Run Watch in the background; it blocks until the context ends.
		errCh := make(chan error, 1)
		go func() {
			errCh <- svc.Watch(&grpc_health_v1.HealthCheckRequest{}, stream)
		}()

		// Give Watch time to send the initial health status.
		time.Sleep(100 * time.Millisecond)
		require.NoError(t, stream.RecvMsg(nil))

		// Stop the watcher and confirm it exits (with the cancellation error).
		cancel()
		require.Error(t, <-errCh)
	})

	t.Run("watch will return error when context cancelled", func(t *testing.T) {
		stub := &entityStoreStub{healthResponse: HealthCheckResponse_NOT_SERVING}
		svc, err := ProvideHealthService(stub)
		require.NoError(t, err)

		ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
		defer cancel()
		stream := &fakeHealthWatchServer{context: ctx}

		err = svc.Watch(&grpc_health_v1.HealthCheckRequest{}, stream)
		require.Error(t, err)
	})
}
// Compile-time assertion that the stub satisfies the service interface.
var _ EntityStoreServer = &entityStoreStub{}

// entityStoreStub is a minimal EntityStoreServer whose IsHealthy result
// is controlled by the test; all other methods are no-ops.
type entityStoreStub struct {
	healthResponse HealthCheckResponse_ServingStatus
	error          error
}

func (s *entityStoreStub) IsHealthy(ctx context.Context, req *HealthCheckRequest) (*HealthCheckResponse, error) {
	if s.error != nil {
		return nil, s.error
	}

	return &HealthCheckResponse{Status: s.healthResponse}, nil
}

// Implement the EntityStoreServer methods
func (s *entityStoreStub) Create(ctx context.Context, r *CreateEntityRequest) (*CreateEntityResponse, error) {
	return nil, nil
}

func (s *entityStoreStub) Update(ctx context.Context, r *UpdateEntityRequest) (*UpdateEntityResponse, error) {
	return nil, nil
}

func (s *entityStoreStub) Read(ctx context.Context, r *ReadEntityRequest) (*Entity, error) {
	return nil, nil
}

func (s *entityStoreStub) Delete(ctx context.Context, r *DeleteEntityRequest) (*DeleteEntityResponse, error) {
	return nil, nil
}

func (s *entityStoreStub) History(ctx context.Context, r *EntityHistoryRequest) (*EntityHistoryResponse, error) {
	return nil, nil
}

func (s *entityStoreStub) List(ctx context.Context, r *EntityListRequest) (*EntityListResponse, error) {
	return nil, nil
}

func (s *entityStoreStub) Watch(EntityStore_WatchServer) error {
	return nil
}

// NOTE(review): FindReferences is not part of the EntityStoreServer
// interface shown above -- presumably kept for an older interface shape.
func (s *entityStoreStub) FindReferences(ctx context.Context, r *ReferenceRequest) (*EntityListResponse, error) {
	return nil, nil
}
// fakeHealthWatchServer is an in-memory Health_WatchServer: Send queues
// responses and RecvMsg pops one (erroring when none are queued). The
// mutex guards healthChecks because Send runs on the watcher goroutine
// while the test calls RecvMsg.
type fakeHealthWatchServer struct {
	mu sync.Mutex
	grpc.ServerStream
	healthChecks []*grpc_health_v1.HealthCheckResponse
	context      context.Context
}

func (f *fakeHealthWatchServer) Send(resp *grpc_health_v1.HealthCheckResponse) error {
	f.mu.Lock()
	defer f.mu.Unlock()
	f.healthChecks = append(f.healthChecks, resp)
	return nil
}

// RecvMsg discards the popped response rather than copying it into m --
// the tests only care that a message arrived.
func (f *fakeHealthWatchServer) RecvMsg(m interface{}) error {
	f.mu.Lock()
	defer f.mu.Unlock()
	if len(f.healthChecks) == 0 {
		return errors.New("no health checks received")
	}

	f.healthChecks = f.healthChecks[1:]
	return nil
}

func (f *fakeHealthWatchServer) SendMsg(m interface{}) error {
	return errors.New("not implemented")
}

// Context lazily defaults to context.Background so a zero-value fake works.
func (f *fakeHealthWatchServer) Context() context.Context {
	if f.context == nil {
		f.context = context.Background()
	}

	return f.context
}

View File

@ -8,7 +8,6 @@ import context "context"
const (
StandardKindDashboard = "dashboard"
StandardKindPlaylist = "playlist"
StandardKindFolder = "folder"
// StandardKindDataSource: not a real kind yet, but used to define references from dashboards
@ -19,9 +18,6 @@ const (
// Standalone panel is not an object kind yet -- library panel, or nested in dashboard
StandardKindPanel = "panel"
// StandardKindJSONObj generic json object
StandardKindJSONObj = "jsonobj"
// StandardKindQuery early development on panel query library
// the kind may need to change to better encapsulate { targets:[], transforms:[] }
StandardKindQuery = "query"
@ -51,3 +47,49 @@ const (
// EntitySummaryBuilder will read an object, validate it, and return a summary, sanitized payload, or an error
// This should not include values that depend on system state, only the raw object
type EntitySummaryBuilder = func(ctx context.Context, uid string, body []byte) (*EntitySummary, []byte, error)
type EntitySummary struct {
UID string `protobuf:"bytes,1,opt,name=UID,proto3" json:"UID,omitempty"`
Kind string `protobuf:"bytes,2,opt,name=kind,proto3" json:"kind,omitempty"`
Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"`
Description string `protobuf:"bytes,4,opt,name=description,proto3" json:"description,omitempty"`
// Key value pairs. Tags are are represented as keys with empty values
Labels map[string]string `protobuf:"bytes,5,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
// Parent folder UID
Folder string `protobuf:"bytes,6,opt,name=folder,proto3" json:"folder,omitempty"`
// URL safe version of the name. It will be unique within the folder
Slug string `protobuf:"bytes,7,opt,name=slug,proto3" json:"slug,omitempty"`
// When errors exist
Error *EntityErrorInfo `protobuf:"bytes,8,opt,name=error,proto3" json:"error,omitempty"`
// Optional field values. The schema will define and document possible values for a given kind
Fields map[string]string `protobuf:"bytes,9,rep,name=fields,proto3" json:"fields,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
// eg: panels within dashboard
Nested []*EntitySummary `protobuf:"bytes,10,rep,name=nested,proto3" json:"nested,omitempty"`
// Optional references to external things
References []*EntityExternalReference `protobuf:"bytes,11,rep,name=references,proto3" json:"references,omitempty"`
}
// Report error while working with entitys
// NOTE: real systems at scale will contain errors.
type EntityErrorInfo struct {
// Match an error code registry?
Code int64 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"`
// Simple error display
Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"`
// Details encoded in JSON
DetailsJson []byte `protobuf:"bytes,3,opt,name=details_json,json=detailsJson,proto3" json:"details_json,omitempty"`
}
// EntityExternalReference describes a dependency of an entity on something
// outside itself (a datasource, plugin, or runtime element).
//
// NOTE(review): this appears to be protobuf-generated code (see the struct
// tags) — regenerate from the .proto source rather than editing by hand.
type EntityExternalReference struct {
	// Category of dependency
	// eg: datasource, plugin, runtime
	Family string `protobuf:"bytes,1,opt,name=family,proto3" json:"family,omitempty"`

	// datasource > prometheus|influx|...
	// plugin > panel | datasource
	// runtime > transformer
	Type string `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"`

	// datasource > UID
	// plugin > plugin identifier
	// runtime > name lookup
	Identifier string `protobuf:"bytes,3,opt,name=identifier,proto3" json:"identifier,omitempty"`
}

View File

@ -1,52 +0,0 @@
package server
import (
"fmt"
"net"
"strconv"
// "github.com/grafana/grafana/pkg/services/featuremgmt"
"github.com/grafana/grafana/pkg/setting"
)
// config holds the resolved runtime settings for the standalone storage
// server, derived from the Grafana configuration by newConfig.
type config struct {
	enabled bool
	devMode bool // true when Grafana runs with env = development

	ip   net.IP
	port int
	host string // "ip:port" listen address

	apiURL string

	logLevel int
}
// newConfig derives the storage-server runtime configuration from the main
// Grafana config. In development mode it pins the server to 127.0.0.1:3001
// and raises the default log level; otherwise it reuses Grafana's HTTP
// address/port. The log level can always be overridden via the
// [storage-server] section.
func newConfig(cfg *setting.Cfg) *config {
	logLevel := 0

	// TODO
	addr := net.ParseIP(cfg.HTTPAddr)
	appURL := cfg.AppURL

	listenPort, err := strconv.Atoi(cfg.HTTPPort)
	if err != nil {
		// fall back to the dev-mode default when the port is not numeric
		listenPort = 3001
	}

	isDev := cfg.Env == setting.Dev
	if isDev {
		logLevel = 10
		listenPort = 3001
		addr = net.ParseIP("127.0.0.1")
		appURL = fmt.Sprintf("https://%s:%d", addr, listenPort)
	}

	return &config{
		enabled:  true,
		devMode:  isDev,
		ip:       addr,
		port:     listenPort,
		host:     fmt.Sprintf("%s:%d", addr, listenPort),
		logLevel: cfg.SectionWithEnvOverrides("storage-server").Key("log_level").MustInt(logLevel),
		apiURL:   appURL,
	}
}

View File

@ -1,167 +0,0 @@
package server
import (
"context"
"github.com/grafana/dskit/services"
"github.com/prometheus/client_golang/prometheus"
"google.golang.org/grpc/health/grpc_health_v1"
"github.com/grafana/grafana/pkg/infra/log"
"github.com/grafana/grafana/pkg/infra/tracing"
"github.com/grafana/grafana/pkg/modules"
"github.com/grafana/grafana/pkg/registry"
"github.com/grafana/grafana/pkg/services/featuremgmt"
"github.com/grafana/grafana/pkg/services/grpcserver"
"github.com/grafana/grafana/pkg/services/grpcserver/interceptors"
"github.com/grafana/grafana/pkg/services/store/entity"
"github.com/grafana/grafana/pkg/services/store/entity/db/dbimpl"
"github.com/grafana/grafana/pkg/services/store/entity/sqlstash"
"github.com/grafana/grafana/pkg/setting"
"github.com/grafana/grafana/pkg/storage/unified/resource/grpc"
)
// Compile-time checks that *service satisfies Service and the registry
// service contracts.
var (
	_ Service                    = (*service)(nil)
	_ registry.BackgroundService = (*service)(nil)
	_ registry.CanBeDisabled     = (*service)(nil)
)

// init is intentionally a no-op placeholder.
func init() {
	// do nothing
}

// Service is the storage-server service contract: a named dskit service that
// runs in the background and can be disabled via configuration.
type Service interface {
	services.NamedService
	registry.BackgroundService
	registry.CanBeDisabled
}
// service implements the standalone unified-storage gRPC server. It owns the
// gRPC handler, tracing, authentication and the lifecycle channels used by
// the dskit BasicService.
type service struct {
	*services.BasicService

	config *config
	cfg    *setting.Cfg

	features featuremgmt.FeatureToggles

	// stopCh is closed on context cancellation in dev mode (see running).
	stopCh chan struct{}
	// stoppedCh is read in running; nothing in this file writes to it —
	// presumably a server goroutine reports its exit here (TODO confirm).
	stoppedCh chan error

	handler grpcserver.Provider

	tracing *tracing.TracingService

	authenticator interceptors.Authenticator

	log log.Logger
}
// ProvideService builds the standalone storage server service. It wires up
// tracing (service name "unified-storage"), the gRPC authenticator, and the
// dskit BasicService lifecycle (start/running).
func ProvideService(
	cfg *setting.Cfg,
	features featuremgmt.FeatureToggles,
	log log.Logger,
) (*service, error) {
	tracingCfg, err := tracing.ProvideTracingConfig(cfg)
	if err != nil {
		return nil, err
	}
	tracingCfg.ServiceName = "unified-storage"

	// FIX: the local was previously named "tracing", shadowing the imported
	// tracing package for the rest of the function.
	tracer, err := tracing.ProvideService(tracingCfg)
	if err != nil {
		return nil, err
	}

	authn := &grpc.Authenticator{}

	s := &service{
		config:        newConfig(cfg),
		cfg:           cfg,
		features:      features,
		stopCh:        make(chan struct{}),
		authenticator: authn,
		tracing:       tracer,
		log:           log,
	}

	// This will be used when running as a dskit service
	s.BasicService = services.NewBasicService(s.start, s.running, nil).WithName(modules.StorageServer)

	return s, nil
}
// IsDisabled reports whether the storage server should not run.
// Note: newConfig currently always sets enabled=true.
func (s *service) IsDisabled() bool {
	return !s.config.enabled
}

// Run is an adapter for the BackgroundService interface.
// It performs startup synchronously, then blocks in running until shutdown.
func (s *service) Run(ctx context.Context) error {
	if err := s.start(ctx); err != nil {
		return err
	}

	return s.running(ctx)
}
// start wires up and launches the gRPC storage server: the entity database,
// the SQL-backed entity store, the gRPC provider, the health service, and the
// gRPC reflection service. The ordering matters: the store must exist before
// it is registered on the handler, and registration must precede Run.
func (s *service) start(ctx context.Context) error {
	// TODO: use wire

	// TODO: support using grafana db connection?
	eDB, err := dbimpl.ProvideEntityDB(nil, s.cfg, s.features, s.tracing)
	if err != nil {
		return err
	}

	err = eDB.Init()
	if err != nil {
		return err
	}

	store, err := sqlstash.ProvideSQLEntityServer(eDB, s.tracing)
	if err != nil {
		return err
	}

	s.handler, err = grpcserver.ProvideService(s.cfg, s.features, s.authenticator, s.tracing, prometheus.DefaultRegisterer)
	if err != nil {
		return err
	}

	healthService, err := entity.ProvideHealthService(store)
	if err != nil {
		return err
	}

	entity.RegisterEntityStoreServer(s.handler.GetServer(), store)
	grpc_health_v1.RegisterHealthServer(s.handler.GetServer(), healthService)

	// register reflection service
	_, err = grpcserver.ProvideReflectionService(s.cfg, s.handler)
	if err != nil {
		return err
	}

	err = s.handler.Run(ctx)
	if err != nil {
		return err
	}

	return nil
}
// running blocks until the service should stop. In prod mode it simply waits
// for context cancellation. In dev mode it additionally listens on stoppedCh
// for an early server exit and signals shutdown by closing stopCh.
// NOTE(review): nothing in this file writes to stoppedCh — confirm the
// intended writer before relying on the dev-mode branch.
func (s *service) running(ctx context.Context) error {
	// skip waiting for the server in prod mode
	if !s.config.devMode {
		<-ctx.Done()
		return nil
	}

	select {
	case err := <-s.stoppedCh:
		if err != nil {
			return err
		}
	case <-ctx.Done():
		close(s.stopCh)
	}

	return nil
}

View File

@ -1,134 +0,0 @@
package sqlstash
import (
"context"
"fmt"
"time"
"github.com/google/uuid"
folder "github.com/grafana/grafana/pkg/apis/folder/v0alpha1"
grafanaregistry "github.com/grafana/grafana/pkg/apiserver/registry/generic"
"github.com/grafana/grafana/pkg/services/store/entity"
"github.com/grafana/grafana/pkg/services/store/entity/db"
"github.com/grafana/grafana/pkg/storage/unified/sql/sqltemplate"
)
// Create inserts a new entity (and a matching entity_history row), registers
// its labels, and rebuilds the folder tree when the entity is itself a
// folder. Everything runs inside a single READ COMMITTED transaction; steps
// that do not need the kind_version lock are performed first to keep the
// locked window short.
func (s *sqlEntityServer) Create(ctx context.Context, r *entity.CreateEntityRequest) (*entity.CreateEntityResponse, error) {
	ctx, span := s.tracer.Start(ctx, "storage_server.Create")
	defer span.End()

	if err := s.Init(); err != nil {
		return nil, err
	}

	key, err := grafanaregistry.ParseKey(r.Entity.Key)
	if err != nil {
		return nil, fmt.Errorf("create entity: parse entity key: %w", err)
	}

	// validate and process the request to get the information we need to run
	// the query
	newEntity, err := entityForCreate(ctx, r, key)
	if err != nil {
		return nil, fmt.Errorf("create entity: entity from create entity request: %w", err)
	}

	err = s.sqlDB.WithTx(ctx, ReadCommitted, func(ctx context.Context, tx db.Tx) error {
		if len(newEntity.Entity.Labels) > 0 {
			// Pre-locking: register this entity's labels
			insLabels := sqlEntityLabelsInsertRequest{
				SQLTemplate: sqltemplate.New(s.sqlDialect),
				GUID:        newEntity.Guid,
				Labels:      newEntity.Entity.Labels,
			}
			if _, err = exec(ctx, tx, sqlEntityLabelsInsert, insLabels); err != nil {
				return fmt.Errorf("insert into entity_labels: %w", err)
			}
		}

		// up to this point, we have done all the work possible before having to
		// lock kind_version

		// 1. Atomically increment resource version for this kind
		newVersion, err := kindVersionAtomicInc(ctx, tx, s.sqlDialect, key.Group, key.Resource)
		if err != nil {
			return err
		}
		newEntity.ResourceVersion = newVersion

		// 2. Insert into entity
		insEntity := sqlEntityInsertRequest{
			SQLTemplate: sqltemplate.New(s.sqlDialect),
			Entity:      newEntity,
			TableEntity: true,
		}
		if _, err = exec(ctx, tx, sqlEntityInsert, insEntity); err != nil {
			return fmt.Errorf("insert into entity: %w", err)
		}

		// 3. Insert into entity history (same template, history table)
		insEntityHistory := sqlEntityInsertRequest{
			SQLTemplate: sqltemplate.New(s.sqlDialect),
			Entity:      newEntity,
		}
		if _, err = exec(ctx, tx, sqlEntityInsert, insEntityHistory); err != nil {
			return fmt.Errorf("insert into entity_history: %w", err)
		}

		// 4. Rebuild the whole folder tree structure if we're creating a folder
		if newEntity.Group == folder.GROUP && newEntity.Resource == folder.RESOURCE {
			if err = s.updateFolderTree(ctx, tx, key.Namespace); err != nil {
				return fmt.Errorf("rebuild folder tree structure: %w", err)
			}
		}

		return nil
	})
	if err != nil {
		// TODO: should we define the "Error" field here and how? (i.e. how
		// to determine what information can be disclosed to the user?)
		return nil, fmt.Errorf("create entity: %w", err)
	}

	return &entity.CreateEntityResponse{
		Entity: newEntity.Entity,
		Status: entity.CreateEntityResponse_CREATED,
	}, nil
}

// entityForCreate validates the given request and returns a *returnsEntity
// populated accordingly: serialized payload, fresh GUID, key-derived identity
// fields, size/etag, and created/updated audit fields stamped with the
// current user and time.
func entityForCreate(ctx context.Context, r *entity.CreateEntityRequest, key *grafanaregistry.Key) (*returnsEntity, error) {
	newEntity := &returnsEntity{
		Entity: cloneEntity(r.Entity),
	}
	if err := newEntity.marshal(); err != nil {
		return nil, fmt.Errorf("serialize entity data for db: %w", err)
	}

	createdAt := time.Now().UnixMilli()
	createdBy, err := getCurrentUser(ctx)
	if err != nil {
		return nil, err
	}

	newEntity.Guid = uuid.New().String()

	newEntity.Group = key.Group
	newEntity.Resource = key.Resource
	newEntity.Namespace = key.Namespace
	newEntity.Name = key.Name

	newEntity.Size = int64(len(r.Entity.Body))
	newEntity.ETag = createETag(r.Entity.Body, r.Entity.Meta, r.Entity.Status)

	// a freshly created entity carries identical created/updated audit fields
	newEntity.CreatedAt = createdAt
	newEntity.CreatedBy = createdBy
	newEntity.UpdatedAt = createdAt
	newEntity.UpdatedBy = createdBy

	newEntity.Action = entity.Entity_CREATED

	return newEntity, nil
}

View File

@ -1,61 +0,0 @@
{{/*
	This is the list of all the fields in *entity.Entity, in a way that is
	suitable to be imported by other templates that need to select these fields
	from either the "entity" or the "entity_history" tables.

	Example usage:

		SELECT {{ template "common_entity_select_into" . }}
			FROM {{ .Ident "entity" }} AS e

	NOTE: each column is scanned with .Into, so the source table MUST be
	aliased as "e" for the references below to resolve.
*/}}
{{ define "common_entity_select_into" }}
		e.{{ .Ident "guid" | .Into .Entity.Guid }},
		e.{{ .Ident "resource_version" | .Into .Entity.ResourceVersion }},
		e.{{ .Ident "key" | .Into .Entity.Key }},
		e.{{ .Ident "group" | .Into .Entity.Group }},
		e.{{ .Ident "group_version" | .Into .Entity.GroupVersion }},
		e.{{ .Ident "resource" | .Into .Entity.Resource }},
		e.{{ .Ident "namespace" | .Into .Entity.Namespace }},
		e.{{ .Ident "name" | .Into .Entity.Name }},
		e.{{ .Ident "folder" | .Into .Entity.Folder }},
		e.{{ .Ident "meta" | .Into .Entity.Meta }},
		e.{{ .Ident "body" | .Into .Entity.Body }},
		e.{{ .Ident "status" | .Into .Entity.Status }},
		e.{{ .Ident "size" | .Into .Entity.Size }},
		e.{{ .Ident "etag" | .Into .Entity.ETag }},
		e.{{ .Ident "created_at" | .Into .Entity.CreatedAt }},
		e.{{ .Ident "created_by" | .Into .Entity.CreatedBy }},
		e.{{ .Ident "updated_at" | .Into .Entity.UpdatedAt }},
		e.{{ .Ident "updated_by" | .Into .Entity.UpdatedBy }},
		e.{{ .Ident "origin" | .Into .Entity.Origin.Source }},
		e.{{ .Ident "origin_key" | .Into .Entity.Origin.Key }},
		e.{{ .Ident "origin_ts" | .Into .Entity.Origin.Time }},
		e.{{ .Ident "title" | .Into .Entity.Title }},
		e.{{ .Ident "slug" | .Into .Entity.Slug }},
		e.{{ .Ident "description" | .Into .Entity.Description }},
		e.{{ .Ident "message" | .Into .Entity.Message }},
		e.{{ .Ident "labels" | .Into .Entity.Labels }},
		e.{{ .Ident "fields" | .Into .Entity.Fields }},
		e.{{ .Ident "errors" | .Into .Entity.Errors }},
		e.{{ .Ident "action" | .Into .Entity.Action }}
{{ end }}

{{/* Build an ORDER BY clause from a []SortBy contained in a .Sort field */}}
{{ define "common_order_by" }}
	{{ $comma := listSep ", " }}
	{{ range .Sort }}
		{{- call $comma -}} {{ $.Ident .Field }} {{ .Direction.String }}
	{{ end }}
{{ end }}

View File

@ -1,7 +0,0 @@
{{/* Delete the current version of an entity identified by its full key
     (namespace, group, resource, name) from the "entity" table. History rows
     are kept in "entity_history". */ -}}
DELETE FROM {{ .Ident "entity" }}
	WHERE 1 = 1
		AND {{ .Ident "namespace" }} = {{ .Arg .Key.Namespace }}
		AND {{ .Ident "group" }} = {{ .Arg .Key.Group }}
		AND {{ .Ident "resource" }} = {{ .Arg .Key.Resource }}
		AND {{ .Ident "name" }} = {{ .Arg .Key.Name }}
;

View File

@ -1,29 +0,0 @@
{{/* Bulk-insert the precomputed MPTT rows describing a namespace's folder
     tree; "lft"/"rgt" are the MPTT left/right indexes and "tree" is the
     JSON-encoded ancestor stack. */ -}}
INSERT INTO {{ .Ident "entity_folder" }}
	(
		{{ .Ident "guid" }},
		{{ .Ident "namespace" }},
		{{ .Ident "name" }},
		{{ .Ident "slug_path" }},
		{{ .Ident "tree" }},
		{{ .Ident "depth" }},
		{{ .Ident "lft" }},
		{{ .Ident "rgt" }},
		{{ .Ident "detached" }}
	)

	VALUES
		{{ $comma := listSep ", " }}
		{{ range .Items }}
			{{- call $comma -}} (
				{{ $.Arg .GUID }},
				{{ $.Arg .Namespace }},
				{{ $.Arg .UID }},
				{{ $.Arg .SlugPath }},
				{{ $.Arg .JS }},
				{{ $.Arg .Depth }},
				{{ $.Arg .Left }},
				{{ $.Arg .Right }},
				{{ $.Arg .Detached }}
			)
		{{ end }}
;

View File

@ -1,93 +0,0 @@
{{/* Insert a full entity row. The same template serves both the "entity"
     table (current version; .TableEntity true) and the "entity_history"
     table (append-only log; .TableEntity false). The column list and value
     list below must stay in the same order. */ -}}
INSERT INTO
	{{/* Determine which table to insert into */}}
	{{ if .TableEntity }} {{ .Ident "entity" }}
	{{ else }} {{ .Ident "entity_history" }}
	{{ end }}

	{{/* Explicitly specify fields that will be set */}}
	(
		{{ .Ident "guid" }},
		{{ .Ident "resource_version" }},
		{{ .Ident "key" }},
		{{ .Ident "group" }},
		{{ .Ident "group_version" }},
		{{ .Ident "resource" }},
		{{ .Ident "namespace" }},
		{{ .Ident "name" }},
		{{ .Ident "folder" }},
		{{ .Ident "meta" }},
		{{ .Ident "body" }},
		{{ .Ident "status" }},
		{{ .Ident "size" }},
		{{ .Ident "etag" }},
		{{ .Ident "created_at" }},
		{{ .Ident "created_by" }},
		{{ .Ident "updated_at" }},
		{{ .Ident "updated_by" }},
		{{ .Ident "origin" }},
		{{ .Ident "origin_key" }},
		{{ .Ident "origin_ts" }},
		{{ .Ident "title" }},
		{{ .Ident "slug" }},
		{{ .Ident "description" }},
		{{ .Ident "message" }},
		{{ .Ident "labels" }},
		{{ .Ident "fields" }},
		{{ .Ident "errors" }},
		{{ .Ident "action" }}
	)

	{{/* Provide the values */}}
	VALUES (
		{{ .Arg .Entity.Guid }},
		{{ .Arg .Entity.ResourceVersion }},
		{{ .Arg .Entity.Key }},
		{{ .Arg .Entity.Group }},
		{{ .Arg .Entity.GroupVersion }},
		{{ .Arg .Entity.Resource }},
		{{ .Arg .Entity.Namespace }},
		{{ .Arg .Entity.Name }},
		{{ .Arg .Entity.Folder }},
		{{ .Arg .Entity.Meta }},
		{{ .Arg .Entity.Body }},
		{{ .Arg .Entity.Status }},
		{{ .Arg .Entity.Size }},
		{{ .Arg .Entity.ETag }},
		{{ .Arg .Entity.CreatedAt }},
		{{ .Arg .Entity.CreatedBy }},
		{{ .Arg .Entity.UpdatedAt }},
		{{ .Arg .Entity.UpdatedBy }},
		{{ .Arg .Entity.Origin.Source }},
		{{ .Arg .Entity.Origin.Key }},
		{{ .Arg .Entity.Origin.Time }},
		{{ .Arg .Entity.Title }},
		{{ .Arg .Entity.Slug }},
		{{ .Arg .Entity.Description }},
		{{ .Arg .Entity.Message }},
		{{ .Arg .Entity.Labels }},
		{{ .Arg .Entity.Fields }},
		{{ .Arg .Entity.Errors }},
		{{ .Arg .Entity.Action }}
	)
;

View File

@ -1,7 +0,0 @@
{{/* Remove an entity's label rows, except for any label names listed in
     .KeepLabels. */ -}}
DELETE FROM {{ .Ident "entity_labels" }}
	WHERE 1 = 1
		AND {{ .Ident "guid" }} = {{ .Arg .GUID }}

		{{ if gt (len .KeepLabels) 0 }}
			AND {{ .Ident "label" }} NOT IN ( {{ .ArgList .KeepLabels }} )
		{{ end }}
;

View File

@ -1,17 +0,0 @@
{{/* Insert one (guid, label, value) row per label of an entity. */ -}}
INSERT INTO {{ .Ident "entity_labels" }}
	(
		{{ .Ident "guid" }},
		{{ .Ident "label" }},
		{{ .Ident "value" }}
	)

	VALUES
		{{ $comma := listSep ", " }}
		{{ range $name, $value := .Labels }}
			{{- call $comma -}} (
				{{ $.Arg $.GUID }},
				{{ $.Arg $name }},
				{{ $.Arg $value }}
			)
		{{ end }}
;

View File

@ -1,14 +0,0 @@
{{/* List minimal folder information for every entity of a kind in a
     namespace, scanning each row into .FolderInfo.

     NOTE(review): the "name" column is scanned into both UID and Name —
     presumably the entity name doubles as the folder UID while the display
     title lives in the separate "title" column; confirm before reuse. */ -}}
SELECT
	{{ .Ident "guid" | .Into .FolderInfo.GUID }},
	{{ .Ident "name" | .Into .FolderInfo.UID }},
	{{ .Ident "folder" | .Into .FolderInfo.ParentUID }},
	{{ .Ident "name" | .Into .FolderInfo.Name }},
	{{ .Ident "slug" | .Into .FolderInfo.Slug }}

	FROM {{ .Ident "entity" }}
	WHERE 1 = 1
		AND {{ .Ident "group" }} = {{ .Arg .Group }}
		AND {{ .Ident "resource" }} = {{ .Arg .Resource }}
		AND {{ .Ident "namespace" }} = {{ .Arg .Namespace }}
;

View File

@ -1,39 +0,0 @@
{{/* Read a single entity: the latest version from "entity" when
     .ResourceVersion is zero, or a historical snapshot from "entity_history"
     otherwise. Optionally takes a row lock via .SelectForUpdate. */ -}}
SELECT {{ template "common_entity_select_into" . }}

	FROM
		{{ if gt .ResourceVersion 0 }}
			{{ .Ident "entity_history" }} AS e
		{{ else }}
			{{ .Ident "entity" }} AS e
		{{ end }}

	WHERE 1 = 1
		AND {{ .Ident "namespace" }} = {{ .Arg .Key.Namespace }}
		AND {{ .Ident "group" }} = {{ .Arg .Key.Group }}
		AND {{ .Ident "resource" }} = {{ .Arg .Key.Resource }}
		AND {{ .Ident "name" }} = {{ .Arg .Key.Name }}

		{{/*
			Resource versions work like snapshots at the kind level. Thus, a request
			to retrieve a specific resource version should be interpreted as asking
			for a resource as of how it existed at that point in time. This is why we
			request matching entities with at most the provided resource version, and
			return only the one with the highest resource version. In the case of not
			specifying a resource version (i.e. resource version zero), it is
			interpreted as the latest version of the given entity, thus we instead
			query the "entity" table (which holds only the latest version of
			non-deleted entities) and we don't need to specify anything else. The
			"entity" table has a unique constraint on (namespace, group, resource,
			name), so we're guaranteed to have at most one matching row.
		*/}}
		{{ if gt .ResourceVersion 0 }}
			AND {{ .Ident "resource_version" }} <= {{ .Arg .ResourceVersion }}

			ORDER BY {{ .Ident "resource_version" }} DESC
			LIMIT 1
		{{ end }}

	{{ if .SelectForUpdate }}
		{{ .SelectFor "UPDATE NOWAIT" }}
	{{ end }}
;

View File

@ -1,34 +0,0 @@
{{/* Update the mutable columns of the current entity row, addressed by GUID.
     Identity columns (guid, key, group, resource, namespace, name) and the
     created_at/created_by audit fields are never changed in place. */ -}}
UPDATE {{ .Ident "entity" }} SET
		{{ .Ident "resource_version" }} = {{ .Arg .Entity.ResourceVersion }},
		{{ .Ident "group_version" }} = {{ .Arg .Entity.GroupVersion }},
		{{ .Ident "folder" }} = {{ .Arg .Entity.Folder }},
		{{ .Ident "meta" }} = {{ .Arg .Entity.Meta }},
		{{ .Ident "body" }} = {{ .Arg .Entity.Body }},
		{{ .Ident "status" }} = {{ .Arg .Entity.Status }},
		{{ .Ident "size" }} = {{ .Arg .Entity.Size }},
		{{ .Ident "etag" }} = {{ .Arg .Entity.ETag }},
		{{ .Ident "updated_at" }} = {{ .Arg .Entity.UpdatedAt }},
		{{ .Ident "updated_by" }} = {{ .Arg .Entity.UpdatedBy }},
		{{ .Ident "origin" }} = {{ .Arg .Entity.Origin.Source }},
		{{ .Ident "origin_key" }} = {{ .Arg .Entity.Origin.Key }},
		{{ .Ident "origin_ts" }} = {{ .Arg .Entity.Origin.Time }},
		{{ .Ident "title" }} = {{ .Arg .Entity.Title }},
		{{ .Ident "slug" }} = {{ .Arg .Entity.Slug }},
		{{ .Ident "description" }} = {{ .Arg .Entity.Description }},
		{{ .Ident "message" }} = {{ .Arg .Entity.Message }},
		{{ .Ident "labels" }} = {{ .Arg .Entity.Labels }},
		{{ .Ident "fields" }} = {{ .Arg .Entity.Fields }},
		{{ .Ident "errors" }} = {{ .Arg .Entity.Errors }},
		{{ .Ident "action" }} = {{ .Arg .Entity.Action }}

	WHERE {{ .Ident "guid" }} = {{ .Arg .Entity.Guid }}
;

View File

@ -1,10 +0,0 @@
{{/* Fetch the version bookkeeping row for a (group, resource) pair.

     FIX: created_at and updated_at were previously both scanned into
     .ResourceVersion, clobbering the version; they now go into their own
     returnsKindVersion fields (.CreatedAt / .UpdatedAt). */ -}}
SELECT
		{{ .Ident "resource_version" | .Into .ResourceVersion }},
		{{ .Ident "created_at" | .Into .CreatedAt }},
		{{ .Ident "updated_at" | .Into .UpdatedAt }}

	FROM {{ .Ident "kind_version" }}
	WHERE 1 = 1
		AND {{ .Ident "group" }} = {{ .Arg .Group }}
		AND {{ .Ident "resource" }} = {{ .Arg .Resource }}
;

View File

@ -1,10 +0,0 @@
{{/* Optimistic-lock increment: bump resource_version by one, but only when
     it still equals the caller-supplied value (compare-and-swap in the WHERE
     clause). Zero affected rows means another writer won. */ -}}
UPDATE {{ .Ident "kind_version" }}
	SET
		{{ .Ident "resource_version" }} = {{ .Arg .ResourceVersion }} + 1,
		{{ .Ident "updated_at" }} = {{ .Arg .UpdatedAt }}
	WHERE 1 = 1
		AND {{ .Ident "group" }} = {{ .Arg .Group }}
		AND {{ .Ident "resource" }} = {{ .Arg .Resource }}
		AND {{ .Ident "resource_version" }} = {{ .Arg .ResourceVersion }}
;

View File

@ -1,17 +0,0 @@
{{/* Create the version bookkeeping row for a (group, resource) pair, starting
     at resource version 1. */ -}}
INSERT INTO {{ .Ident "kind_version" }}
	(
		{{ .Ident "group" }},
		{{ .Ident "resource" }},
		{{ .Ident "resource_version" }},
		{{ .Ident "created_at" }},
		{{ .Ident "updated_at" }}
	)

	VALUES (
		{{ .Arg .Group }},
		{{ .Arg .Resource }},
		1,
		{{ .Arg .CreatedAt }},
		{{ .Arg .UpdatedAt }}
	)
;

View File

@ -1,7 +0,0 @@
{{/* Read the current resource version for a (group, resource) pair while
     taking a row lock (SELECT ... FOR UPDATE) to serialize concurrent
     writers on that kind. */ -}}
SELECT {{ .Ident "resource_version" | .Into .ResourceVersion }}
	FROM {{ .Ident "kind_version" }}
	WHERE 1 = 1
		AND {{ .Ident "group" }} = {{ .Arg .Group }}
		AND {{ .Ident "resource" }} = {{ .Arg .Resource }}
{{ .SelectFor "UPDATE" }}
;

View File

@ -1,118 +0,0 @@
package sqlstash
import (
"context"
"errors"
"fmt"
"time"
folder "github.com/grafana/grafana/pkg/apis/folder/v0alpha1"
grafanaregistry "github.com/grafana/grafana/pkg/apiserver/registry/generic"
"github.com/grafana/grafana/pkg/services/store/entity"
"github.com/grafana/grafana/pkg/services/store/entity/db"
"github.com/grafana/grafana/pkg/storage/unified/sql/sqltemplate"
)
// Delete removes the current version of an entity, records a DELETED row in
// entity_history, and rebuilds the folder tree when the deleted entity is a
// folder. Everything runs in one READ COMMITTED transaction, with work that
// does not require the kind_version lock performed first. Per Kubernetes
// semantics, the response carries the pre-deletion entity with its resource
// version bumped to the new value.
func (s *sqlEntityServer) Delete(ctx context.Context, r *entity.DeleteEntityRequest) (*entity.DeleteEntityResponse, error) {
	ctx, span := s.tracer.Start(ctx, "storage_server.Delete")
	defer span.End()

	if err := s.Init(); err != nil {
		return nil, err
	}

	key, err := grafanaregistry.ParseKey(r.Key)
	if err != nil {
		return nil, fmt.Errorf("delete entity: parse entity key: %w", err)
	}

	updatedBy, err := getCurrentUser(ctx)
	if err != nil {
		return nil, fmt.Errorf("delete entity: %w", err)
	}

	ret := new(entity.DeleteEntityResponse)
	err = s.sqlDB.WithTx(ctx, ReadCommitted, func(ctx context.Context, tx db.Tx) error {
		// Pre-locking: get the latest version of the entity
		previous, err := readEntity(ctx, tx, s.sqlDialect, key, r.PreviousVersion, true, false)
		if errors.Is(err, ErrNotFound) {
			ret.Status = entity.DeleteEntityResponse_NOTFOUND
			return nil
		}
		if err != nil {
			return err
		}

		// Pre-locking: remove this entity's labels
		delLabelsReq := sqlEntityLabelsDeleteRequest{
			SQLTemplate: sqltemplate.New(s.sqlDialect),
			GUID:        previous.Guid,
		}
		if _, err = exec(ctx, tx, sqlEntityLabelsDelete, delLabelsReq); err != nil {
			return fmt.Errorf("delete all labels of entity with guid %q: %w",
				previous.Guid, err)
		}

		// TODO: Pre-locking: remove this entity's refs from `entity_ref`

		// Pre-locking: delete from "entity"
		delEntityReq := sqlEntityDeleteRequest{
			SQLTemplate: sqltemplate.New(s.sqlDialect),
			Key:         key,
		}
		if _, err = exec(ctx, tx, sqlEntityDelete, delEntityReq); err != nil {
			return fmt.Errorf("delete entity with key %#v: %w", key, err)
		}

		// Pre-locking: rebuild the whole folder tree structure if we're
		// deleting a folder
		if previous.Group == folder.GROUP && previous.Resource == folder.RESOURCE {
			if err = s.updateFolderTree(ctx, tx, key.Namespace); err != nil {
				return fmt.Errorf("rebuild folder tree structure: %w", err)
			}
		}

		// up to this point, we have done all the work possible before having to
		// lock kind_version

		// 1. Atomically increment resource version for this kind
		newVersion, err := kindVersionAtomicInc(ctx, tx, s.sqlDialect, key.Group, key.Resource)
		if err != nil {
			return err
		}

		// k8s expects us to return the entity as it was before the deletion,
		// but with the updated RV
		previous.ResourceVersion = newVersion

		// build the new row to be inserted
		deletedVersion := *previous                          // copy marshaled data since it won't change
		deletedVersion.Entity = cloneEntity(previous.Entity) // clone entity
		deletedVersion.Action = entity.Entity_DELETED
		deletedVersion.UpdatedAt = time.Now().UnixMilli()
		deletedVersion.UpdatedBy = updatedBy

		// 2. Insert into entity history
		insEntity := sqlEntityInsertRequest{
			SQLTemplate: sqltemplate.New(s.sqlDialect),
			Entity:      &deletedVersion,
		}
		if _, err = exec(ctx, tx, sqlEntityInsert, insEntity); err != nil {
			return fmt.Errorf("insert into entity_history: %w", err)
		}

		// success
		ret.Status = entity.DeleteEntityResponse_DELETED
		ret.Entity = previous.Entity

		return nil
	})
	if err != nil {
		// TODO: should we populate the Error field and how? (i.e. how to
		// determine what information can be disclosed to the user?)
		return nil, fmt.Errorf("delete entity: %w", err)
	}

	return ret, nil
}

View File

@ -1,186 +0,0 @@
package sqlstash
import (
"context"
"encoding/json"
"fmt"
"strings"
folder "github.com/grafana/grafana/pkg/apis/folder/v0alpha1"
"github.com/grafana/grafana/pkg/services/store/entity/db"
"github.com/grafana/grafana/pkg/storage/unified/sql/sqltemplate"
)
// folderInfo is the in-memory node used to rebuild a namespace's folder tree
// (MPTT) before writing it to the entity_folder table.
type folderInfo struct {
	GUID     string `json:"guid"`
	UID      string `json:"uid"`
	Name     string `json:"name"` // original display name
	SlugPath string `json:"slug"` // full slug path

	// original slug
	Slug string `json:"-"`

	depth int32 // 0 for the synthetic root (set by setMPTTOrder)
	left  int32 // MPTT left index
	right int32 // MPTT right index

	// Build the tree
	ParentUID string

	// Calculated after query
	parent   *folderInfo
	children []*folderInfo
	// stack is the path from the root to (and including) this folder,
	// populated by setMPTTOrder.
	stack []*folderInfo
}
// buildInsertItems flattens this folder subtree (depth-first) into rows for
// the entity_folder bulk-insert template, appending to *items.
//
// isLost marks the whole subtree as detached (not reachable from the root).
func (fi *folderInfo) buildInsertItems(items *[]*sqlEntityFolderInsertRequestItem, namespace string, isLost bool) error {
	var js strings.Builder
	if err := json.NewEncoder(&js).Encode(fi.stack); err != nil {
		return fmt.Errorf("marshal stack of folder %q to JSON: %w", fi.SlugPath, err)
	}

	*items = append(*items, &sqlEntityFolderInsertRequestItem{
		GUID:      fi.GUID,
		Namespace: namespace,
		UID:       fi.UID,
		SlugPath:  fi.SlugPath,
		JS:        js.String(),
		Depth:     fi.depth,
		Left:      fi.left,
		Right:     fi.right,
		Detached:  isLost,
	})

	for _, sub := range fi.children {
		if err := sub.buildInsertItems(items, namespace, isLost); err != nil {
			// FIX: previously `return nil`, which silently swallowed the
			// child's error and truncated the subtree.
			return err
		}
	}

	return nil
}
// This rebuilds the whole folders structure for a given namespace. This has to
// be done each time an entity is created or deleted.
// FIXME: This is very inefficient and time consuming. This could be implemented
// with a different approach instead of MPTT, or at least mitigated by an async
// job?
// FIXME: This algorithm apparently allows lost trees which are called
// "detached"? We should probably migrate to something safer.
func (s *sqlEntityServer) updateFolderTree(ctx context.Context, x db.ContextExecer, namespace string) error {
	_, err := x.ExecContext(ctx, "DELETE FROM entity_folder WHERE namespace=?", namespace)
	if err != nil {
		return fmt.Errorf("clear entity_folder for namespace %q: %w", namespace, err)
	}

	listReq := sqlEntityListFolderElementsRequest{
		SQLTemplate: sqltemplate.New(s.sqlDialect),
		Group:       folder.GROUP,
		Resource:    folder.RESOURCE,
		Namespace:   namespace,
		FolderInfo:  new(folderInfo),
	}
	query, err := sqltemplate.Execute(sqlEntityListFolderElements, listReq)
	if err != nil {
		return fmt.Errorf("execute SQL template to list folder items in namespace %q: %w", namespace, err)
	}

	rows, err := x.QueryContext(ctx, query, listReq.GetArgs()...)
	if err != nil {
		return fmt.Errorf("list folder items in namespace %q: %w", namespace, err)
	}
	// FIX: the result set was never closed, leaking it (and potentially its
	// connection) on every folder-tree rebuild.
	defer func() { _ = rows.Close() }()

	var itemList []*folderInfo
	for i := 1; rows.Next(); i++ {
		if err := rows.Scan(listReq.GetScanDest()...); err != nil {
			return fmt.Errorf("scan row #%d listing folder items in namespace %q: %w", i, namespace, err)
		}
		// copy the scan destination, which is reused for every row
		fi := *listReq.FolderInfo
		itemList = append(itemList, &fi)
	}
	if err := rows.Err(); err != nil {
		return fmt.Errorf("rows error after listing folder items in namespace %q: %w", namespace, err)
	}

	root, lost, err := buildFolderTree(itemList)
	if err != nil {
		return fmt.Errorf("build folder tree for namespace %q: %w", namespace, err)
	}

	var insertItems []*sqlEntityFolderInsertRequestItem
	if err = root.buildInsertItems(&insertItems, namespace, false); err != nil {
		return fmt.Errorf("build insert items for root tree in namespace %q: %w", namespace, err)
	}

	for i, lostItem := range lost {
		// FIX: lost subtrees were previously inserted with isLost=false, so
		// the "detached" column was never set; mark them detached as the
		// FIXME above intends.
		if err = lostItem.buildInsertItems(&insertItems, namespace, true); err != nil {
			return fmt.Errorf("build insert items for lost folder #%d tree in namespace %q: %w", i, namespace, err)
		}
	}

	insReq := sqlEntityFolderInsertRequest{
		SQLTemplate: sqltemplate.New(s.sqlDialect),
		Items:       insertItems,
	}
	if _, err = exec(ctx, x, sqlEntityFolderInsert, insReq); err != nil {
		return fmt.Errorf("insert rebuilt tree for namespace %q: %w", namespace, err)
	}

	return nil
}
// buildFolderTree links the flat folder list into a tree rooted at a
// synthetic "Root" node (UID ""), then assigns MPTT ordering. Folders whose
// parent UID cannot be resolved are returned separately as "lost".
func buildFolderTree(all []*folderInfo) (*folderInfo, []*folderInfo, error) {
	byUID := make(map[string]*folderInfo, len(all)+1)
	for _, f := range all {
		byUID[f.UID] = f
	}

	root := &folderInfo{
		Name:     "Root",
		UID:      "",
		children: []*folderInfo{},
		left:     1,
	}
	byUID[""] = root

	// already sorted by slug
	lost := []*folderInfo{}
	for _, f := range all {
		if parent, ok := byUID[f.ParentUID]; ok {
			f.parent = parent
			parent.children = append(parent.children, f)
		} else {
			lost = append(lost, f)
		}
	}

	_, err := setMPTTOrder(root, []*folderInfo{}, int32(1))
	return root, lost, err
}
// setMPTTOrder assigns modified-preorder tree traversal (MPTT) left/right
// indexes, depth, ancestor stack and slug path to folder and its subtree,
// returning the subtree's right index.
// https://imrannazar.com/Modified-Preorder-Tree-Traversal
//
// stack is the path from the root down to (and including) this folder.
func setMPTTOrder(folder *folderInfo, stack []*folderInfo, idx int32) (int32, error) {
	var err error
	folder.depth = int32(len(stack))
	folder.left = idx
	folder.stack = stack

	if folder.depth > 0 {
		folder.SlugPath = "/"
		for _, f := range stack {
			folder.SlugPath += f.Slug + "/"
		}
	}

	for _, child := range folder.children {
		// FIX: use a full slice expression so each child's append gets its
		// own backing array. A plain append(stack, child) can reuse spare
		// capacity of the shared stack, letting sibling subtrees overwrite
		// each other's retained ancestor stacks.
		childStack := append(stack[:len(stack):len(stack)], child)
		idx, err = setMPTTOrder(child, childStack, idx+1)
		if err != nil {
			return idx, err
		}
	}
	folder.right = idx + 1
	return folder.right, nil
}

View File

@ -1,64 +0,0 @@
package sqlstash
import (
_ "embed"
"encoding/json"
"testing"
"github.com/stretchr/testify/require"
"github.com/grafana/grafana-plugin-sdk-go/data"
"github.com/grafana/grafana-plugin-sdk-go/experimental"
)
// TestFolderSupport builds a small tree (A > AA, plus sibling B), asserts no
// folders end up lost, and compares the frame rendering against the "simple"
// golden file in testdata.
func TestFolderSupport(t *testing.T) {
	root, lost, err := buildFolderTree([]*folderInfo{
		{GUID: "GA", UID: "A", ParentUID: "", Name: "A", Slug: "a"},
		{GUID: "GAA", UID: "AA", ParentUID: "A", Name: "AA", Slug: "aa"},
		{GUID: "GB", UID: "B", ParentUID: "", Name: "B", Slug: "b"},
	})
	require.NoError(t, err)
	require.NotNil(t, root)
	require.NotNil(t, lost)
	require.Empty(t, lost)

	frame := treeToFrame(root)
	experimental.CheckGoldenJSONFrame(t, "testdata", "simple", frame, true)
}
// treeToFrame renders the folder tree rooted at root into a data frame with
// one row per folder (depth-first order, via appendFolder), used for golden
// file comparison in tests.
func treeToFrame(root *folderInfo) *data.Frame {
	frame := data.NewFrame("",
		data.NewFieldFromFieldType(data.FieldTypeString, 0), // UID
		data.NewFieldFromFieldType(data.FieldTypeString, 0), // Name
		data.NewFieldFromFieldType(data.FieldTypeString, 0), // Slug
		data.NewFieldFromFieldType(data.FieldTypeInt32, 0),  // Depth
		data.NewFieldFromFieldType(data.FieldTypeInt32, 0),  // Left
		data.NewFieldFromFieldType(data.FieldTypeInt32, 0),  // Right
		data.NewFieldFromFieldType(data.FieldTypeJSON, 0),   // Tree
	)
	frame.Fields[0].Name = "UID"
	frame.Fields[1].Name = "name"
	frame.Fields[2].Name = "slug"
	frame.Fields[3].Name = "depth"
	frame.Fields[4].Name = "left"
	frame.Fields[5].Name = "right"
	frame.Fields[6].Name = "tree"
	appendFolder(root, frame)
	return frame
}
// appendFolder appends one row for folder and then recurses into its children
// (depth-first), matching the field order set up in treeToFrame.
func appendFolder(folder *folderInfo, frame *data.Frame) {
	// marshal error deliberately ignored — the stack is assumed to always be
	// JSON-encodable here (test helper only).
	b, _ := json.Marshal(folder.stack)
	frame.AppendRow(
		folder.UID,
		folder.Name,
		folder.SlugPath,
		folder.depth,
		folder.left,
		folder.right,
		json.RawMessage(b),
	)
	for _, sub := range folder.children {
		appendFolder(sub, frame)
	}
}

View File

@ -1,41 +0,0 @@
package sqlstash
import (
"sync"
"github.com/prometheus/client_golang/prometheus"
)
var (
	// once guards the one-time initialization of StorageServerMetrics.
	once sync.Once
	// StorageServerMetrics is the package-level metrics singleton populated
	// by NewStorageMetrics.
	StorageServerMetrics *StorageApiMetrics
)

// StorageApiMetrics holds the storage-server Prometheus counters and acts as
// a prometheus.Collector (see Collect/Describe below).
type StorageApiMetrics struct {
	OptimisticLockFailed *prometheus.CounterVec
}
// NewStorageMetrics lazily initializes and returns the package-level
// StorageServerMetrics singleton. It is safe to call multiple times; the
// counter vector is created exactly once.
func NewStorageMetrics() *StorageApiMetrics {
	once.Do(func() {
		opts := prometheus.CounterOpts{
			Namespace: "storage_server",
			Name:      "optimistic_lock_failed",
			Help:      "count of optimistic locks failed",
		}
		StorageServerMetrics = &StorageApiMetrics{
			OptimisticLockFailed: prometheus.NewCounterVec(opts, []string{"action"}),
		}
	})

	return StorageServerMetrics
}
// Collect implements prometheus.Collector by delegating to the counter vec.
func (s *StorageApiMetrics) Collect(ch chan<- prometheus.Metric) {
	s.OptimisticLockFailed.Collect(ch)
}

// Describe implements prometheus.Collector by delegating to the counter vec.
func (s *StorageApiMetrics) Describe(ch chan<- *prometheus.Desc) {
	s.OptimisticLockFailed.Describe(ch)
}

View File

@ -1,612 +0,0 @@
package sqlstash
import (
"context"
"database/sql"
"embed"
"encoding/json"
"errors"
"fmt"
"strings"
"text/template"
"time"
"google.golang.org/protobuf/proto"
grafanaregistry "github.com/grafana/grafana/pkg/apiserver/registry/generic"
"github.com/grafana/grafana/pkg/services/store/entity"
"github.com/grafana/grafana/pkg/services/store/entity/db"
"github.com/grafana/grafana/pkg/storage/unified/sql/sqltemplate"
)
// Templates setup.
var (
	//go:embed data/*.sql
	sqlTemplatesFS embed.FS

	// helpers are the custom functions available to all SQL templates.
	helpers = template.FuncMap{
		"listSep": helperListSep,
		"join":    helperJoin,
	}

	// sqlTemplates parses every embedded data/*.sql file at package init,
	// panicking on any parse error (template.Must).
	sqlTemplates = template.Must(template.New("sql").Funcs(helpers).ParseFS(sqlTemplatesFS, `data/*.sql`))
)
// mustTemplate returns the parsed template for the given embedded SQL file,
// panicking at package init time if the file is missing.
func mustTemplate(filename string) *template.Template {
	t := sqlTemplates.Lookup(filename)
	if t == nil {
		panic(fmt.Sprintf("template file not found: %s", filename))
	}
	return t
}
// Templates. One parsed template per embedded data/*.sql file; mustTemplate
// panics at package init if a file is missing.
var (
	sqlEntityDelete             = mustTemplate("entity_delete.sql")
	sqlEntityInsert             = mustTemplate("entity_insert.sql")
	sqlEntityListFolderElements = mustTemplate("entity_list_folder_elements.sql")
	sqlEntityRead               = mustTemplate("entity_read.sql")
	sqlEntityUpdate             = mustTemplate("entity_update.sql")
	sqlEntityFolderInsert       = mustTemplate("entity_folder_insert.sql")
	sqlEntityLabelsDelete       = mustTemplate("entity_labels_delete.sql")
	sqlEntityLabelsInsert       = mustTemplate("entity_labels_insert.sql")
	sqlKindVersionGet           = mustTemplate("kind_version_get.sql")
	sqlKindVersionInc           = mustTemplate("kind_version_inc.sql")
	sqlKindVersionInsert        = mustTemplate("kind_version_insert.sql")
	sqlKindVersionLock          = mustTemplate("kind_version_lock.sql")
)

// TxOptions.
var (
	// ReadCommitted is the default isolation for read-write entity
	// transactions.
	ReadCommitted = &sql.TxOptions{
		Isolation: sql.LevelReadCommitted,
	}
	// ReadCommittedRO is the read-only variant of ReadCommitted.
	ReadCommittedRO = &sql.TxOptions{
		Isolation: sql.LevelReadCommitted,
		ReadOnly:  true,
	}
)
// SQLError is an error returned by the database, which additionally carries
// debugging information about what was sent to the database.
type SQLError struct {
	Err          error
	CallType     string // either Query, QueryRow or Exec
	TemplateName string
	Query        string
	RawQuery     string
	ScanDest     []any

	// potentially regulated information is not exported and only directly
	// available for local testing and local debugging purposes, making sure it
	// is never marshaled to JSON or any other serialization.
	arguments []any
}

// Unwrap exposes the wrapped database error for errors.Is / errors.As.
func (e SQLError) Unwrap() error {
	return e.Err
}

// Error implements the error interface, summarizing the call and argument
// counts without disclosing the argument values themselves.
func (e SQLError) Error() string {
	in, out := len(e.arguments), len(e.ScanDest)
	return fmt.Sprintf("%s: %s with %d input arguments and %d output "+
		"destination arguments: %v", e.TemplateName, e.CallType, in, out, e.Err)
}
// entity_folder table requests.
type sqlEntityFolderInsertRequest struct {
*sqltemplate.SQLTemplate
Items []*sqlEntityFolderInsertRequestItem
}
func (r sqlEntityFolderInsertRequest) Validate() error {
return nil // TODO
}
type sqlEntityFolderInsertRequestItem struct {
GUID string
Namespace string
UID string
SlugPath string
JS string
Depth int32
Left int32
Right int32
Detached bool
}
// entity_labels table requests.

// sqlEntityLabelsInsertRequest inserts one row per label for the entity
// identified by GUID.
type sqlEntityLabelsInsertRequest struct {
	*sqltemplate.SQLTemplate
	GUID   string
	Labels map[string]string
}

// Validate is run before executing the template. No validation is currently
// performed.
func (r sqlEntityLabelsInsertRequest) Validate() error {
	return nil // TODO
}
// sqlEntityLabelsDeleteRequest deletes the labels of the entity identified by
// GUID, except those listed in KeepLabels.
type sqlEntityLabelsDeleteRequest struct {
	*sqltemplate.SQLTemplate
	GUID       string
	KeepLabels []string
}

// Validate is run before executing the template. No validation is currently
// performed.
func (r sqlEntityLabelsDeleteRequest) Validate() error {
	return nil // TODO
}
// entity_kind table requests.

// returnsKindVersion is embedded in kind_version requests to receive the
// columns scanned back from the kind_version table.
type returnsKindVersion struct {
	ResourceVersion int64
	CreatedAt       int64
	UpdatedAt       int64
}

// Results returns the values scanned from the current row.
func (r *returnsKindVersion) Results() (*returnsKindVersion, error) {
	return r, nil
}
// sqlKindVersionGetRequest reads the version row for a (Group, Resource) pair.
type sqlKindVersionGetRequest struct {
	*sqltemplate.SQLTemplate
	Group    string
	Resource string
	*returnsKindVersion
}

// Validate is run before executing the template. No validation is currently
// performed.
func (r sqlKindVersionGetRequest) Validate() error {
	return nil // TODO
}
// sqlKindVersionLockRequest reads the version row for a (Group, Resource)
// pair while acquiring a row-level lock (see kind_version_lock.sql).
type sqlKindVersionLockRequest struct {
	*sqltemplate.SQLTemplate
	Group    string
	Resource string
	*returnsKindVersion
}

// Validate is run before executing the template. No validation is currently
// performed.
func (r sqlKindVersionLockRequest) Validate() error {
	return nil // TODO
}
// sqlKindVersionIncRequest increments the stored version for a
// (Group, Resource) pair, matching on the current ResourceVersion.
type sqlKindVersionIncRequest struct {
	*sqltemplate.SQLTemplate
	Group           string
	Resource        string
	ResourceVersion int64
	UpdatedAt       int64
}

// Validate is run before executing the template. No validation is currently
// performed.
func (r sqlKindVersionIncRequest) Validate() error {
	return nil // TODO
}
// sqlKindVersionInsertRequest creates the initial version row for a
// (Group, Resource) pair.
type sqlKindVersionInsertRequest struct {
	*sqltemplate.SQLTemplate
	Group     string
	Resource  string
	CreatedAt int64
	UpdatedAt int64
}

// Validate is run before executing the template. No validation is currently
// performed.
func (r sqlKindVersionInsertRequest) Validate() error {
	return nil // TODO
}
// entity and entity_history tables requests.

// sqlEntityListFolderElementsRequest lists the elements of a folder for the
// given (Group, Resource, Namespace).
type sqlEntityListFolderElementsRequest struct {
	*sqltemplate.SQLTemplate
	Group      string
	Resource   string
	Namespace  string
	FolderInfo *folderInfo
}

// Validate is run before executing the template. No validation is currently
// performed.
func (r sqlEntityListFolderElementsRequest) Validate() error {
	return nil // TODO
}
// sqlEntityReadRequest can be used to retrieve a row from either the "entity"
// or the "entity_history" tables. In particular, don't use this template
// directly. Instead, use the readEntity function, which provides all common use
// cases and proper database deserialization.
type sqlEntityReadRequest struct {
	*sqltemplate.SQLTemplate
	Key *grafanaregistry.Key
	// ResourceVersion, when non-zero, selects from entity_history at that
	// version instead of the current row in entity (see readEntity).
	ResourceVersion int64
	// SelectForUpdate acquires a row-level exclusive lock on the selected row.
	SelectForUpdate bool
	returnsEntitySet
}

// Validate is run before executing the template. No validation is currently
// performed.
func (r sqlEntityReadRequest) Validate() error {
	return nil // TODO
}
// sqlEntityDeleteRequest deletes the entity identified by Key.
type sqlEntityDeleteRequest struct {
	*sqltemplate.SQLTemplate
	Key *grafanaregistry.Key
}

// Validate is run before executing the template. No validation is currently
// performed.
func (r sqlEntityDeleteRequest) Validate() error {
	return nil // TODO
}
// sqlEntityInsertRequest inserts a serialized entity row.
type sqlEntityInsertRequest struct {
	*sqltemplate.SQLTemplate
	Entity *returnsEntity

	// TableEntity, when true, means we will insert into table "entity", and
	// into table "entity_history" otherwise.
	TableEntity bool
}

// Validate is run before executing the template. No validation is currently
// performed.
func (r sqlEntityInsertRequest) Validate() error {
	return nil // TODO
}
// sqlEntityUpdateRequest updates an existing row in the "entity" table with
// the serialized entity.
type sqlEntityUpdateRequest struct {
	*sqltemplate.SQLTemplate
	Entity *returnsEntity
}

// Validate is run before executing the template. No validation is currently
// performed.
func (r sqlEntityUpdateRequest) Validate() error {
	return nil // TODO
}
// newEmptyEntity allocates a new entity.Entity with every nested pointer,
// map and slice field pre-allocated, ready to be populated by templates.
// Non-nil collections also keep the JSON representation stored in the
// database consistent ({} / [] instead of null).
func newEmptyEntity() *entity.Entity {
	e := new(entity.Entity)
	// internal pointer types must be allocated up front so that they are
	// readily available to be populated in the template
	e.Origin = new(entity.EntityOriginInfo)
	// default empty values in slices and maps instead of nil, for a
	// consistent serialized representation
	e.Labels = make(map[string]string)
	e.Fields = make(map[string]string)
	e.Errors = make([]*entity.EntityErrorInfo, 0)
	return e
}
// cloneEntity returns a deep copy of src with all internal state allocated
// (it merges src into a fresh entity from newEmptyEntity).
func cloneEntity(src *entity.Entity) *entity.Entity {
	ret := newEmptyEntity()
	proto.Merge(ret, src)
	return ret
}
// returnsEntitySet can be embedded in a request struct to provide automatic set
// returning of []*entity.Entity from the database, deserializing as needed. It
// should be embedded as a value type.
// Example struct:
//
//	type sqlMyRequest struct {
//		*sqltemplate.SQLTemplate
//		returnsEntitySet   // embedded value type, not pointer type
//		GUID               string // example argument
//		MaxResourceVersion int    // example argument
//	}
//
// Example struct usage:
//
//	req := sqlMyRequest{
//		SQLTemplate:        sqltemplate.New(myDialect),
//		returnsEntitySet:   newReturnsEntitySet(),
//		GUID:               "abc",
//		MaxResourceVersion: 1,
//	}
//	entities, err := query(myTx, myTmpl, req)
//
// Example usage in SQL template:
//
//	SELECT
//			{{ .Ident "guid"             | .Into .Entity.Guid }},
//			{{ .Ident "resource_version" | .Into .Entity.ResourceVersion }},
//			{{ .Ident "body"             | .Into .Entity.Body }}
//		FROM {{ .Ident "entity_history" }}
//		WHERE 1 = 1
//			AND {{ .Ident "guid" }}              = {{ .Arg .GUID }}
//			AND {{ .Ident "resource_version" }} <= {{ .Arg .MaxResourceVersion }}
//	;
type returnsEntitySet struct {
	Entity *returnsEntity
}
// newReturnsEntitySet returns a new returnsEntitySet with its internal entity
// allocated and ready for use.
func newReturnsEntitySet() returnsEntitySet {
	return returnsEntitySet{
		Entity: newReturnsEntity(),
	}
}
// Results is part of the implementation of sqltemplate.WithResults that
// deserializes the database data into an internal *entity.Entity, and then
// returns a deep copy of it. The copy is required because the internal entity
// is reused to scan every row of the result set.
func (e returnsEntitySet) Results() (*entity.Entity, error) {
	ent, err := e.Entity.Results()
	if err != nil {
		return nil, err
	}
	return cloneEntity(ent), nil
}
// returnsEntity is a wrapper that aids with database (de)serialization. It
// embeds a *entity.Entity to provide transparent access to all its fields, but
// overrides the ones that need database (de)serialization. It should be a named
// field in your request struct, with pointer type.
// Example struct:
//
//	type sqlMyRequest struct {
//		*sqltemplate.SQLTemplate
//		Entity          *returnsEntity // named field with pointer type
//		GUID            string         // example argument
//		ResourceVersion int            // example argument
//	}
//
// Example struct usage:
//
//	req := sqlMyRequest{
//		SQLTemplate:     sqltemplate.New(myDialect),
//		Entity:          newReturnsEntity(),
//		GUID:            "abc",
//		ResourceVersion: 1,
//	}
//	err := queryRow(myTx, myTmpl, req)
//	// check err here
//	err = req.Entity.unmarshal()
//	// check err, and you can now use req.Entity.Entity
//
// Example usage in SQL template:
//
//	SELECT
//			{{ .Ident "guid"             | .Into .Entity.Guid }},
//			{{ .Ident "resource_version" | .Into .Entity.ResourceVersion }},
//			{{ .Ident "body"             | .Into .Entity.Body }}
//		FROM {{ .Ident "entity" }}
//		WHERE 1 = 1
//			AND {{ .Ident "guid" }}             = {{ .Arg .GUID }}
//			AND {{ .Ident "resource_version" }} = {{ .Arg .ResourceVersion }}
//	;
type returnsEntity struct {
	*entity.Entity
	// JSON-serialized columns; these shadow the embedded Entity's fields of
	// the same names for database reads/writes (see marshal/unmarshal).
	Labels []byte
	Fields []byte
	Errors []byte
}
// newReturnsEntity returns a new returnsEntity wrapping a fully allocated
// entity.Entity.
func newReturnsEntity() *returnsEntity {
	return &returnsEntity{
		Entity: newEmptyEntity(),
	}
}
// Results deserializes the scanned JSON columns into the embedded Entity and
// returns it. Note that it returns the internal entity, not a copy.
func (e *returnsEntity) Results() (*entity.Entity, error) {
	if err := e.unmarshal(); err != nil {
		return nil, err
	}
	return e.Entity, nil
}
// marshal serializes the labels, fields and errors from the wire protocol
// representation so they can be written to the database. Empty or nil
// collections are stored as "{}" / "[]" so the persisted JSON stays
// consistent.
func (e *returnsEntity) marshal() error {
	if len(e.Entity.Labels) == 0 {
		e.Labels = []byte("{}")
	} else {
		var err error
		if e.Labels, err = json.Marshal(e.Entity.Labels); err != nil {
			return fmt.Errorf("serialize entity \"labels\" field: %w", err)
		}
	}

	if len(e.Entity.Fields) == 0 {
		e.Fields = []byte("{}")
	} else {
		var err error
		if e.Fields, err = json.Marshal(e.Entity.Fields); err != nil {
			return fmt.Errorf("serialize entity \"fields\" field: %w", err)
		}
	}

	if len(e.Entity.Errors) == 0 {
		e.Errors = []byte("[]")
	} else {
		var err error
		if e.Errors, err = json.Marshal(e.Entity.Errors); err != nil {
			return fmt.Errorf("serialize entity \"errors\" field: %w", err)
		}
	}

	return nil
}
// unmarshal deserializes the JSON columns read from the database back into
// the wire protocol representation. Empty columns yield empty, non-nil
// collections.
func (e *returnsEntity) unmarshal() error {
	if len(e.Labels) == 0 {
		e.Entity.Labels = map[string]string{}
	} else if err := json.Unmarshal(e.Labels, &e.Entity.Labels); err != nil {
		return fmt.Errorf("deserialize entity \"labels\" field: %w", err)
	}

	if len(e.Fields) == 0 {
		e.Entity.Fields = map[string]string{}
	} else if err := json.Unmarshal(e.Fields, &e.Entity.Fields); err != nil {
		return fmt.Errorf("deserialize entity \"fields\" field: %w", err)
	}

	if len(e.Errors) == 0 {
		e.Entity.Errors = []*entity.EntityErrorInfo{}
	} else if err := json.Unmarshal(e.Errors, &e.Entity.Errors); err != nil {
		return fmt.Errorf("deserialize entity \"errors\" field: %w", err)
	}

	return nil
}
// readEntity returns the entity defined by the given key as it existed at
// version `asOfVersion`, if that value is greater than zero. The returned
// entity will have at most that version. If `asOfVersion` is zero, then the
// current version of that entity will be returned. If `optimisticLocking` is
// true, then the latest version of the entity will be retrieved and return an
// error if its version is not exactly `asOfVersion`. The option
// `selectForUpdate` will cause to acquire a row-level exclusive lock upon
// selecting it. `optimisticLocking` is ignored if `asOfVersion` is zero.
// Common errors to check:
//  1. ErrOptimisticLockingFailed: the latest version of the entity does not
//     match the value of `asOfVersion`.
//  2. ErrNotFound: the entity does not currently exist, did not exist at the
//     version of `asOfVersion` or was deleted.
func readEntity(
	ctx context.Context,
	x db.ContextExecer,
	d sqltemplate.Dialect,
	k *grafanaregistry.Key,
	asOfVersion int64,
	optimisticLocking bool,
	selectForUpdate bool,
) (*returnsEntity, error) {
	// normalize arguments: negative versions behave like zero, and optimistic
	// locking is meaningless without a specific version to compare against
	asOfVersion = max(asOfVersion, 0)
	optimisticLocking = optimisticLocking && asOfVersion != 0

	v := asOfVersion
	if optimisticLocking {
		// for optimistic locking, we will not ask for a specific version, but
		// instead retrieve the latest version from the table "entity" and
		// manually compare if it matches the given value of "asOfVersion".
		v = 0
	}

	readReq := sqlEntityReadRequest{
		SQLTemplate:      sqltemplate.New(d),
		Key:              k,
		ResourceVersion:  v,
		SelectForUpdate:  selectForUpdate,
		returnsEntitySet: newReturnsEntitySet(),
	}
	ent, err := queryRow(ctx, x, sqlEntityRead, readReq)
	if errors.Is(err, sql.ErrNoRows) {
		return nil, ErrNotFound
	}
	if err != nil {
		return nil, fmt.Errorf("read entity: %w", err)
	}

	// a row whose last action was a delete is reported as not found
	if ent.Action == entity.Entity_DELETED {
		return nil, ErrNotFound
	}

	// NOTE(review): `asOfVersion != 0` is already implied by
	// `optimisticLocking` after the normalization above, so the middle
	// condition is redundant but harmless.
	if optimisticLocking && asOfVersion != 0 && ent.ResourceVersion != asOfVersion {
		return nil, ErrOptimisticLockingFailed
	}

	return readReq.Entity, nil
}
// kindVersionAtomicInc atomically increases the version of a kind within a
// transaction. It returns the new version number. If no row exists yet for
// the given (group, resource), one is created with version 1.
func kindVersionAtomicInc(ctx context.Context, x db.ContextExecer, d sqltemplate.Dialect, group, resource string) (newVersion int64, err error) {
	now := time.Now().UnixMilli()

	// 1. Lock the kind and get the latest version
	lockReq := sqlKindVersionLockRequest{
		SQLTemplate:        sqltemplate.New(d),
		Group:              group,
		Resource:           resource,
		returnsKindVersion: new(returnsKindVersion),
	}
	kindv, err := queryRow(ctx, x, sqlKindVersionLock, lockReq)

	// if there wasn't a row associated with the given kind, we create one with
	// version 1
	if errors.Is(err, sql.ErrNoRows) {
		// NOTE: there is a marginal chance that we race with another writer
		// trying to create the same row. This is only possible when onboarding
		// a new (Group, Resource) to the cell, which should be very unlikely,
		// and the workaround is simply retrying. The alternative would be to
		// use INSERT ... ON CONFLICT DO UPDATE ..., but that creates a
		// requirement for support in Dialect only for this marginal case, and
		// we would rather keep Dialect as small as possible. Another
		// alternative is to simply check if the INSERT returns a DUPLICATE KEY
		// error and then retry the original SELECT, but that also adds some
		// complexity to the code. That would be preferable to changing
		// Dialect, though. The current alternative, just retrying, seems to be
		// enough for now.
		insReq := sqlKindVersionInsertRequest{
			SQLTemplate: sqltemplate.New(d),
			Group:       group,
			Resource:    resource,
			CreatedAt:   now,
			UpdatedAt:   now,
		}
		if _, err = exec(ctx, x, sqlKindVersionInsert, insReq); err != nil {
			return 0, fmt.Errorf("insert into kind_version: %w", err)
		}
		return 1, nil
	}
	if err != nil {
		return 0, fmt.Errorf("lock kind: %w", err)
	}

	// 2. Increment the version, matching on the version we read under the lock
	incReq := sqlKindVersionIncRequest{
		SQLTemplate:     sqltemplate.New(d),
		Group:           group,
		Resource:        resource,
		ResourceVersion: kindv.ResourceVersion,
		UpdatedAt:       now,
	}
	if _, err = exec(ctx, x, sqlKindVersionInc, incReq); err != nil {
		return 0, fmt.Errorf("increase kind version: %w", err)
	}

	return kindv.ResourceVersion + 1, nil
}
// Template helpers.

// helperListSep returns a closure that yields the empty string on its first
// call and `sep` on every subsequent call, which helps writing simpler loops
// in SQL templates.
// Example usage:
//
//	{{ $comma := listSep ", " }}
//	{{ range .Values }}
//		{{/* here we put "-" on each end to remove extra white space */}}
//		{{- call $comma -}}
//		{{ .Value }}
//	{{ end }}
func helperListSep(sep string) func() string {
	// next holds what the following call should return: nothing the first
	// time, the separator ever after
	next := ""
	return func() string {
		cur := next
		next = sep
		return cur
	}
}
// helperJoin concatenates elems with sep between consecutive elements,
// equivalent to strings.Join but variadic for convenient use in templates.
func helperJoin(sep string, elems ...string) string {
	var b strings.Builder
	for i, e := range elems {
		if i > 0 {
			b.WriteString(sep)
		}
		b.WriteString(e)
	}
	return b.String()
}

View File

@ -1,822 +0,0 @@
package sqlstash
import (
"context"
"embed"
"encoding/json"
"errors"
"fmt"
"strings"
"testing"
"text/template"
sqlmock "github.com/DATA-DOG/go-sqlmock"
"github.com/stretchr/testify/require"
grafanaregistry "github.com/grafana/grafana/pkg/apiserver/registry/generic"
"github.com/grafana/grafana/pkg/services/store/entity"
"github.com/grafana/grafana/pkg/services/store/entity/db"
"github.com/grafana/grafana/pkg/storage/unified/sql/sqltemplate"
"github.com/grafana/grafana/pkg/util/testutil"
)
// debug is meant to provide greater debugging detail about certain errors. The
// returned error will either provide more detailed information or be the same
// original error, suitable only for local debugging. The details provided are
// not meant to be logged, since they could include PII or otherwise
// sensitive/confidential information. This information should only be used for
// local debugging with fake or otherwise non-regulated information.
func debug(err error) error {
	var detailed interface{ Debug() string }
	if !errors.As(err, &detailed) {
		return err
	}
	return errors.New(detailed.Debug())
}

var _ = debug // silence the `unused` linter
// testdataFS embeds the expected SQL query files used by the tests below.
//go:embed testdata/*
var testdataFS embed.FS
// testdata reads the given file from the embedded testdata directory, failing
// the test on error.
func testdata(t *testing.T, filename string) []byte {
	t.Helper()
	b, err := testdataFS.ReadFile(`testdata/` + filename)
	require.NoError(t, err)

	return b
}
// testdataJSON reads the given embedded testdata file and JSON-decodes it
// into dest, failing the test on error.
func testdataJSON(t *testing.T, filename string, dest any) {
	t.Helper()
	b := testdata(t, filename)

	err := json.Unmarshal(b, dest)
	require.NoError(t, err)
}
// TestQueries checks that each SQL template renders to the expected query
// text, per dialect, using golden files from the testdata directory.
func TestQueries(t *testing.T) {
	t.Parallel()

	// Each template has one or more test cases, each identified with a
	// descriptive name (e.g. "happy path", "error twiddling the frobb"). Each
	// of them will test that for the same input data they must produce a result
	// that will depend on the Dialect. Expected queries should be defined in
	// separate files in the testdata directory. This improves the testing
	// experience by separating test data from test code, since mixing both
	// tends to make it more difficult to reason about what is being done,
	// especially as we want testing code to scale and make it easy to add
	// tests.
	type (
		// type aliases to make code more semantic and self-documenting
		resultSQLFilename = string
		dialects          = []sqltemplate.Dialect
		expected          map[resultSQLFilename]dialects

		testCase = struct {
			Name string

			// Data should be the struct passed to the template.
			Data sqltemplate.SQLTemplateIface

			// Expected maps the filename containing the expected result query
			// to the list of dialects that would produce it. For simple
			// queries, it is possible that more than one dialect produce the
			// same output. The filename is expected to be in the `testdata`
			// directory.
			Expected expected
		}
	)

	// Define tests cases. Most templates are trivial and testing that they
	// generate correct code for a single Dialect is fine, since the one thing
	// that always changes is how SQL placeholder arguments are passed (most
	// Dialects use `?` while PostgreSQL uses `$1`, `$2`, etc.), and that is
	// something that should be tested in the Dialect implementation instead of
	// here. We will ask to have at least one test per SQL template, and we will
	// lean to test MySQL. Templates containing branching (conditionals, loops,
	// etc.) should be exercised at least once in each of their branches.
	//
	// NOTE: in the Data field, make sure to have pointers populated to simulate
	// data is set as it would be in a real request. The data being correctly
	// populated in each case should be tested in integration tests, where the
	// data will actually flow to and from a real database. In this tests we
	// only care about producing the correct SQL.
	testCases := map[*template.Template][]*testCase{
		sqlEntityDelete: {
			{
				Name: "single path",
				Data: &sqlEntityDeleteRequest{
					SQLTemplate: new(sqltemplate.SQLTemplate),
					Key:         new(grafanaregistry.Key),
				},
				Expected: expected{
					"entity_delete_mysql_sqlite.sql": dialects{
						sqltemplate.MySQL,
						sqltemplate.SQLite,
					},
					"entity_delete_postgres.sql": dialects{
						sqltemplate.PostgreSQL,
					},
				},
			},
		},

		sqlEntityInsert: {
			{
				Name: "insert into entity",
				Data: &sqlEntityInsertRequest{
					SQLTemplate: new(sqltemplate.SQLTemplate),
					Entity:      newReturnsEntity(),
					TableEntity: true,
				},
				Expected: expected{
					"entity_insert_mysql_sqlite.sql": dialects{
						sqltemplate.MySQL,
						sqltemplate.SQLite,
					},
				},
			},
			{
				Name: "insert into entity_history",
				Data: &sqlEntityInsertRequest{
					SQLTemplate: new(sqltemplate.SQLTemplate),
					Entity:      newReturnsEntity(),
					TableEntity: false,
				},
				Expected: expected{
					"entity_history_insert_mysql_sqlite.sql": dialects{
						sqltemplate.MySQL,
						sqltemplate.SQLite,
					},
				},
			},
		},

		sqlEntityListFolderElements: {
			{
				Name: "single path",
				Data: &sqlEntityListFolderElementsRequest{
					SQLTemplate: new(sqltemplate.SQLTemplate),
					FolderInfo:  new(folderInfo),
				},
				Expected: expected{
					"entity_list_folder_elements_mysql_sqlite.sql": dialects{
						sqltemplate.MySQL,
						sqltemplate.SQLite,
					},
				},
			},
		},

		sqlEntityRead: {
			{
				Name: "with resource version and select for update",
				Data: &sqlEntityReadRequest{
					SQLTemplate:     new(sqltemplate.SQLTemplate),
					Key:             new(grafanaregistry.Key),
					ResourceVersion: 1,
					SelectForUpdate: true,
					returnsEntitySet: returnsEntitySet{
						Entity: newReturnsEntity(),
					},
				},
				Expected: expected{
					"entity_history_read_full_mysql.sql": dialects{
						sqltemplate.MySQL,
					},
				},
			},
			{
				Name: "without resource version and select for update",
				Data: &sqlEntityReadRequest{
					SQLTemplate: new(sqltemplate.SQLTemplate),
					Key:         new(grafanaregistry.Key),
					returnsEntitySet: returnsEntitySet{
						Entity: newReturnsEntity(),
					},
				},
				Expected: expected{
					"entity_read_mysql_sqlite.sql": dialects{
						sqltemplate.MySQL,
						sqltemplate.SQLite,
					},
				},
			},
		},

		sqlEntityUpdate: {
			{
				Name: "single path",
				Data: &sqlEntityUpdateRequest{
					SQLTemplate: new(sqltemplate.SQLTemplate),
					Entity:      newReturnsEntity(),
				},
				Expected: expected{
					"entity_update_mysql_sqlite.sql": dialects{
						sqltemplate.MySQL,
						sqltemplate.SQLite,
					},
				},
			},
		},

		sqlEntityFolderInsert: {
			{
				Name: "one item",
				Data: &sqlEntityFolderInsertRequest{
					SQLTemplate: new(sqltemplate.SQLTemplate),
					Items:       []*sqlEntityFolderInsertRequestItem{{}},
				},
				Expected: expected{
					"entity_folder_insert_1_mysql_sqlite.sql": dialects{
						sqltemplate.MySQL,
					},
				},
			},
			{
				Name: "two items",
				Data: &sqlEntityFolderInsertRequest{
					SQLTemplate: new(sqltemplate.SQLTemplate),
					Items:       []*sqlEntityFolderInsertRequestItem{{}, {}},
				},
				Expected: expected{
					"entity_folder_insert_2_mysql_sqlite.sql": dialects{
						sqltemplate.MySQL,
					},
				},
			},
		},

		sqlEntityLabelsDelete: {
			{
				Name: "one element",
				Data: &sqlEntityLabelsDeleteRequest{
					SQLTemplate: new(sqltemplate.SQLTemplate),
					KeepLabels:  []string{"one"},
				},
				Expected: expected{
					"entity_labels_delete_1_mysql_sqlite.sql": dialects{
						sqltemplate.MySQL,
						sqltemplate.SQLite,
					},
				},
			},
			{
				Name: "two elements",
				Data: &sqlEntityLabelsDeleteRequest{
					SQLTemplate: new(sqltemplate.SQLTemplate),
					KeepLabels:  []string{"one", "two"},
				},
				Expected: expected{
					"entity_labels_delete_2_mysql_sqlite.sql": dialects{
						sqltemplate.MySQL,
						sqltemplate.SQLite,
					},
				},
			},
		},

		sqlEntityLabelsInsert: {
			{
				Name: "one element",
				Data: &sqlEntityLabelsInsertRequest{
					SQLTemplate: new(sqltemplate.SQLTemplate),
					Labels:      map[string]string{"lbl1": "val1"},
				},
				Expected: expected{
					"entity_labels_insert_1_mysql_sqlite.sql": dialects{
						sqltemplate.MySQL,
						sqltemplate.SQLite,
					},
				},
			},
			{
				Name: "two elements",
				Data: &sqlEntityLabelsInsertRequest{
					SQLTemplate: new(sqltemplate.SQLTemplate),
					Labels:      map[string]string{"lbl1": "val1", "lbl2": "val2"},
				},
				Expected: expected{
					"entity_labels_insert_2_mysql_sqlite.sql": dialects{
						sqltemplate.MySQL,
						sqltemplate.SQLite,
					},
				},
			},
		},

		sqlKindVersionGet: {
			{
				Name: "single path",
				Data: &sqlKindVersionGetRequest{
					SQLTemplate:        new(sqltemplate.SQLTemplate),
					returnsKindVersion: new(returnsKindVersion),
				},
				Expected: expected{
					"kind_version_get_mysql_sqlite.sql": dialects{
						sqltemplate.MySQL,
						sqltemplate.SQLite,
					},
				},
			},
		},

		sqlKindVersionInc: {
			{
				Name: "single path",
				Data: &sqlKindVersionIncRequest{
					SQLTemplate: new(sqltemplate.SQLTemplate),
				},
				Expected: expected{
					"kind_version_inc_mysql_sqlite.sql": dialects{
						sqltemplate.MySQL,
						sqltemplate.SQLite,
					},
				},
			},
		},

		sqlKindVersionInsert: {
			{
				Name: "single path",
				Data: &sqlKindVersionInsertRequest{
					SQLTemplate: new(sqltemplate.SQLTemplate),
				},
				Expected: expected{
					"kind_version_insert_mysql_sqlite.sql": dialects{
						sqltemplate.MySQL,
						sqltemplate.SQLite,
					},
				},
			},
		},

		sqlKindVersionLock: {
			{
				Name: "single path",
				Data: &sqlKindVersionLockRequest{
					SQLTemplate:        new(sqltemplate.SQLTemplate),
					returnsKindVersion: new(returnsKindVersion),
				},
				Expected: expected{
					"kind_version_lock_mysql.sql": dialects{
						sqltemplate.MySQL,
					},
					"kind_version_lock_postgres.sql": dialects{
						sqltemplate.PostgreSQL,
					},
					"kind_version_lock_sqlite.sql": dialects{
						sqltemplate.SQLite,
					},
				},
			},
		},
	}

	// Execute test cases
	for tmpl, tcs := range testCases {
		t.Run(tmpl.Name(), func(t *testing.T) {
			t.Parallel()

			for _, tc := range tcs {
				t.Run(tc.Name, func(t *testing.T) {
					t.Parallel()

					for filename, ds := range tc.Expected {
						t.Run(filename, func(t *testing.T) {
							// not parallel because we're sharing tc.Data, not
							// worth it deep cloning
							rawQuery := string(testdata(t, filename))
							expectedQuery := sqltemplate.FormatSQL(rawQuery)

							for _, d := range ds {
								t.Run(d.DialectName(), func(t *testing.T) {
									// not parallel for the same reason
									tc.Data.SetDialect(d)
									err := tc.Data.Validate()
									require.NoError(t, err)

									got, err := sqltemplate.Execute(tmpl, tc.Data)
									require.NoError(t, err)

									got = sqltemplate.FormatSQL(got)
									require.Equal(t, expectedQuery, got)
								})
							}
						})
					}
				})
			}
		})
	}
}
// TestReturnsEntity_marshal checks that marshal serializes maps and error
// slices to JSON, mapping nil/empty collections to "{}" / "[]".
func TestReturnsEntity_marshal(t *testing.T) {
	t.Parallel()

	// test data for maps
	someMap := map[string]string{
		"alpha": "aleph",
		"beta":  "beth",
	}
	someMapJSONb, err := json.Marshal(someMap)
	require.NoError(t, err)
	someMapJSON := string(someMapJSONb)

	// test data for errors
	someErrors := []*entity.EntityErrorInfo{
		{
			Code:        1,
			Message:     "not cool",
			DetailsJson: []byte(`"nothing to add"`),
		},
	}
	someErrorsJSONb, err := json.Marshal(someErrors)
	require.NoError(t, err)
	someErrorsJSON := string(someErrorsJSONb)

	t.Run("happy path - nothing to marshal", func(t *testing.T) {
		t.Parallel()

		d := &returnsEntity{
			Entity: &entity.Entity{
				Labels: map[string]string{},
				Fields: map[string]string{},
				Errors: []*entity.EntityErrorInfo{},
			},
		}
		err := d.marshal()
		require.NoError(t, err)

		require.JSONEq(t, `{}`, string(d.Labels))
		require.JSONEq(t, `{}`, string(d.Fields))
		require.JSONEq(t, `[]`, string(d.Errors))

		// nil Go Object/Slice map to empty JSON Object/Array for consistency
		d.Entity = new(entity.Entity)
		err = d.marshal()
		require.NoError(t, err)

		require.JSONEq(t, `{}`, string(d.Labels))
		require.JSONEq(t, `{}`, string(d.Fields))
		require.JSONEq(t, `[]`, string(d.Errors))
	})

	t.Run("happy path - everything to marshal", func(t *testing.T) {
		t.Parallel()

		d := &returnsEntity{
			Entity: &entity.Entity{
				Labels: someMap,
				Fields: someMap,
				Errors: someErrors,
			},
		}
		err := d.marshal()
		require.NoError(t, err)

		require.JSONEq(t, someMapJSON, string(d.Labels))
		require.JSONEq(t, someMapJSON, string(d.Fields))
		require.JSONEq(t, someErrorsJSON, string(d.Errors))
	})

	// NOTE: the error path for serialization is apparently unreachable. If you
	// find a way to simulate a serialization error, consider raising awareness
	// of such case(s) and add the corresponding tests here
}
// TestReturnsEntity_unmarshal checks that unmarshal deserializes the JSON
// columns, yields non-nil empty collections for empty input, and surfaces
// per-field errors for invalid JSON.
func TestReturnsEntity_unmarshal(t *testing.T) {
	t.Parallel()

	t.Run("happy path - nothing to unmarshal", func(t *testing.T) {
		t.Parallel()

		e := newReturnsEntity()
		err := e.unmarshal()
		require.NoError(t, err)
		require.NotNil(t, e.Entity.Labels)
		require.NotNil(t, e.Entity.Fields)
		require.NotNil(t, e.Entity.Errors)
	})

	t.Run("happy path - everything to unmarshal", func(t *testing.T) {
		t.Parallel()

		e := newReturnsEntity()
		e.Labels = []byte(`{}`)
		e.Fields = []byte(`{}`)
		e.Errors = []byte(`[]`)
		err := e.unmarshal()
		require.NoError(t, err)
		require.NotNil(t, e.Entity.Labels)
		require.NotNil(t, e.Entity.Fields)
		require.NotNil(t, e.Entity.Errors)
	})

	t.Run("fail to unmarshal", func(t *testing.T) {
		t.Parallel()

		var jsonInvalid = []byte(`.`)

		e := newReturnsEntity()
		e.Labels = jsonInvalid
		err := e.unmarshal()
		require.Error(t, err)
		require.ErrorContains(t, err, "labels")

		e = newReturnsEntity()
		e.Labels = nil
		e.Fields = jsonInvalid
		err = e.unmarshal()
		require.Error(t, err)
		require.ErrorContains(t, err, "fields")

		e = newReturnsEntity()
		e.Fields = nil
		e.Errors = jsonInvalid
		err = e.unmarshal()
		require.Error(t, err)
		require.ErrorContains(t, err, "errors")
	})
}
// TestReadEntity exercises readEntity against a mocked database: reads from
// the entity and entity_history tables, optimistic locking, and the
// not-found/deleted cases.
func TestReadEntity(t *testing.T) {
	t.Parallel()

	// readonly, shared data for all subtests
	expectedEntity := newEmptyEntity()
	testdataJSON(t, `grpc-res-entity.json`, expectedEntity)
	key, err := grafanaregistry.ParseKey(expectedEntity.Key)
	require.NoErrorf(t, err, "provided key: %#v", expectedEntity)

	t.Run("happy path - entity table, optimistic locking", func(t *testing.T) {
		t.Parallel()

		ctx := testutil.NewDefaultTestContext(t)
		db, mock := newMockDBMatchWords(t)
		x := expectReadEntity(t, mock, cloneEntity(expectedEntity))
		x(ctx, db)
	})

	t.Run("happy path - entity table, no optimistic locking", func(t *testing.T) {
		t.Parallel()

		// test declarations
		ctx := testutil.NewDefaultTestContext(t)
		db, mock := newMockDBMatchWords(t)
		readReq := sqlEntityReadRequest{ // used to generate mock results
			SQLTemplate:      sqltemplate.New(sqltemplate.MySQL),
			Key:              new(grafanaregistry.Key),
			returnsEntitySet: newReturnsEntitySet(),
		}
		readReq.Entity.Entity = cloneEntity(expectedEntity)
		results := newMockResults(t, mock, sqlEntityRead, readReq)

		// setup expectations
		results.AddCurrentData()
		mock.ExpectQuery(`select from entity where !resource_version update`).
			WillReturnRows(results.Rows())

		// execute and assert
		e, err := readEntity(ctx, db, sqltemplate.MySQL, key, 0, false, true)
		require.NoError(t, err)
		require.Equal(t, expectedEntity, e.Entity)
	})

	t.Run("happy path - entity_history table", func(t *testing.T) {
		t.Parallel()

		// test declarations
		ctx := testutil.NewDefaultTestContext(t)
		db, mock := newMockDBMatchWords(t)
		readReq := sqlEntityReadRequest{ // used to generate mock results
			SQLTemplate:      sqltemplate.New(sqltemplate.MySQL),
			Key:              new(grafanaregistry.Key),
			returnsEntitySet: newReturnsEntitySet(),
		}
		readReq.Entity.Entity = cloneEntity(expectedEntity)
		results := newMockResults(t, mock, sqlEntityRead, readReq)

		// setup expectations
		results.AddCurrentData()
		mock.ExpectQuery(`select from entity_history where resource_version !update`).
			WillReturnRows(results.Rows())

		// execute and assert
		e, err := readEntity(ctx, db, sqltemplate.MySQL, key,
			expectedEntity.ResourceVersion, false, false)
		require.NoError(t, err)
		require.Equal(t, expectedEntity, e.Entity)
	})

	t.Run("entity table, optimistic locking failed", func(t *testing.T) {
		t.Parallel()

		ctx := testutil.NewDefaultTestContext(t)
		db, mock := newMockDBMatchWords(t)
		x := expectReadEntity(t, mock, nil)
		x(ctx, db)
	})

	t.Run("entity_history table, entity not found", func(t *testing.T) {
		t.Parallel()

		// test declarations
		ctx := testutil.NewDefaultTestContext(t)
		db, mock := newMockDBMatchWords(t)
		readReq := sqlEntityReadRequest{ // used to generate mock results
			SQLTemplate:      sqltemplate.New(sqltemplate.MySQL),
			Key:              new(grafanaregistry.Key),
			returnsEntitySet: newReturnsEntitySet(),
		}
		results := newMockResults(t, mock, sqlEntityRead, readReq)

		// setup expectations
		mock.ExpectQuery(`select from entity_history where resource_version !update`).
			WillReturnRows(results.Rows())

		// execute and assert
		e, err := readEntity(ctx, db, sqltemplate.MySQL, key,
			expectedEntity.ResourceVersion, false, false)
		require.Nil(t, e)
		require.Error(t, err)
		require.ErrorIs(t, err, ErrNotFound)
	})

	t.Run("entity_history table, entity was deleted = not found", func(t *testing.T) {
		t.Parallel()

		// test declarations
		ctx := testutil.NewDefaultTestContext(t)
		db, mock := newMockDBMatchWords(t)
		readReq := sqlEntityReadRequest{ // used to generate mock results
			SQLTemplate:      sqltemplate.New(sqltemplate.MySQL),
			Key:              new(grafanaregistry.Key),
			returnsEntitySet: newReturnsEntitySet(),
		}
		readReq.Entity.Entity = cloneEntity(expectedEntity)
		readReq.Entity.Entity.Action = entity.Entity_DELETED
		results := newMockResults(t, mock, sqlEntityRead, readReq)

		// setup expectations
		results.AddCurrentData()
		mock.ExpectQuery(`select from entity_history where resource_version !update`).
			WillReturnRows(results.Rows())

		// execute and assert
		e, err := readEntity(ctx, db, sqltemplate.MySQL, key,
			expectedEntity.ResourceVersion, false, false)
		require.Nil(t, e)
		require.Error(t, err)
		require.ErrorIs(t, err, ErrNotFound)
	})
}
// expectReadEntity arranges test expectations so that it's easier to reuse
// across tests that need to call `readEntity`. If you provide a non-nil
// *entity.Entity, that will be returned by `readEntity`. If it's nil, then
// `readEntity` will return ErrOptimisticLockingFailed. It returns the function
// to execute the actual test and assert the expectations that were set.
func expectReadEntity(t *testing.T, mock sqlmock.Sqlmock, e *entity.Entity) func(ctx context.Context, db db.DB) {
	t.Helper()

	// test declarations
	readReq := sqlEntityReadRequest{ // used to generate mock results
		SQLTemplate:      sqltemplate.New(sqltemplate.MySQL),
		Key:              new(grafanaregistry.Key),
		returnsEntitySet: newReturnsEntitySet(),
	}
	results := newMockResults(t, mock, sqlEntityRead, readReq)
	if e != nil {
		readReq.Entity.Entity = cloneEntity(e)
	}

	// setup expectations
	results.AddCurrentData()
	mock.ExpectQuery(`select from entity where !resource_version update`).
		WillReturnRows(results.Rows())

	// execute and assert
	if e != nil {
		return func(ctx context.Context, db db.DB) {
			ent, err := readEntity(ctx, db, sqltemplate.MySQL, readReq.Key,
				e.ResourceVersion, true, true)
			require.NoError(t, err)
			require.Equal(t, e, ent.Entity)
		}
	}

	// e == nil: the mocked row is empty, so requesting version 1 with
	// optimistic locking must fail the version comparison
	return func(ctx context.Context, db db.DB) {
		ent, err := readEntity(ctx, db, sqltemplate.MySQL, readReq.Key, 1, true,
			true)
		require.Nil(t, ent)
		require.Error(t, err)
		require.ErrorIs(t, err, ErrOptimisticLockingFailed)
	}
}
// TestKindVersionAtomicInc exercises kindVersionAtomicInc against a mocked
// database: incrementing an existing row, creating a missing row, and the
// insert-failure path.
func TestKindVersionAtomicInc(t *testing.T) {
	t.Parallel()

	t.Run("happy path - row locked", func(t *testing.T) {
		t.Parallel()

		// test declarations
		const curVersion int64 = 1
		ctx := testutil.NewDefaultTestContext(t)
		db, mock := newMockDBMatchWords(t)

		// setup expectations
		mock.ExpectQuery(`select resource_version from kind_version where group resource update`).
			WillReturnRows(mock.NewRows([]string{"resource_version"}).AddRow(curVersion))
		mock.ExpectExec("update kind_version set resource_version updated_at where group resource").
			WillReturnResult(sqlmock.NewResult(0, 1))

		// execute and assert
		gotVersion, err := kindVersionAtomicInc(ctx, db, sqltemplate.MySQL, "groupname", "resname")
		require.NoError(t, err)
		require.Equal(t, curVersion+1, gotVersion)
	})

	t.Run("happy path - row created", func(t *testing.T) {
		t.Parallel()

		ctx := testutil.NewDefaultTestContext(t)
		db, mock := newMockDBMatchWords(t)
		x := expectKindVersionAtomicInc(t, mock, false)
		x(ctx, db)
	})

	t.Run("fail to create row", func(t *testing.T) {
		t.Parallel()

		ctx := testutil.NewDefaultTestContext(t)
		db, mock := newMockDBMatchWords(t)
		x := expectKindVersionAtomicInc(t, mock, true)
		x(ctx, db)
	})
}
// expectKindVersionAtomicInc arranges test expectations so that it's easier to
// reuse across tests that need to call `kindVersionAtomicInc`. If the test
// should fail, it will do so with `errTest`, and it will return resource
// version 1 otherwise. It returns the function to execute the actual test and
// assert the expectations that were set.
func expectKindVersionAtomicInc(t *testing.T, mock sqlmock.Sqlmock, shouldFail bool) func(ctx context.Context, db db.DB) {
	t.Helper()

	// setup expectations: the lock query finds no row, so an insert follows
	mock.ExpectQuery(`select resource_version from kind_version where group resource update`).
		WillReturnRows(mock.NewRows([]string{"resource_version"}))
	call := mock.ExpectExec("insert kind_version resource_version")

	// execute and assert
	if shouldFail {
		call.WillReturnError(errTest)

		return func(ctx context.Context, db db.DB) {
			gotVersion, err := kindVersionAtomicInc(ctx, db, sqltemplate.MySQL, "groupname", "resname")
			require.Zero(t, gotVersion)
			require.Error(t, err)
			require.ErrorIs(t, err, errTest)
		}
	}
	call.WillReturnResult(sqlmock.NewResult(0, 1))

	return func(ctx context.Context, db db.DB) {
		gotVersion, err := kindVersionAtomicInc(ctx, db, sqltemplate.MySQL, "groupname", "resname")
		require.NoError(t, err)
		require.Equal(t, int64(1), gotVersion)
	}
}
func TestMustTemplate(t *testing.T) {
t.Parallel()
require.Panics(t, func() {
mustTemplate("non existent file")
})
}
// Debug returns an expanded description of the SQL error, including the
// query, template output, arguments and scan destination types. It lives in a
// test file on purpose: its output may contain PII or otherwise regulated
// information, so it must only ever be used for local debugging in tests,
// never in runtime code. Note that the same information may still be
// reachable through other means (e.g. the "reflect" package), so care must be
// taken not to expose it in production.
func (e SQLError) Debug() string {
	scanDestStr := "(none)"
	if count := len(e.ScanDest); count > 0 {
		verbs := "[%T" + strings.Repeat(", %T", count-1) + "]"
		scanDestStr = fmt.Sprintf(verbs, e.ScanDest...)
	}

	return fmt.Sprintf("%s: %s: %v\n\tArguments (%d): %#v\n\tReturn Value "+
		"Types (%d): %s\n\tExecuted Query: %s\n\tRaw SQL Template Output: %s",
		e.TemplateName, e.CallType, e.Err, len(e.arguments), e.arguments,
		len(e.ScanDest), scanDestStr, e.Query, e.RawQuery)
}

View File

@ -1,221 +0,0 @@
package sqlstash
import (
"encoding/json"
"strings"
"github.com/grafana/grafana/pkg/services/sqlstore/migrator"
)
// Direction is the sort direction of an ORDER BY column.
type Direction int

const (
	// Ascending sorts smallest-first (SQL "ASC").
	Ascending Direction = iota
	// Descending sorts largest-first (SQL "DESC").
	Descending
)

// String returns the SQL keyword for the direction. Any value other than
// Descending renders as "ASC".
func (d Direction) String() string {
	switch d {
	case Descending:
		return "DESC"
	default:
		return "ASC"
	}
}
// joinQuery holds one raw JOIN fragment together with the arguments for any
// placeholders it contains. ToQuery appends the fragment verbatim after the
// FROM clause.
type joinQuery struct {
	query string
	args  []any
}
// whereClause holds one WHERE condition together with the arguments for its
// placeholders. ToQuery joins all clauses with AND.
type whereClause struct {
	query string
	args  []any
}
// selectQuery incrementally builds a SELECT statement, aliasing the FROM
// table as "t", and tracks the placeholder arguments alongside each clause.
// Call ToQuery to render the final SQL string and argument list.
type selectQuery struct {
	dialect migrator.Dialect
	fields  []string    // SELECT xyz
	from    string      // FROM object
	joins   []joinQuery // JOIN object
	offset  int64
	limit   int64
	oneExtra bool // when set, ToQuery requests limit+1 rows
	where    []whereClause
	groupBy  []string
	orderBy  []string
	direction []Direction // kept in lockstep with orderBy
}
// NewSelectQuery returns a selectQuery for the given table using the given
// SQL dialect for quoting and pagination.
func NewSelectQuery(dialect migrator.Dialect, from string) *selectQuery {
	q := new(selectQuery)
	q.dialect = dialect
	q.from = from

	return q
}
// From replaces the table (or other FROM expression) to select from.
func (q *selectQuery) From(from string) {
	q.from = from
}
// SetLimit sets the maximum number of rows to return; 0 disables the LIMIT
// clause entirely.
func (q *selectQuery) SetLimit(limit int64) {
	q.limit = limit
}
// SetOffset sets the number of rows to skip; only applied when a positive
// limit is also set.
func (q *selectQuery) SetOffset(offset int64) {
	q.offset = offset
}
// SetOneExtra makes ToQuery request one row beyond the configured limit
// (presumably so callers can detect whether more rows exist — confirm at
// call sites).
func (q *selectQuery) SetOneExtra() {
	q.oneExtra = true
}
// UnsetOneExtra reverts SetOneExtra, so ToQuery uses the limit as-is.
func (q *selectQuery) UnsetOneExtra() {
	q.oneExtra = false
}
// AddFields appends column names to the SELECT clause, quoting each one and
// prefixing it with the "t" alias of the FROM table.
func (q *selectQuery) AddFields(f ...string) {
	for _, field := range f {
		q.fields = append(q.fields, "t."+q.dialect.Quote(field))
	}
}
// AddRawFields appends SELECT expressions verbatim, without quoting or table
// alias prefixing.
func (q *selectQuery) AddRawFields(f ...string) {
	q.fields = append(q.fields, f...)
}
// AddJoin appends a raw JOIN fragment along with the arguments for any
// placeholders it contains.
func (q *selectQuery) AddJoin(j string, args ...any) {
	q.joins = append(q.joins, joinQuery{query: j, args: args})
}
// AddWhere appends a WHERE condition. If f contains a question mark it is
// taken to be a raw clause and used verbatim; otherwise it is treated as a
// field name and turned into an equality check against a single placeholder.
func (q *selectQuery) AddWhere(f string, val ...any) {
	clause := whereClause{query: f, args: val}
	if !strings.Contains(f, "?") {
		// plain field name: quote it, prefix the table alias, compare equal
		clause.query = "t." + q.dialect.Quote(f) + "=?"
	}
	q.where = append(q.where, clause)
}
// AddWhereInSubquery adds a `field IN (<subquery>)` WHERE condition, carrying
// the arguments required by the subquery's placeholders.
func (q *selectQuery) AddWhereInSubquery(f string, subquery string, subqueryArgs []any) {
	q.where = append(q.where, whereClause{"t." + q.dialect.Quote(f) + " IN (" + subquery + ")", subqueryArgs})
}
// AddWhereIn adds a `field IN (?, ...)` WHERE condition with one placeholder
// per value. A single value degrades to a plain equality via AddWhere, and an
// empty list adds no condition at all.
func (q *selectQuery) AddWhereIn(f string, vals []any) {
	switch n := len(vals); {
	case n == 1:
		q.AddWhere(f, vals[0])
	case n > 1:
		// one "?" per value, comma-separated; note the trailing space after
		// the closing parenthesis is preserved from the original rendering
		placeholders := strings.TrimSuffix(strings.Repeat("?,", n), ",")
		clause := "t." + q.dialect.Quote(f) + " IN (" + placeholders + ") "
		q.where = append(q.where, whereClause{clause, vals})
	}
}
// ToAnyList converts a typed slice into a []any of the same length, with each
// element boxed in order.
func ToAnyList[T any](input []T) []any {
	out := make([]any, 0, len(input))
	for _, item := range input {
		out = append(out, item)
	}

	return out
}
// sqlLikeEscape is the escape character used in LIKE patterns built by this
// package (paired with an explicit ESCAPE clause in the SQL).
const sqlLikeEscape = "#"

// sqlLikeEscapeReplacer neutralizes the LIKE wildcards "%" and "_" (and the
// escape character itself) by prefixing each with sqlLikeEscape.
var sqlLikeEscapeReplacer = strings.NewReplacer(
	sqlLikeEscape, sqlLikeEscape+sqlLikeEscape,
	"%", sqlLikeEscape+"%",
	"_", sqlLikeEscape+"_",
)

// escapeJSONStringSQLLike renders s as a JSON string literal (including the
// surrounding double quotes) and escapes any LIKE wildcards in it, so the
// result can be embedded safely in a LIKE pattern.
func escapeJSONStringSQLLike(s string) string {
	quoted, _ := json.Marshal(s) // marshalling a string cannot fail
	return sqlLikeEscapeReplacer.Replace(string(quoted))
}
// AddWhereJsonContainsKV adds a WHERE condition matching rows whose JSON
// `field` column contains the given key/value pair, using a LIKE pattern with
// an explicit ESCAPE character so wildcards inside key or value match
// literally. NOTE(review): the `{%"key":"value"%}` pattern assumes the stored
// JSON is serialized without whitespace between tokens — confirm against the
// writer side.
func (q *selectQuery) AddWhereJsonContainsKV(field string, key string, value string) {
	escapedKey := escapeJSONStringSQLLike(key)
	escapedValue := escapeJSONStringSQLLike(value)
	q.where = append(q.where, whereClause{
		"t." + q.dialect.Quote(field) + " LIKE ? ESCAPE ?",
		[]any{"{%\"" + escapedKey + "\":\"" + escapedValue + "\"%}", sqlLikeEscape},
	})
}
// AddGroupBy appends a column to the GROUP BY clause; ToQuery quotes it and
// prefixes the "t" alias when rendering.
func (q *selectQuery) AddGroupBy(f string) {
	q.groupBy = append(q.groupBy, f)
}
// AddOrderBy appends a column and its sort direction to the ORDER BY clause.
// The two slices are kept in lockstep; ToQuery skips the ORDER BY clause
// entirely if their lengths ever differ.
func (q *selectQuery) AddOrderBy(field string, direction Direction) {
	q.orderBy = append(q.orderBy, field)
	q.direction = append(q.direction, direction)
}
// ToQuery renders the accumulated clauses into a single SQL string and the
// flat list of placeholder arguments, in clause order: SELECT, FROM (aliased
// "t"), JOINs, WHERE (ANDed), GROUP BY, ORDER BY, then LIMIT/OFFSET.
func (q *selectQuery) ToQuery() (string, []any) {
	args := []any{}
	sb := strings.Builder{}
	sb.WriteString("SELECT ")
	sb.WriteString(strings.Join(q.fields, ","))
	sb.WriteString(" FROM ")
	sb.WriteString(q.from)
	sb.WriteString(" AS t")
	// raw JOIN fragments, in insertion order
	for _, j := range q.joins {
		sb.WriteString(" " + j.query)
		args = append(args, j.args...)
	}
	// Templated where string: all conditions joined with AND
	if len(q.where) > 0 {
		sb.WriteString(" WHERE ")
		for i, w := range q.where {
			if i > 0 {
				sb.WriteString(" AND ")
			}
			sb.WriteString(w.query)
			args = append(args, w.args...)
		}
	}
	if len(q.groupBy) > 0 {
		sb.WriteString(" GROUP BY ")
		for i, f := range q.groupBy {
			if i > 0 {
				sb.WriteString(",")
			}
			sb.WriteString("t." + q.dialect.Quote(f))
		}
	}
	// ORDER BY is emitted only when every column has a matching direction
	if len(q.orderBy) > 0 && len(q.direction) == len(q.orderBy) {
		sb.WriteString(" ORDER BY ")
		for i, f := range q.orderBy {
			if i > 0 {
				sb.WriteString(",")
			}
			sb.WriteString("t." + q.dialect.Quote(f))
			sb.WriteString(" ")
			sb.WriteString(q.direction[i].String())
		}
	}
	limit := q.limit
	if limit > 0 {
		// ask for one extra row when requested (see SetOneExtra)
		if q.oneExtra {
			limit = limit + 1
		}
		sb.WriteString(q.dialect.LimitOffset(limit, q.offset))
	}
	return sb.String(), args
}

File diff suppressed because it is too large Load Diff

View File

@ -1,44 +0,0 @@
package sqlstash
import (
"testing"
sqlmock "github.com/DATA-DOG/go-sqlmock"
"github.com/stretchr/testify/require"
traceNoop "go.opentelemetry.io/otel/trace/noop"
"github.com/grafana/grafana/pkg/infra/log"
"github.com/grafana/grafana/pkg/services/sqlstore/session"
"github.com/grafana/grafana/pkg/services/store/entity"
"github.com/grafana/grafana/pkg/storage/unified/sql/sqltemplate"
"github.com/grafana/grafana/pkg/util/testutil"
)
// newTestSQLEntityServer builds a sqlEntityServer wired to a sqlmock-backed
// database (word-matching SQL expectations) with a no-op logger and tracer,
// returning the mock so tests can set expectations.
func newTestSQLEntityServer(t *testing.T) (*sqlEntityServer, sqlmock.Sqlmock) {
	db, mock := newMockDBMatchWords(t)
	return &sqlEntityServer{
		log:        log.NewNopLogger(),
		tracer:     traceNoop.NewTracerProvider().Tracer("test-tracer"),
		sess:       new(session.SessionDB), // FIXME
		sqlDB:      db,
		sqlDialect: sqltemplate.MySQL,
	}, mock
}
// TestIsHealthy ensures the health check issues a database ping and succeeds
// when the ping does.
func TestIsHealthy(t *testing.T) {
	t.Parallel()
	// test declarations
	ctx := testutil.NewDefaultTestContext(t)
	s, mock := newTestSQLEntityServer(t)
	// setup expectations: IsHealthy is expected to ping the database
	mock.ExpectPing()
	// execute and assert
	_, err := s.IsHealthy(ctx, new(entity.HealthCheckRequest))
	require.NoError(t, err)
}

View File

@ -1 +0,0 @@
DELETE FROM "entity" WHERE 1 = 1 AND "namespace" = ? AND "group" = ? AND "resource" = ? AND "name" = ?;

View File

@ -1 +0,0 @@
DELETE FROM "entity" WHERE 1 = 1 AND "namespace" = $1 AND "group" = $2 AND "resource" = $3 AND "name" = $4;

View File

@ -1,3 +0,0 @@
INSERT INTO "entity_folder"
("guid", "namespace", "name", "slug_path", "tree", "depth", "lft", "rgt", "detached")
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?);

View File

@ -1,5 +0,0 @@
INSERT INTO "entity_folder"
("guid", "namespace", "name", "slug_path", "tree", "depth", "lft", "rgt", "detached")
VALUES
(?, ?, ?, ?, ?, ?, ?, ?, ?),
(?, ?, ?, ?, ?, ?, ?, ?, ?);

View File

@ -1,3 +0,0 @@
INSERT INTO "entity_history"
("guid", "resource_version", "key", "group", "group_version", "resource", "namespace", "name", "folder", "meta", "body", "status", "size", "etag", "created_at", "created_by", "updated_at", "updated_by", "origin", "origin_key", "origin_ts", "title", "slug", "description", "message", "labels", "fields", "errors", "action")
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?);

View File

@ -1,5 +0,0 @@
SELECT e."guid", e."resource_version", e."key", e."group", e."group_version", e."resource", e."namespace", e."name", e."folder", e."meta", e."body", e."status", e."size", e."etag", e."created_at", e."created_by", e."updated_at", e."updated_by", e."origin", e."origin_key", e."origin_ts", e."title", e."slug", e."description", e."message", e."labels", e."fields", e."errors", e."action"
FROM "entity_history" AS e
WHERE 1 = 1 AND "namespace" = ? AND "group" = ? AND "resource" = ? AND "name" = ? AND "resource_version" <= ?
ORDER BY "resource_version" DESC
LIMIT 1 FOR UPDATE NOWAIT;

View File

@ -1,3 +0,0 @@
INSERT INTO "entity"
("guid", "resource_version", "key", "group", "group_version", "resource", "namespace", "name", "folder", "meta", "body", "status", "size", "etag", "created_at", "created_by", "updated_at", "updated_by", "origin", "origin_key", "origin_ts", "title", "slug", "description", "message", "labels", "fields", "errors", "action")
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?);

View File

@ -1 +0,0 @@
DELETE FROM "entity_labels" WHERE 1 = 1 AND "guid" = ? AND "label" NOT IN (?);

View File

@ -1 +0,0 @@
DELETE FROM "entity_labels" WHERE 1 = 1 AND "guid" = ? AND "label" NOT IN (?, ?);

View File

@ -1,2 +0,0 @@
INSERT INTO "entity_labels" ("guid", "label", "value")
VALUES (?, ?, ?);

View File

@ -1,3 +0,0 @@
INSERT INTO "entity_labels" ("guid", "label", "value") VALUES
(?, ?, ?),
(?, ?, ?);

View File

@ -1,3 +0,0 @@
SELECT "guid", "name", "folder", "name", "slug"
FROM "entity"
WHERE 1 = 1 AND "group" = ? AND "resource" = ? AND "namespace" = ?;

View File

@ -1,3 +0,0 @@
SELECT e."guid", e."resource_version", e."key", e."group", e."group_version", e."resource", e."namespace", e."name", e."folder", e."meta", e."body", e."status", e."size", e."etag", e."created_at", e."created_by", e."updated_at", e."updated_by", e."origin", e."origin_key", e."origin_ts", e."title", e."slug", e."description", e."message", e."labels", e."fields", e."errors", e."action"
FROM "entity" AS e
WHERE 1 = 1 AND "namespace" = ? AND "group" = ? AND "resource" = ? AND "name" = ?;

View File

@ -1,2 +0,0 @@
UPDATE "entity" SET "resource_version" = ?, "group_version" = ?, "folder" = ?, "meta" = ?, "body" = ?, "status" = ?, "size" = ?, "etag" = ?, "updated_at" = ?, "updated_by" = ?, "origin" = ?, "origin_key" = ?, "origin_ts" = ?, "title" = ?, "slug" = ?, "description" = ?, "message" = ?, "labels" = ?, "fields" = ?, "errors" = ?, "action" = ?
WHERE "guid" = ?;

View File

@ -1,21 +0,0 @@
{
"entity": {
"guid": "b0199c60-5f3a-41be-9ba6-7a52f1de83ff",
"group": "playlist.grafana.app",
"resource": "playlists",
"namespace": "default",
"name": "adnj1llchbbi8a",
"group_version": "v0alpha1",
"key": "/group/playlist.grafana.app/resource/playlists/namespaces/default/name/adnj1llchbbi8a",
"meta": "eyJtZXRhZGF0YSI6eyJuYW1lIjoiYWRuajFsbGNoYmJpOGEiLCJuYW1lc3BhY2UiOiJkZWZhdWx0IiwidWlkIjoiYjAxOTljNjAtNWYzYS00MWJlLTliYTYtN2E1MmYxZGU4M2ZmIiwiY3JlYXRpb25UaW1lc3RhbXAiOiIyMDI0LTA2LTAyVDAzOjI4OjE3WiIsImFubm90YXRpb25zIjp7ImdyYWZhbmEuYXBwL29yaWdpbktleSI6IjIiLCJncmFmYW5hLmFwcC9vcmlnaW5OYW1lIjoiU1FMIiwiZ3JhZmFuYS5hcHAvb3JpZ2luVGltZXN0YW1wIjoiMjAyNC0wNi0wMlQwMzoyODoxN1oiLCJncmFmYW5hLmFwcC91cGRhdGVkVGltZXN0YW1wIjoiMjAyNC0wNi0wMlQwMzoyODoxN1oifX19",
"body": "eyJraW5kIjoiUGxheWxpc3QiLCJhcGlWZXJzaW9uIjoicGxheWxpc3QuZ3JhZmFuYS5hcHAvdjBhbHBoYTEiLCJtZXRhZGF0YSI6eyJuYW1lIjoiYWRuajFsbGNoYmJpOGEiLCJuYW1lc3BhY2UiOiJkZWZhdWx0IiwidWlkIjoiYjAxOTljNjAtNWYzYS00MWJlLTliYTYtN2E1MmYxZGU4M2ZmIiwiY3JlYXRpb25UaW1lc3RhbXAiOiIyMDI0LTA2LTAyVDAzOjI4OjE3WiIsImFubm90YXRpb25zIjp7ImdyYWZhbmEuYXBwL29yaWdpbktleSI6IjIiLCJncmFmYW5hLmFwcC9vcmlnaW5OYW1lIjoiU1FMIiwiZ3JhZmFuYS5hcHAvb3JpZ2luVGltZXN0YW1wIjoiMjAyNC0wNi0wMlQwMzoyODoxN1oiLCJncmFmYW5hLmFwcC91cGRhdGVkVGltZXN0YW1wIjoiMjAyNC0wNi0wMlQwMzoyODoxN1oifX0sInNwZWMiOnsidGl0bGUiOiJ0ZXN0IHBsYXlsaXN0IiwiaW50ZXJ2YWwiOiI1bSIsIml0ZW1zIjpbeyJ0eXBlIjoiZGFzaGJvYXJkX2J5X3VpZCIsInZhbHVlIjoiY2RuaXY1M2dtZDR3MGUifV19fQo=",
"title": "test playlist",
"created_at": 1717298897750,
"updated_at": 1717298897000,
"origin": {
"source": "SQL",
"key": "2",
"time": 1717298897000
}
}
}

View File

@ -1,3 +0,0 @@
{
"key": "/group/playlist.grafana.app/resource/playlists/namespaces/default/name/sdfsdfsdf"
}

View File

@ -1,18 +0,0 @@
{
"entity": {
"guid": "3c769b2e-aaa7-46f6-ab83-e038050a6a75",
"resource_version": 1,
"group": "playlist.grafana.app",
"resource": "playlists",
"namespace": "default",
"name": "sdfsdfsdf",
"group_version": "v0alpha1",
"key": "/group/playlist.grafana.app/resource/playlists/namespaces/default/name/sdfsdfsdf",
"meta": "eyJtZXRhZGF0YSI6eyJuYW1lIjoic2Rmc2Rmc2RmIiwibmFtZXNwYWNlIjoiZGVmYXVsdCIsInVpZCI6IjNjNzY5YjJlLWFhYTctNDZmNi1hYjgzLWUwMzgwNTBhNmE3NSIsInJlc291cmNlVmVyc2lvbiI6IjEiLCJjcmVhdGlvblRpbWVzdGFtcCI6IjIwMjQtMDYtMDJUMDM6NDk6MjlaIiwibWFuYWdlZEZpZWxkcyI6W3sibWFuYWdlciI6Ik1vemlsbGEiLCJvcGVyYXRpb24iOiJVcGRhdGUiLCJhcGlWZXJzaW9uIjoicGxheWxpc3QuZ3JhZmFuYS5hcHAvdjBhbHBoYTEiLCJ0aW1lIjoiMjAyNC0wNi0wMlQwMzo1Mzo1NVoiLCJmaWVsZHNUeXBlIjoiRmllbGRzVjEiLCJmaWVsZHNWMSI6eyJmOnNwZWMiOnsiZjppbnRlcnZhbCI6e30sImY6aXRlbXMiOnt9LCJmOnRpdGxlIjp7fX19fV19fQ==",
"body": "eyJraW5kIjoiUGxheWxpc3QiLCJhcGlWZXJzaW9uIjoicGxheWxpc3QuZ3JhZmFuYS5hcHAvdjBhbHBoYTEiLCJtZXRhZGF0YSI6eyJuYW1lIjoic2Rmc2Rmc2RmIiwibmFtZXNwYWNlIjoiZGVmYXVsdCIsInVpZCI6IjNjNzY5YjJlLWFhYTctNDZmNi1hYjgzLWUwMzgwNTBhNmE3NSIsInJlc291cmNlVmVyc2lvbiI6IjEiLCJjcmVhdGlvblRpbWVzdGFtcCI6IjIwMjQtMDYtMDJUMDM6NDk6MjlaIiwibWFuYWdlZEZpZWxkcyI6W3sibWFuYWdlciI6Ik1vemlsbGEiLCJvcGVyYXRpb24iOiJVcGRhdGUiLCJhcGlWZXJzaW9uIjoicGxheWxpc3QuZ3JhZmFuYS5hcHAvdjBhbHBoYTEiLCJ0aW1lIjoiMjAyNC0wNi0wMlQwMzo1Mzo1NVoiLCJmaWVsZHNUeXBlIjoiRmllbGRzVjEiLCJmaWVsZHNWMSI6eyJmOnNwZWMiOnsiZjppbnRlcnZhbCI6e30sImY6aXRlbXMiOnt9LCJmOnRpdGxlIjp7fX19fV19LCJzcGVjIjp7InRpdGxlIjoieHpjdnp4Y3Zxd2Vxd2UiLCJpbnRlcnZhbCI6IjVtIiwiaXRlbXMiOlt7InR5cGUiOiJkYXNoYm9hcmRfYnlfdWlkIiwidmFsdWUiOiJjZG5pdjUzZ21kNHcwZSJ9XX19Cg==",
"title": "xzcvzxcvqweqwe",
"created_at": 1717300169240,
"origin": {}
},
"previous_version": 1
}

View File

@ -1,21 +0,0 @@
{
"guid": "5842f146-07b9-405b-af76-b4c4b2612518",
"resource_version": 6,
"group": "playlist.grafana.app",
"resource": "playlists",
"namespace": "default",
"name": "sdfsdfsdf",
"group_version": "v0alpha1",
"key": "/group/playlist.grafana.app/resource/playlists/namespace/default/name/sdfsdfsdf",
"meta": "eyJtZXRhZGF0YSI6eyJuYW1lIjoic2Rmc2Rmc2RmIiwibmFtZXNwYWNlIjoiZGVmYXVsdCIsInVpZCI6IjAyZmVhOGVlLTk2ZDYtNGIzMy04ZGI5LTU5MmI0NzU4NTM4NSIsImNyZWF0aW9uVGltZXN0YW1wIjoiMjAyNC0wNi0wNFQxNToxODozNFoiLCJtYW5hZ2VkRmllbGRzIjpbeyJtYW5hZ2VyIjoiTW96aWxsYSIsIm9wZXJhdGlvbiI6IlVwZGF0ZSIsImFwaVZlcnNpb24iOiJwbGF5bGlzdC5ncmFmYW5hLmFwcC92MGFscGhhMSIsInRpbWUiOiIyMDI0LTA2LTA0VDE1OjE4OjM0WiIsImZpZWxkc1R5cGUiOiJGaWVsZHNWMSIsImZpZWxkc1YxIjp7ImY6c3BlYyI6eyJmOmludGVydmFsIjp7fSwiZjppdGVtcyI6e30sImY6dGl0bGUiOnt9fX19XX19",
"body": "eyJraW5kIjoiUGxheWxpc3QiLCJhcGlWZXJzaW9uIjoicGxheWxpc3QuZ3JhZmFuYS5hcHAvdjBhbHBoYTEiLCJtZXRhZGF0YSI6eyJuYW1lIjoic2Rmc2Rmc2RmIiwibmFtZXNwYWNlIjoiZGVmYXVsdCIsInVpZCI6IjAyZmVhOGVlLTk2ZDYtNGIzMy04ZGI5LTU5MmI0NzU4NTM4NSIsImNyZWF0aW9uVGltZXN0YW1wIjoiMjAyNC0wNi0wNFQxNToxODozNFoiLCJtYW5hZ2VkRmllbGRzIjpbeyJtYW5hZ2VyIjoiTW96aWxsYSIsIm9wZXJhdGlvbiI6IlVwZGF0ZSIsImFwaVZlcnNpb24iOiJwbGF5bGlzdC5ncmFmYW5hLmFwcC92MGFscGhhMSIsInRpbWUiOiIyMDI0LTA2LTA0VDE1OjE4OjM0WiIsImZpZWxkc1R5cGUiOiJGaWVsZHNWMSIsImZpZWxkc1YxIjp7ImY6c3BlYyI6eyJmOmludGVydmFsIjp7fSwiZjppdGVtcyI6e30sImY6dGl0bGUiOnt9fX19XX0sInNwZWMiOnsidGl0bGUiOiJ4emN2enhjdiIsImludGVydmFsIjoiNW0iLCJpdGVtcyI6W3sidHlwZSI6ImRhc2hib2FyZF9ieV91aWQiLCJ2YWx1ZSI6ImNkbml2NTNnbWQ0dzBlIn1dfX0K",
"title": "xzcvzxcv",
"size": 540,
"ETag": "3225612903101e3b94f458cfc79baf89",
"created_at": 1717514314565,
"created_by": "user:1:admin",
"updated_at": 1717514314565,
"updated_by": "user:1:admin",
"origin": {},
"action": 1
}

View File

@ -1,3 +0,0 @@
SELECT "resource_version", "created_at", "updated_at"
FROM "kind_version"
WHERE 1 = 1 AND "group" = ? AND "resource" = ?;

View File

@ -1,4 +0,0 @@
UPDATE "kind_version"
SET "resource_version" = ? + 1,
"updated_at" = ?
WHERE 1 = 1 AND "group" = ? AND "resource" = ? AND "resource_version" = ?;

View File

@ -1,3 +0,0 @@
INSERT INTO "kind_version"
("group", "resource", "resource_version", "created_at", "updated_at")
VALUES (?, ?, 1, ?, ?);

View File

@ -1,3 +0,0 @@
SELECT "resource_version"
FROM "kind_version"
WHERE 1 = 1 AND "group" = ? AND "resource" = ? FOR UPDATE;

View File

@ -1,3 +0,0 @@
SELECT "resource_version"
FROM "kind_version"
WHERE 1 = 1 AND "group" = $1 AND "resource" = $2 FOR UPDATE;

View File

@ -1,3 +0,0 @@
SELECT "resource_version"
FROM "kind_version"
WHERE 1 = 1 AND "group" = ? AND "resource" = ?;

View File

@ -1,155 +0,0 @@
// 🌟 This was machine generated. Do not edit. 🌟
//
// Frame[0]
// Name:
// Dimensions: 7 Fields by 4 Rows
// +----------------+----------------+----------------+---------------+---------------+---------------+----------------------------------------------------------------------------------------------------------------------------------------+
// | Name: UID | Name: name | Name: slug | Name: depth | Name: left | Name: right | Name: tree |
// | Labels: | Labels: | Labels: | Labels: | Labels: | Labels: | Labels: |
// | Type: []string | Type: []string | Type: []string | Type: []int32 | Type: []int32 | Type: []int32 | Type: []json.RawMessage |
// +----------------+----------------+----------------+---------------+---------------+---------------+----------------------------------------------------------------------------------------------------------------------------------------+
// | | Root | | 0 | 1 | 8 | [] |
// | A | A | /a/ | 1 | 2 | 5 | [{"guid":"GA","uid":"A","name":"A","slug":"/a/","ParentUID":""}] |
// | AA | AA | /a/aa/ | 2 | 3 | 4 | [{"guid":"GA","uid":"A","name":"A","slug":"/a/","ParentUID":""},{"guid":"GAA","uid":"AA","name":"AA","slug":"/a/aa/","ParentUID":"A"}] |
// | B | B | /b/ | 1 | 6 | 7 | [{"guid":"GB","uid":"B","name":"B","slug":"/b/","ParentUID":""}] |
// +----------------+----------------+----------------+---------------+---------------+---------------+----------------------------------------------------------------------------------------------------------------------------------------+
//
//
// 🌟 This was machine generated. Do not edit. 🌟
{
"status": 200,
"frames": [
{
"schema": {
"fields": [
{
"name": "UID",
"type": "string",
"typeInfo": {
"frame": "string"
}
},
{
"name": "name",
"type": "string",
"typeInfo": {
"frame": "string"
}
},
{
"name": "slug",
"type": "string",
"typeInfo": {
"frame": "string"
}
},
{
"name": "depth",
"type": "number",
"typeInfo": {
"frame": "int32"
}
},
{
"name": "left",
"type": "number",
"typeInfo": {
"frame": "int32"
}
},
{
"name": "right",
"type": "number",
"typeInfo": {
"frame": "int32"
}
},
{
"name": "tree",
"type": "other",
"typeInfo": {
"frame": "json.RawMessage"
}
}
]
},
"data": {
"values": [
[
"",
"A",
"AA",
"B"
],
[
"Root",
"A",
"AA",
"B"
],
[
"",
"/a/",
"/a/aa/",
"/b/"
],
[
0,
1,
2,
1
],
[
1,
2,
3,
6
],
[
8,
5,
4,
7
],
[
[],
[
{
"guid": "GA",
"uid": "A",
"name": "A",
"slug": "/a/",
"ParentUID": ""
}
],
[
{
"guid": "GA",
"uid": "A",
"name": "A",
"slug": "/a/",
"ParentUID": ""
},
{
"guid": "GAA",
"uid": "AA",
"name": "AA",
"slug": "/a/aa/",
"ParentUID": "A"
}
],
[
{
"guid": "GB",
"uid": "B",
"name": "B",
"slug": "/b/",
"ParentUID": ""
}
]
]
]
}
}
]
}

View File

@ -1,203 +0,0 @@
package sqlstash
import (
"cmp"
"context"
"fmt"
"maps"
"time"
folder "github.com/grafana/grafana/pkg/apis/folder/v0alpha1"
grafanaregistry "github.com/grafana/grafana/pkg/apiserver/registry/generic"
"github.com/grafana/grafana/pkg/services/store/entity"
"github.com/grafana/grafana/pkg/services/store/entity/db"
"github.com/grafana/grafana/pkg/storage/unified/sql/sqltemplate"
)
// Update applies the changes in r to the latest stored version of the entity,
// all within a single READ COMMITTED transaction: it re-reads the current
// entity, merges the request into it, reconciles labels, atomically bumps the
// kind's resource version, updates the entity row, appends to entity_history,
// and rebuilds the folder tree when the entity is a folder.
//
// Bug fix: the error from entityForUpdate was previously returned as
// fmt.Errorf(""), discarding the underlying error and yielding an empty
// message; it is now wrapped with context.
func (s *sqlEntityServer) Update(ctx context.Context, r *entity.UpdateEntityRequest) (*entity.UpdateEntityResponse, error) {
	ctx, span := s.tracer.Start(ctx, "storage_server.Update")
	defer span.End()

	if err := s.Init(); err != nil {
		return nil, err
	}

	key, err := grafanaregistry.ParseKey(r.Entity.Key)
	if err != nil {
		return nil, fmt.Errorf("update entity: parse entity key: %w", err)
	}

	updatedBy, err := getCurrentUser(ctx)
	if err != nil {
		return nil, fmt.Errorf("update entity: get user from context: %w", err)
	}

	ret := new(entity.UpdateEntityResponse)
	err = s.sqlDB.WithTx(ctx, ReadCommitted, func(ctx context.Context, tx db.Tx) error {
		// Pre-locking: get the latest version of the entity
		oldEntity, err := readEntity(ctx, tx, s.sqlDialect, key, r.PreviousVersion, true, false)
		if err != nil {
			return err
		}

		// build the entity from the request and the old data
		newEntity, err := entityForUpdate(updatedBy, oldEntity.Entity, r.Entity)
		if err != nil {
			return fmt.Errorf("entity for update: %w", err)
		}

		keepLabels, insertLabels := diffLabels(oldEntity.Entity.Labels, r.Entity.Labels)

		// Pre-locking: delete old labels that are not carried over
		if len(keepLabels) > 0 {
			delLabelsReq := sqlEntityLabelsDeleteRequest{
				SQLTemplate: sqltemplate.New(s.sqlDialect),
				GUID:        oldEntity.Guid,
				KeepLabels:  keepLabels,
			}
			_, err = exec(ctx, tx, sqlEntityLabelsDelete, delLabelsReq)
			if err != nil {
				return fmt.Errorf("delete old labels: %w", err)
			}
		}

		// Pre-locking: insert new labels
		if len(insertLabels) > 0 {
			insLabelsReq := sqlEntityLabelsInsertRequest{
				SQLTemplate: sqltemplate.New(s.sqlDialect),
				GUID:        oldEntity.Guid,
				Labels:      insertLabels,
			}
			_, err = exec(ctx, tx, sqlEntityLabelsInsert, insLabelsReq)
			if err != nil {
				return fmt.Errorf("insert new labels: %w", err)
			}
		}

		// up to this point, we have done all the work possible before having
		// to lock kind_version

		// 1. Atomically increment resource version for this kind
		newVersion, err := kindVersionAtomicInc(ctx, tx, s.sqlDialect, key.Group, key.Resource)
		if err != nil {
			return err
		}
		newEntity.ResourceVersion = newVersion

		// 2. Update entity
		updEntityReq := sqlEntityUpdateRequest{
			SQLTemplate: sqltemplate.New(s.sqlDialect),
			Entity:      newEntity,
		}
		if _, err = exec(ctx, tx, sqlEntityUpdate, updEntityReq); err != nil {
			return fmt.Errorf("update entity: %w", err)
		}

		// 3. Insert into entity history
		insEntity := sqlEntityInsertRequest{
			SQLTemplate: sqltemplate.New(s.sqlDialect),
			Entity:      newEntity,
		}
		if _, err = exec(ctx, tx, sqlEntityInsert, insEntity); err != nil {
			return fmt.Errorf("insert into entity_history: %w", err)
		}

		// 4. Rebuild the whole folder tree structure if we're updating a folder
		if newEntity.Group == folder.GROUP && newEntity.Resource == folder.RESOURCE {
			if err = s.updateFolderTree(ctx, tx, key.Namespace); err != nil {
				return fmt.Errorf("rebuild folder tree structure: %w", err)
			}
		}

		// success
		ret.Entity = newEntity.Entity
		ret.Status = entity.UpdateEntityResponse_UPDATED

		return nil
	})
	if err != nil {
		// TODO: should we define the "Error" field here and how? (i.e. how
		// to determine what information can be disclosed to the user?)
		return nil, fmt.Errorf("update entity: %w", err)
	}

	return ret, nil
}
// diffLabels compares the old and new label sets. It returns the keys whose
// value is unchanged (keepLabels) and a copy of the new labels with those
// unchanged entries removed (insertLabels), i.e. the entries that must be
// written to the database.
func diffLabels(oldLabels, newLabels map[string]string) (keepLabels []string, insertLabels map[string]string) {
	insertLabels = maps.Clone(newLabels)
	for key, oldVal := range oldLabels {
		if newVal, present := insertLabels[key]; present && newVal == oldVal {
			// same key and same value: nothing to write for this label
			keepLabels = append(keepLabels, key)
			delete(insertLabels, key)
		}
	}

	return keepLabels, insertLabels
}
// entityForUpdate populates a *returnsEntity taking the relevant parts from
// the requested update and keeping the necessary values from the old one.
// Identity fields (guid, key, group, resource, namespace, name, creation
// info) are always carried over from the old entity; most other fields fall
// back to the old value only when the request leaves them empty. The ETag is
// recomputed only if the request supplied any of body/meta/status.
func entityForUpdate(updatedBy string, oldEntity, newEntity *entity.Entity) (*returnsEntity, error) {
	// treat a nil Origin on either side as an empty one
	newOrigin := ptrOr(newEntity.Origin)
	oldOrigin := ptrOr(oldEntity.Origin)
	ret := &returnsEntity{
		Entity: &entity.Entity{
			Guid: oldEntity.Guid, // read-only
			// ResourceVersion is later set after reading `kind_version` table
			Key:          oldEntity.Key,   // read-only
			Group:        oldEntity.Group, // read-only
			GroupVersion: cmp.Or(newEntity.GroupVersion, oldEntity.GroupVersion),
			Resource:     oldEntity.Resource,  // read-only
			Namespace:    oldEntity.Namespace, // read-only
			Name:         oldEntity.Name,      // read-only
			Folder:       cmp.Or(newEntity.Folder, oldEntity.Folder),
			Meta:         sliceOr(newEntity.Meta, oldEntity.Meta),
			Body:         sliceOr(newEntity.Body, oldEntity.Body),
			Status:       sliceOr(newEntity.Status, oldEntity.Status),
			Size:         int64(cmp.Or(len(newEntity.Body), len(oldEntity.Body))),
			ETag:         cmp.Or(newEntity.ETag, oldEntity.ETag),
			CreatedAt:    oldEntity.CreatedAt, // read-only
			CreatedBy:    oldEntity.CreatedBy, // read-only
			UpdatedAt:    time.Now().UnixMilli(),
			UpdatedBy:    updatedBy,
			Origin: &entity.EntityOriginInfo{
				Source: cmp.Or(newOrigin.Source, oldOrigin.Source),
				Key:    cmp.Or(newOrigin.Key, oldOrigin.Key),
				Time:   cmp.Or(newOrigin.Time, oldOrigin.Time),
			},
			Title:       cmp.Or(newEntity.Title, oldEntity.Title),
			Slug:        cmp.Or(newEntity.Slug, oldEntity.Slug),
			Description: cmp.Or(newEntity.Description, oldEntity.Description),
			Message:     cmp.Or(newEntity.Message, oldEntity.Message),
			Labels:      mapOr(newEntity.Labels, oldEntity.Labels),
			Fields:      mapOr(newEntity.Fields, oldEntity.Fields),
			Errors:      newEntity.Errors,
			Action:      entity.Entity_UPDATED,
		},
	}
	// only recompute the ETag when the request changed any of the payloads;
	// note it hashes the merged (post-fallback) values
	if len(newEntity.Body) != 0 ||
		len(newEntity.Meta) != 0 ||
		len(newEntity.Status) != 0 {
		ret.ETag = createETag(ret.Body, ret.Meta, ret.Status)
	}
	// serialize labels/fields/errors for storage
	if err := ret.marshal(); err != nil {
		return nil, fmt.Errorf("serialize entity data for db: %w", err)
	}
	return ret, nil
}

View File

@ -1,151 +0,0 @@
package sqlstash
import (
"context"
"crypto/md5"
"database/sql"
"encoding/hex"
"fmt"
"text/template"
"github.com/grafana/grafana/pkg/apimachinery/identity"
"github.com/grafana/grafana/pkg/services/store/entity/db"
"github.com/grafana/grafana/pkg/storage/unified/sql/sqltemplate"
)
// createETag returns a hex-encoded MD5 digest of the entity payloads, hashed
// in the fixed order meta, body, status.
func createETag(body []byte, meta []byte, status []byte) string {
	// TODO: can we change this to something more modern like sha256?
	digest := md5.New()
	for _, part := range [][]byte{meta, body, status} {
		_, _ = digest.Write(part) // hash.Hash.Write never returns an error
	}

	return hex.EncodeToString(digest.Sum(nil))
}
// getCurrentUser returns a string identifying the user making a request with
// the given context, failing with ErrUserNotFoundInContext when no requester
// is attached to it.
func getCurrentUser(ctx context.Context) (string, error) {
	requester, err := identity.GetRequester(ctx)
	if err != nil || requester == nil {
		return "", fmt.Errorf("%w: %w", ErrUserNotFoundInContext, err)
	}

	return requester.GetUID(), nil
}
// ptrOr returns the first non-nil pointer in the list, or a freshly allocated
// pointer to the zero value when every argument is nil (or none is given).
func ptrOr[P ~*E, E any](ps ...P) P {
	for i := range ps {
		if ps[i] != nil {
			return ps[i]
		}
	}

	return P(new(E))
}
// sliceOr returns the first argument holding at least one element; if none
// does, it returns a new empty (non-nil) slice.
func sliceOr[S ~[]E, E comparable](vals ...S) S {
	for i := range vals {
		if len(vals[i]) != 0 {
			return vals[i]
		}
	}

	return S{}
}
// mapOr returns the first argument holding at least one entry; if none does,
// it returns a new empty (non-nil) map.
func mapOr[M ~map[K]V, K comparable, V any](vals ...M) M {
	for i := range vals {
		if len(vals[i]) != 0 {
			return vals[i]
		}
	}

	return M{}
}
// queryRow uses `req` as input and output for a single-row returning query
// generated with `tmpl`, and executed in `x`. Errors from the driver are
// wrapped in SQLError, which captures the rendered query, its arguments and
// the scan destinations for later inspection.
func queryRow[T any](ctx context.Context, x db.ContextExecer, tmpl *template.Template, req sqltemplate.WithResults[T]) (T, error) {
	var zero T
	// reject incomplete requests before touching the database
	if err := req.Validate(); err != nil {
		return zero, fmt.Errorf("query: invalid request for template %q: %w",
			tmpl.Name(), err)
	}
	// render the SQL template with the request data
	rawQuery, err := sqltemplate.Execute(tmpl, req)
	if err != nil {
		return zero, fmt.Errorf("execute template: %w", err)
	}
	query := sqltemplate.FormatSQL(rawQuery)
	row := x.QueryRowContext(ctx, query, req.GetArgs()...)
	if err := row.Err(); err != nil {
		return zero, SQLError{
			Err:          err,
			CallType:     "QueryRow",
			TemplateName: tmpl.Name(),
			arguments:    req.GetArgs(),
			ScanDest:     req.GetScanDest(),
			Query:        query,
			RawQuery:     rawQuery,
		}
	}
	// scan the row and assemble the typed result
	return scanRow(row, req)
}
// scanner abstracts over *sql.Row and *sql.Rows so scanRow can operate on
// either.
type scanner interface {
	Scan(dest ...any) error
}
// scanRow is used on *sql.Row and *sql.Rows, and is factored out here not to
// improve code reuse, but rather for ease of testing.
func scanRow[T any](sc scanner, req sqltemplate.WithResults[T]) (zero T, err error) {
	// scan the columns into the destinations declared by the request
	if err = sc.Scan(req.GetScanDest()...); err != nil {
		return zero, fmt.Errorf("row scan: %w", err)
	}
	// let the request assemble its typed result from the scanned values
	res, err := req.Results()
	if err != nil {
		return zero, fmt.Errorf("row results: %w", err)
	}
	return res, nil
}
// exec uses `req` as input for a non-data returning query generated with
// `tmpl`, and executed in `x`. Driver errors are wrapped in SQLError, which
// captures the rendered query and its arguments for later inspection.
func exec(ctx context.Context, x db.ContextExecer, tmpl *template.Template, req sqltemplate.SQLTemplateIface) (sql.Result, error) {
	// reject incomplete requests before touching the database
	if err := req.Validate(); err != nil {
		return nil, fmt.Errorf("exec: invalid request for template %q: %w",
			tmpl.Name(), err)
	}
	// render the SQL template with the request data
	rawQuery, err := sqltemplate.Execute(tmpl, req)
	if err != nil {
		return nil, fmt.Errorf("execute template: %w", err)
	}
	query := sqltemplate.FormatSQL(rawQuery)
	res, err := x.ExecContext(ctx, query, req.GetArgs()...)
	if err != nil {
		return nil, SQLError{
			Err:          err,
			CallType:     "Exec",
			TemplateName: tmpl.Name(),
			arguments:    req.GetArgs(),
			Query:        query,
			RawQuery:     rawQuery,
		}
	}
	return res, nil
}

View File

@ -1,525 +0,0 @@
package sqlstash
import (
"database/sql"
"database/sql/driver"
"errors"
"fmt"
"io"
"regexp"
"strings"
"testing"
"text/template"
sqlmock "github.com/DATA-DOG/go-sqlmock"
"github.com/stretchr/testify/require"
"github.com/grafana/grafana/pkg/services/store/entity/db"
"github.com/grafana/grafana/pkg/services/store/entity/db/dbimpl"
"github.com/grafana/grafana/pkg/storage/unified/sql/sqltemplate"
sqltemplateMocks "github.com/grafana/grafana/pkg/storage/unified/sql/sqltemplate/mocks"
"github.com/grafana/grafana/pkg/util/testutil"
)
// newMockDBNopSQL returns a db.DB and a sqlmock.Sqlmock that doesn't validate
// SQL. This is only meant to be used to test the wrapping utilities exec,
// query and queryRow, where the actual SQL is not relevant to the unit tests,
// but rather how the possible derived error conditions are handled.
func newMockDBNopSQL(t *testing.T) (db.DB, sqlmock.Sqlmock) {
	t.Helper()
	db, mock, err := sqlmock.New(
		sqlmock.MonitorPingsOption(true),
		sqlmock.QueryMatcherOption(sqlmock.QueryMatcherFunc(
			// accept any SQL without inspecting it
			func(expectedSQL, actualSQL string) error {
				return nil
			},
		)),
	)
	return newUnitTestDB(t, db, mock, err)
}
// newMockDBMatchWords returns a db.DB and a sqlmock.Sqlmock that will match SQL
// by splitting the expected SQL string into words, and then try to find all of
// them in the actual SQL, in the given order, case insensitively. Prepend a
// word with a `!` to say that word should not be found.
//
// Bug fixes: the "should not contain" failure message previously read
// "fragent"; and the error from regexp.Compile was silently dropped — it is
// now wrapped into the returned error.
func newMockDBMatchWords(t *testing.T) (db.DB, sqlmock.Sqlmock) {
	t.Helper()
	db, mock, err := sqlmock.New(
		sqlmock.MonitorPingsOption(true),
		sqlmock.QueryMatcherOption(
			sqlmock.QueryMatcherFunc(func(expectedSQL, actualSQL string) error {
				// normalize both sides for case-insensitive comparison
				actualSQL = strings.ToLower(sqltemplate.FormatSQL(actualSQL))
				expectedSQL = strings.ToLower(expectedSQL)

				// walk the expected words in order, advancing `offset` into
				// the actual SQL as each one is found
				var offset int
				for _, vv := range mockDBMatchWordsRE.FindAllStringSubmatch(expectedSQL, -1) {
					v := vv[1]

					// a leading `!` negates the word: it must NOT appear in
					// the remainder of the actual SQL
					var shouldNotMatch bool
					if v != "" && v[0] == '!' {
						v = v[1:]
						shouldNotMatch = true
					}
					if v == "" {
						return fmt.Errorf("invalid expected word %q in %q", v,
							expectedSQL)
					}

					reWord, err := regexp.Compile(`\b` + regexp.QuoteMeta(v) + `\b`)
					if err != nil {
						return fmt.Errorf("compile word %q from expected SQL %q: %w", v,
							expectedSQL, err)
					}

					if shouldNotMatch {
						if reWord.MatchString(actualSQL[offset:]) {
							return fmt.Errorf("actual SQL fragment should not contain "+
								"%q but it does\n\tFragment: %s\n\tFull SQL: %s",
								v, actualSQL[offset:], actualSQL)
						}
					} else {
						loc := reWord.FindStringIndex(actualSQL[offset:])
						if len(loc) == 0 {
							return fmt.Errorf("actual SQL fragment should contain "+
								"%q but it doesn't\n\tFragment: %s\n\tFull SQL: %s",
								v, actualSQL[offset:], actualSQL)
						}
						offset = loc[1] // advance the offset
					}
				}

				return nil
			},
			),
		),
	)
	return newUnitTestDB(t, db, mock, err)
}
// mockDBMatchWordsRE extracts the expected "words" (optionally prefixed with
// `!` for negative matches) that newMockDBMatchWords looks for in actual SQL.
var mockDBMatchWordsRE = regexp.MustCompile(`(?:\W|\A)(!?\w+)\b`)
// newUnitTestDB asserts that the sqlmock construction succeeded and wraps the
// raw *sql.DB in the db.DB abstraction used by the store under test.
func newUnitTestDB(t *testing.T, sqlDB *sql.DB, mock sqlmock.Sqlmock, err error) (db.DB, sqlmock.Sqlmock) {
	t.Helper()
	require.NoError(t, err)

	return dbimpl.NewDB(sqlDB, "sqlmock"), mock
}
// mockResults aids in testing code paths with queries returning a large number
// of values, like those returning *entity.Entity. This is because we want to
// emulate returning the same row columns and row values that a real database
// would. It executes the same SQL template that production code is expected to
// use, so all the expected fields get populated consistently.
type mockResults[T any] struct {
	t    *testing.T                 // test handle for helper/assertion calls
	tmpl *template.Template         // SQL template executed to derive columns/scan targets
	data sqltemplate.WithResults[T] // request object providing column names and scan destinations
	rows *sqlmock.Rows              // accumulated mock rows, returned by Rows()
}
// newMockResults returns a new *mockResults. To emulate a call returning zero
// rows, call the Rows method immediately afterward without adding data.
func newMockResults[T any](t *testing.T, mock sqlmock.Sqlmock, tmpl *template.Template, data sqltemplate.WithResults[T]) *mockResults[T] {
	t.Helper()

	// execute the template once so `data` learns its column set
	data.Reset()
	require.NoError(t, tmpl.Execute(io.Discard, data))

	return &mockResults[T]{
		t:    t,
		tmpl: tmpl,
		data: data,
		rows: mock.NewRows(data.GetColNames()),
	}
}
// AddCurrentData uses the values currently held by the `data` object given at
// construction time to append one expected row. The scan destinations are
// captured as pointers, so replace the internals of `data` with fresh values
// before each call to emit different rows.
func (r *mockResults[T]) AddCurrentData() *mockResults[T] {
	r.t.Helper()

	r.data.Reset()
	require.NoError(r.t, r.tmpl.Execute(io.Discard, r.data))

	// convert the scan destinations into driver values for sqlmock
	scanDest := r.data.GetScanDest()
	values := make([]driver.Value, 0, len(scanDest))
	for _, ptr := range scanDest {
		values = append(values, ptr)
	}
	r.rows.AddRow(values...)

	return r
}
// Rows returns the *sqlmock.Rows object built so far, ready to be passed to
// sqlmock's WillReturnRows.
func (r *mockResults[T]) Rows() *sqlmock.Rows {
	return r.rows
}
// TestCreateETag verifies that hashing all-nil inputs yields the well-known
// MD5 digest of empty input.
func TestCreateETag(t *testing.T) {
	t.Parallel()

	v := createETag(nil, nil, nil)
	require.Equal(t, "d41d8cd98f00b204e9800998ecf8427e", v)
}
// TestGetCurrentUser checks that getCurrentUser resolves a username from the
// test context, and that it fails with ErrUserNotFoundInContext once the user
// is removed from the context.
func TestGetCurrentUser(t *testing.T) {
	t.Parallel()

	ctx := testutil.NewDefaultTestContext(t)
	username, err := getCurrentUser(ctx)
	require.NotEmpty(t, username)
	require.NoError(t, err)

	// dropping the user must surface the typed sentinel error
	ctx = ctx.WithUser(nil)
	username, err = getCurrentUser(ctx)
	require.Empty(t, username)
	require.Error(t, err)
	require.ErrorIs(t, err, ErrUserNotFoundInContext)
}
// TestPtrOr checks that ptrOr returns a non-nil pointer in all cases: a
// pointer to the zero value when no non-nil argument is given, and otherwise
// a pointer carrying the value of the first non-nil argument.
func TestPtrOr(t *testing.T) {
	t.Parallel()

	// no arguments: zero-valued, non-nil pointer
	p := ptrOr[*int]()
	require.NotNil(t, p)
	require.Zero(t, *p)

	// all-nil arguments: still zero-valued, non-nil
	p = ptrOr[*int](nil, nil, nil, nil, nil, nil)
	require.NotNil(t, p)
	require.Zero(t, *p)

	v := 42
	v2 := 5

	// first non-nil wins, later non-nils are ignored
	p = ptrOr(nil, nil, nil, &v, nil, &v2, nil, nil)
	require.NotNil(t, p)
	require.Equal(t, v, *p)

	p = ptrOr(nil, nil, nil, &v)
	require.NotNil(t, p)
	require.Equal(t, v, *p)
}
// TestSliceOr checks that sliceOr always returns a usable non-nil slice: empty
// when every argument is nil/empty, and otherwise the first non-empty slice.
func TestSliceOr(t *testing.T) {
	t.Parallel()

	// no arguments: empty, non-nil
	p := sliceOr[[]int]()
	require.NotNil(t, p)
	require.Len(t, p, 0)

	// all-nil arguments
	p = sliceOr[[]int](nil, nil, nil, nil)
	require.NotNil(t, p)
	require.Len(t, p, 0)

	// all-empty arguments
	p = sliceOr([]int{}, []int{}, []int{}, []int{})
	require.NotNil(t, p)
	require.Len(t, p, 0)

	v := []int{1, 2}

	// first non-empty slice wins, later non-empties are ignored
	p = sliceOr([]int{}, nil, []int{}, v, nil, []int{}, []int{10}, nil)
	require.NotNil(t, p)
	require.Equal(t, v, p)

	p = sliceOr([]int{}, nil, []int{}, v)
	require.NotNil(t, p)
	require.Equal(t, v, p)
}
// TestMapOr checks that mapOr always returns a usable non-nil map: empty when
// every argument is nil/empty, and otherwise the first non-empty map.
func TestMapOr(t *testing.T) {
	t.Parallel()

	// no arguments: empty, non-nil
	p := mapOr[map[string]int]()
	require.NotNil(t, p)
	require.Len(t, p, 0)

	// nil/empty arguments only
	p = mapOr(nil, map[string]int(nil), nil, map[string]int{}, nil)
	require.NotNil(t, p)
	require.Len(t, p, 0)

	v := map[string]int{"a": 0, "b": 1}
	v2 := map[string]int{"c": 2, "d": 3}

	// first non-empty map wins, later non-empties are ignored
	p = mapOr(nil, map[string]int(nil), v, v2, nil, map[string]int{}, nil)
	require.NotNil(t, p)
	require.Equal(t, v, p)

	p = mapOr(nil, map[string]int(nil), v)
	require.NotNil(t, p)
	require.Equal(t, v, p)
}
var (
	// validTestTmpl renders without needing any data.
	validTestTmpl = template.Must(template.New("test").Parse("nothing special"))
	// invalidTestTmpl has no parsed definition, so executing it always fails.
	invalidTestTmpl = template.New("no definition should fail to exec")
	// errTest is a sentinel used to drive error paths through the mocks.
	errTest = errors.New("because of reasons")
)
// expectRows is a testing helper to keep mocks in sync when adding rows to a
// mocked SQL result. This is a helper to test `query` and `queryRow`.
type expectRows[T any] struct {
	*sqlmock.Rows
	ExpectedResults []T // values the mocked query should yield, in insertion order

	req *sqltemplateMocks.WithResults[T] // mock whose expectations Add() extends
}
// newReturnsRow builds an expectRows helper bound to the given request mock,
// starting from an empty sqlmock row set.
func newReturnsRow[T any](dbmock sqlmock.Sqlmock, req *sqltemplateMocks.WithResults[T]) *expectRows[T] {
	rows := dbmock.NewRows(nil)

	return &expectRows[T]{
		Rows: rows,
		req:  req,
	}
}
// Add adds a new value that should be returned by the `query` or `queryRow`
// operation: it queues the matching mock expectations (one GetScanDest and one
// Results call per row), appends an empty sqlmock row to drive the scan loop,
// and records the value for later assertions.
func (r *expectRows[T]) Add(value T, err error) *expectRows[T] {
	r.req.EXPECT().GetScanDest().Return(nil).Once()
	r.req.EXPECT().Results().Return(value, err).Once()
	r.Rows.AddRow()
	r.ExpectedResults = append(r.ExpectedResults, value)

	return r
}
// TestQueryRow covers the happy path of the queryRow wrapper plus its error
// paths: request validation failure, template execution failure, and SQL
// execution failure (which must surface as a SQLError).
func TestQueryRow(t *testing.T) {
	t.Parallel()

	t.Run("happy path", func(t *testing.T) {
		t.Parallel()

		// test declarations
		ctx := testutil.NewDefaultTestContext(t)
		req := sqltemplateMocks.NewWithResults[int64](t)
		db, dbmock := newMockDBNopSQL(t)
		rows := newReturnsRow(dbmock, req)

		// setup expectations
		req.EXPECT().Validate().Return(nil).Once()
		req.EXPECT().GetArgs().Return(nil).Once()
		rows.Add(1, nil)
		dbmock.ExpectQuery("").WillReturnRows(rows.Rows)

		// execute and assert
		res, err := queryRow(ctx, db, validTestTmpl, req)
		require.NoError(t, err)
		require.Equal(t, rows.ExpectedResults[0], res)
	})

	t.Run("invalid request", func(t *testing.T) {
		t.Parallel()

		// test declarations
		ctx := testutil.NewDefaultTestContext(t)
		req := sqltemplateMocks.NewWithResults[int64](t)
		db, _ := newMockDBNopSQL(t)

		// setup expectations: validation fails before any SQL runs
		req.EXPECT().Validate().Return(errTest).Once()

		// execute and assert
		res, err := queryRow(ctx, db, invalidTestTmpl, req)
		require.Zero(t, res)
		require.Error(t, err)
		require.ErrorContains(t, err, "invalid request")
	})

	t.Run("error executing template", func(t *testing.T) {
		t.Parallel()

		// test declarations
		ctx := testutil.NewDefaultTestContext(t)
		req := sqltemplateMocks.NewWithResults[int64](t)
		db, _ := newMockDBNopSQL(t)

		// setup expectations: valid request but the template cannot execute
		req.EXPECT().Validate().Return(nil).Once()

		// execute and assert
		res, err := queryRow(ctx, db, invalidTestTmpl, req)
		require.Zero(t, res)
		require.Error(t, err)
		require.ErrorContains(t, err, "execute template")
	})

	t.Run("error executing query", func(t *testing.T) {
		t.Parallel()

		// test declarations
		ctx := testutil.NewDefaultTestContext(t)
		req := sqltemplateMocks.NewWithResults[int64](t)
		db, dbmock := newMockDBNopSQL(t)

		// setup expectations: the driver itself fails
		req.EXPECT().Validate().Return(nil).Once()
		req.EXPECT().GetArgs().Return(nil)
		req.EXPECT().GetScanDest().Return(nil).Maybe()
		dbmock.ExpectQuery("").WillReturnError(errTest)

		// execute and assert
		res, err := queryRow(ctx, db, validTestTmpl, req)
		require.Zero(t, res)
		require.Error(t, err)
		require.ErrorAs(t, err, new(SQLError))
	})
}
// scannerFunc adapts a plain function to the `scanner` interface, in the same
// spirit as http.HandlerFunc adapting functions to http.Handler.
type scannerFunc func(dest ...any) error

// Scan implements the scanner interface by delegating to the wrapped function.
func (f scannerFunc) Scan(dest ...any) error {
	return f(dest...)
}
// TestScanRow covers scanRow's happy path and its two failure modes: the
// underlying Scan call failing, and the request's Results() call failing.
func TestScanRow(t *testing.T) {
	t.Parallel()

	const value int64 = 1

	t.Run("happy path", func(t *testing.T) {
		t.Parallel()

		// test declarations
		req := sqltemplateMocks.NewWithResults[int64](t)
		sc := scannerFunc(func(dest ...any) error {
			return nil
		})

		// setup expectations
		req.EXPECT().GetScanDest().Return(nil).Once()
		req.EXPECT().Results().Return(value, nil).Once()

		// execute and assert
		res, err := scanRow(sc, req)
		require.NoError(t, err)
		require.Equal(t, value, res)
	})

	t.Run("scan error", func(t *testing.T) {
		t.Parallel()

		// test declarations
		req := sqltemplateMocks.NewWithResults[int64](t)
		sc := scannerFunc(func(dest ...any) error {
			return errTest
		})

		// setup expectations: Results must not be called after a scan failure
		req.EXPECT().GetScanDest().Return(nil).Once()

		// execute and assert
		res, err := scanRow(sc, req)
		require.Zero(t, res)
		require.Error(t, err)
		require.ErrorIs(t, err, errTest)
	})

	t.Run("results error", func(t *testing.T) {
		t.Parallel()

		// test declarations
		req := sqltemplateMocks.NewWithResults[int64](t)
		sc := scannerFunc(func(dest ...any) error {
			return nil
		})

		// setup expectations: scanning works but materializing results fails
		req.EXPECT().GetScanDest().Return(nil).Once()
		req.EXPECT().Results().Return(0, errTest).Once()

		// execute and assert
		res, err := scanRow(sc, req)
		require.Zero(t, res)
		require.Error(t, err)
		require.ErrorIs(t, err, errTest)
	})
}
// TestExec covers the exec wrapper's happy path plus its error paths: request
// validation failure, template execution failure, and SQL execution failure
// (which must surface as a SQLError).
func TestExec(t *testing.T) {
	t.Parallel()

	t.Run("happy path", func(t *testing.T) {
		t.Parallel()

		// test declarations
		ctx := testutil.NewDefaultTestContext(t)
		req := sqltemplateMocks.NewSQLTemplateIface(t)
		db, dbmock := newMockDBNopSQL(t)

		// setup expectations
		req.EXPECT().Validate().Return(nil).Once()
		req.EXPECT().GetArgs().Return(nil).Once()
		dbmock.ExpectExec("").WillReturnResult(sqlmock.NewResult(0, 0))

		// execute and assert
		res, err := exec(ctx, db, validTestTmpl, req)
		require.NoError(t, err)
		require.NotNil(t, res)
	})

	t.Run("invalid request", func(t *testing.T) {
		t.Parallel()

		// test declarations
		ctx := testutil.NewDefaultTestContext(t)
		req := sqltemplateMocks.NewSQLTemplateIface(t)
		db, _ := newMockDBNopSQL(t)

		// setup expectations: validation fails before any SQL runs
		req.EXPECT().Validate().Return(errTest).Once()

		// execute and assert
		res, err := exec(ctx, db, invalidTestTmpl, req)
		require.Nil(t, res)
		require.Error(t, err)
		require.ErrorContains(t, err, "invalid request")
	})

	t.Run("error executing template", func(t *testing.T) {
		t.Parallel()

		// test declarations
		ctx := testutil.NewDefaultTestContext(t)
		req := sqltemplateMocks.NewSQLTemplateIface(t)
		db, _ := newMockDBNopSQL(t)

		// setup expectations: valid request but the template cannot execute
		req.EXPECT().Validate().Return(nil).Once()

		// execute and assert
		res, err := exec(ctx, db, invalidTestTmpl, req)
		require.Nil(t, res)
		require.Error(t, err)
		require.ErrorContains(t, err, "execute template")
	})

	t.Run("error executing SQL", func(t *testing.T) {
		t.Parallel()

		// test declarations
		ctx := testutil.NewDefaultTestContext(t)
		req := sqltemplateMocks.NewSQLTemplateIface(t)
		db, dbmock := newMockDBNopSQL(t)

		// setup expectations: the driver itself fails
		req.EXPECT().Validate().Return(nil).Once()
		req.EXPECT().GetArgs().Return(nil)
		dbmock.ExpectExec("").WillReturnError(errTest)

		// execute and assert
		res, err := exec(ctx, db, validTestTmpl, req)
		require.Nil(t, res)
		require.Error(t, err)
		require.ErrorAs(t, err, new(SQLError))
	})
}

View File

@ -1,22 +0,0 @@
package sqlstash
import "github.com/grafana/grafana/pkg/services/store/entity"
// validateEntity validates a fully loaded *entity.Entity model, and should be
// used before storing an entity to the database and before returning it to the
// user. It is currently a stub that accepts everything.
func validateEntity(*entity.Entity) error {
	return nil // TODO
}
// validateLabels validates the given map of label names to their values.
// It is currently a stub that accepts everything.
func validateLabels(map[string]string) error {
	// this should be called by validateEntity
	return nil // TODO
}
// validateFields validates the given map of field names to their values.
// It is currently a stub that accepts everything.
func validateFields(map[string]string) error {
	// this should be called by validateEntity
	return nil // TODO
}

View File

@ -1,35 +0,0 @@
package sqlstash
import (
"testing"
"github.com/stretchr/testify/require"
)
// TestValidateEntity checks that an empty entity passes the (stub) validation.
func TestValidateEntity(t *testing.T) {
	t.Parallel()

	err := validateEntity(newEmptyEntity())
	require.NoError(t, err)
}
// TestValidateLabels checks that an empty label map passes the (stub) validation.
func TestValidateLabels(t *testing.T) {
	t.Parallel()

	err := validateLabels(map[string]string{})
	require.NoError(t, err)
}
// TestValidateFields checks that an empty field map passes the (stub) validation.
func TestValidateFields(t *testing.T) {
	t.Parallel()

	err := validateFields(map[string]string{})
	require.NoError(t, err)
}
// Silence the `unused` linter until we implement and use these validations.
// Remove these assignments once validateEntity is wired into the store paths.
var (
	_ = validateEntity
	_ = validateLabels
	_ = validateFields
)

View File

@ -1,104 +0,0 @@
package entity_server_tests
import (
"context"
"testing"
"github.com/grafana/grafana/pkg/apimachinery/identity"
"github.com/grafana/grafana/pkg/infra/tracing"
"github.com/stretchr/testify/require"
"github.com/grafana/grafana/pkg/components/satokengen"
"github.com/grafana/grafana/pkg/server"
"github.com/grafana/grafana/pkg/services/featuremgmt"
"github.com/grafana/grafana/pkg/services/org"
saAPI "github.com/grafana/grafana/pkg/services/serviceaccounts/api"
saTests "github.com/grafana/grafana/pkg/services/serviceaccounts/tests"
"github.com/grafana/grafana/pkg/services/store/entity"
"github.com/grafana/grafana/pkg/services/store/entity/db/dbimpl"
"github.com/grafana/grafana/pkg/services/store/entity/sqlstash"
"github.com/grafana/grafana/pkg/services/user"
"github.com/grafana/grafana/pkg/tests/testinfra"
"github.com/grafana/grafana/pkg/tests/testsuite"
)
// TestMain delegates to the shared test-suite runner so package-level
// setup/teardown is applied consistently across integration test packages.
func TestMain(m *testing.M) {
	testsuite.Run(m)
}
// createServiceAccountAdminToken provisions an admin service account plus an
// API key in the test environment, returning the key's client secret and the
// matching signed-in user for use as the gRPC requester identity.
func createServiceAccountAdminToken(t *testing.T, env *server.TestEnv) (string, *user.SignedInUser) {
	t.Helper()

	account := saTests.SetupUserServiceAccount(t, env.SQLStore, env.Cfg, saTests.TestUser{
		Name:             "grpc-server-sa",
		Role:             string(org.RoleAdmin),
		Login:            "grpc-server-sa",
		IsServiceAccount: true,
	})

	keyGen, err := satokengen.New(saAPI.ServiceID)
	require.NoError(t, err)

	// only the hashed key is stored; the plain client secret is returned below
	_ = saTests.SetupApiKey(t, env.SQLStore, env.Cfg, saTests.TestApiKey{
		Name:             "grpc-server-test",
		Role:             org.RoleAdmin,
		OrgId:            account.OrgID,
		Key:              keyGen.HashedKey,
		ServiceAccountID: &account.ID,
	})

	return keyGen.ClientSecret, &user.SignedInUser{
		UserID:           account.ID,
		Email:            account.Email,
		Name:             account.Name,
		Login:            account.Login,
		OrgID:            account.OrgID,
		IsServiceAccount: account.IsServiceAccount,
	}
}
// testContext bundles everything an entity-server integration test needs.
type testContext struct {
	authToken string                   // client secret of the admin service account
	client    entity.EntityStoreClient // in-process entity store client
	user      *user.SignedInUser       // the service-account requester
	ctx       context.Context          // background context carrying the requester identity
}
// createTestContext boots a full Grafana test environment with the gRPC server
// and unified storage feature flags enabled, wires an in-process SQL entity
// server on top of it, and returns a ready-to-use client plus admin identity.
func createTestContext(t *testing.T) testContext {
	t.Helper()

	dir, path := testinfra.CreateGrafDir(t, testinfra.GrafanaOpts{
		EnableFeatureToggles: []string{
			featuremgmt.FlagGrpcServer,
			featuremgmt.FlagUnifiedStorage,
		},
		AppModeProduction: false,         // required for migrations to run
		GRPCServerAddress: "127.0.0.1:0", // :0 for choosing the port automatically
	})
	_, env := testinfra.StartGrafanaEnv(t, dir, path)

	authToken, serviceAccountUser := createServiceAccountAdminToken(t, env)

	eDB, err := dbimpl.ProvideEntityDB(env.SQLStore, env.Cfg, env.FeatureToggles, nil)
	require.NoError(t, err)

	err = eDB.Init()
	require.NoError(t, err)

	traceConfig, err := tracing.ParseTracingConfig(env.Cfg)
	require.NoError(t, err)
	tracer, err := tracing.ProvideService(traceConfig)
	require.NoError(t, err)

	store, err := sqlstash.ProvideSQLEntityServer(eDB, tracer)
	require.NoError(t, err)

	// local client talks to the store in-process, no network hop
	client := entity.NewEntityStoreClientLocal(store)

	return testContext{
		authToken: authToken,
		client:    client,
		user:      serviceAccountUser,
		ctx:       identity.WithRequester(context.Background(), serviceAccountUser),
	}
}

View File

@ -1,566 +0,0 @@
package entity_server_tests
import (
_ "embed"
"encoding/json"
"fmt"
"reflect"
"testing"
"time"
"github.com/stretchr/testify/require"
"github.com/grafana/grafana/pkg/apimachinery/identity"
"github.com/grafana/grafana/pkg/services/store/entity"
)
var (
	// dashboard JSON fixture carrying the tags "blue" and "green"
	//go:embed testdata/dashboard-with-tags-b-g.json
	dashboardWithTagsBlueGreen string

	// dashboard JSON fixture carrying the tags "red" and "green"
	//go:embed testdata/dashboard-with-tags-r-g.json
	dashboardWithTagsRedGreen string
)
// rawEntityMatcher describes the expected state of a stored entity; zero-valued
// fields are ignored by requireEntityMatch.
type rawEntityMatcher struct {
	key          string      // expected entity key
	createdRange []time.Time // two-element [from, to] window for CreatedAt
	updatedRange []time.Time // two-element [from, to] window for UpdatedAt
	createdBy    string
	updatedBy    string
	body         []byte // expected body (compared as JSON when valid JSON)
	version      int64  // expected resource version (0 = don't check)
}
// objectVersionMatcher describes the expected version metadata of an entity;
// zero-valued/nil fields are ignored by requireVersionMatch.
type objectVersionMatcher struct {
	updatedRange []time.Time // two-element [from, to] window for UpdatedAt
	updatedBy    string
	version      int64   // expected resource version (0 = don't check)
	etag         *string // expected ETag, nil to skip
	comment      *string // expected message/comment, nil to skip
}
func timestampInRange(ts int64, tsRange []time.Time) bool {
low := tsRange[0].UnixMilli() - 1
high := tsRange[1].UnixMilli() + 1
return ts >= low && ts <= high
}
// requireEntityMatch fails the test if obj deviates from any field set in m.
// Zero-valued matcher fields are skipped, and all mismatches are collected
// into a single failure message so the test reports everything at once.
func requireEntityMatch(t *testing.T, obj *entity.Entity, m rawEntityMatcher) {
	t.Helper()
	require.NotNil(t, obj)

	mismatches := ""
	if m.key != "" && m.key != obj.Key {
		mismatches += fmt.Sprintf("expected key: %s, actual: %s\n", m.key, obj.Key)
	}

	if len(m.createdRange) == 2 && !timestampInRange(obj.CreatedAt, m.createdRange) {
		mismatches += fmt.Sprintf("expected Created range: [from %s to %s], actual created: %s\n", m.createdRange[0], m.createdRange[1], time.UnixMilli(obj.CreatedAt))
	}
	if len(m.updatedRange) == 2 && !timestampInRange(obj.UpdatedAt, m.updatedRange) {
		mismatches += fmt.Sprintf("expected Updated range: [from %s to %s], actual updated: %s\n", m.updatedRange[0], m.updatedRange[1], time.UnixMilli(obj.UpdatedAt))
	}
	if m.createdBy != "" && m.createdBy != obj.CreatedBy {
		mismatches += fmt.Sprintf("createdBy: expected: '%s', found: '%s'\n", m.createdBy, obj.CreatedBy)
	}
	if m.updatedBy != "" && m.updatedBy != obj.UpdatedBy {
		mismatches += fmt.Sprintf("updatedBy: expected: '%s', found: '%s'\n", m.updatedBy, obj.UpdatedBy)
	}

	if len(m.body) > 0 {
		// valid JSON is compared structurally; anything else byte-for-byte
		if json.Valid(m.body) {
			require.JSONEq(t, string(m.body), string(obj.Body), "expecting same body")
		} else if !reflect.DeepEqual(m.body, obj.Body) {
			mismatches += fmt.Sprintf("expected body len: %d, actual body len: %d\n", len(m.body), len(obj.Body))
		}
	}

	if m.version != 0 && m.version != obj.ResourceVersion {
		mismatches += fmt.Sprintf("expected version: %d, actual version: %d\n", m.version, obj.ResourceVersion)
	}

	require.True(t, len(mismatches) == 0, mismatches)
}
// requireVersionMatch fails the test if obj's version metadata deviates from
// any field set in m. Zero-valued/nil matcher fields are skipped, and all
// mismatches are collected into a single failure message.
func requireVersionMatch(t *testing.T, obj *entity.Entity, m objectVersionMatcher) {
	t.Helper()

	mismatches := ""
	if m.etag != nil && *m.etag != obj.ETag {
		mismatches += fmt.Sprintf("expected etag: %s, actual etag: %s\n", *m.etag, obj.ETag)
	}
	if len(m.updatedRange) == 2 && !timestampInRange(obj.UpdatedAt, m.updatedRange) {
		mismatches += fmt.Sprintf("expected updatedRange range: [from %s to %s], actual updated: %s\n", m.updatedRange[0], m.updatedRange[1], time.UnixMilli(obj.UpdatedAt))
	}
	if m.updatedBy != "" && m.updatedBy != obj.UpdatedBy {
		mismatches += fmt.Sprintf("updatedBy: expected: '%s', found: '%s'\n", m.updatedBy, obj.UpdatedBy)
	}
	if m.version != 0 && m.version != obj.ResourceVersion {
		mismatches += fmt.Sprintf("expected version: %d, actual version: %d\n", m.version, obj.ResourceVersion)
	}

	require.True(t, len(mismatches) == 0, mismatches)
}
// TestIntegrationEntityServer exercises the entity store end to end:
// create/read/update/delete, history, listing with sorting/paging, and
// label-based filtering. It is currently skipped unconditionally (see TODO).
func TestIntegrationEntityServer(t *testing.T) {
	// TODO figure out why this still runs into sqlite database locked error
	if true {
		t.Skip("skipping integration test")
	}

	if testing.Short() {
		t.Skip("skipping integration test")
	}

	testCtx := createTestContext(t)
	ctx := identity.WithRequester(testCtx.ctx, testCtx.user)

	fakeUser := testCtx.user.GetUID()
	firstVersion := int64(0)
	group := "test.grafana.app"
	resource := "jsonobjs"
	resource2 := "playlists"
	namespace := "default"
	name := "my-test-entity"
	testKey := "/" + group + "/" + resource + "/" + namespace + "/" + name
	testKey2 := "/" + group + "/" + resource2 + "/" + namespace + "/" + name
	body := []byte("{\"name\":\"John\"}")

	t.Run("should not retrieve non-existent objects", func(t *testing.T) {
		resp, err := testCtx.client.Read(ctx, &entity.ReadEntityRequest{
			Key: testKey,
		})
		require.NoError(t, err)
		require.NotNil(t, resp)
		// a missing entity comes back as an empty response, not an error
		require.Empty(t, resp.Key)
	})

	t.Run("should be able to read persisted objects", func(t *testing.T) {
		before := time.Now()
		createReq := &entity.CreateEntityRequest{
			Entity: &entity.Entity{
				Key:       testKey,
				Group:     group,
				Resource:  resource,
				Namespace: namespace,
				Name:      name,
				Body:      body,
				Message:   "first entity!",
			},
		}
		createResp, err := testCtx.client.Create(ctx, createReq)
		require.NoError(t, err)

		// clean up in case test fails
		t.Cleanup(func() {
			_, _ = testCtx.client.Delete(ctx, &entity.DeleteEntityRequest{
				Key: testKey,
			})
		})

		versionMatcher := objectVersionMatcher{
			// updatedRange: []time.Time{before, time.Now()},
			// updatedBy:    fakeUser,
			version: firstVersion,
			comment: &createReq.Entity.Message,
		}
		requireVersionMatch(t, createResp.Entity, versionMatcher)

		readResp, err := testCtx.client.Read(ctx, &entity.ReadEntityRequest{
			Key:             testKey,
			ResourceVersion: 0,
			WithBody:        true,
		})
		require.NoError(t, err)
		require.NotNil(t, readResp)

		require.Equal(t, testKey, readResp.Key)
		require.Equal(t, namespace, readResp.Namespace) // orgId becomes the tenant id when not set
		require.Equal(t, resource, readResp.Resource)
		require.Equal(t, name, readResp.Name)

		objectMatcher := rawEntityMatcher{
			key:          testKey,
			createdRange: []time.Time{before, time.Now()},
			// updatedRange: []time.Time{before, time.Now()},
			createdBy: fakeUser,
			// updatedBy:    fakeUser,
			body:    body,
			version: firstVersion,
		}
		requireEntityMatch(t, readResp, objectMatcher)

		deleteResp, err := testCtx.client.Delete(ctx, &entity.DeleteEntityRequest{
			Key:             testKey,
			PreviousVersion: readResp.ResourceVersion,
		})
		require.NoError(t, err)
		require.Equal(t, deleteResp.Status, entity.DeleteEntityResponse_DELETED)

		// reading a deleted entity behaves like reading a non-existent one
		readRespAfterDelete, err := testCtx.client.Read(ctx, &entity.ReadEntityRequest{
			Key:             testKey,
			ResourceVersion: 0,
			WithBody:        true,
		})
		require.NoError(t, err)
		require.Empty(t, readRespAfterDelete.Key)
	})

	t.Run("should be able to update an object", func(t *testing.T) {
		before := time.Now()
		createReq := &entity.CreateEntityRequest{
			Entity: &entity.Entity{
				Key:       testKey,
				Group:     group,
				Resource:  resource,
				Namespace: namespace,
				Name:      name,
				Body:      body,
				Message:   "first entity!",
			},
		}
		createResp, err := testCtx.client.Create(ctx, createReq)
		require.NoError(t, err)

		// clean up in case test fails
		t.Cleanup(func() {
			_, _ = testCtx.client.Delete(ctx, &entity.DeleteEntityRequest{
				Key: testKey,
			})
		})

		require.Equal(t, entity.CreateEntityResponse_CREATED, createResp.Status)

		body2 := []byte("{\"name\":\"John2\"}")
		updateReq := &entity.UpdateEntityRequest{
			Entity: &entity.Entity{
				Key:     testKey,
				Body:    body2,
				Message: "update1",
			},
		}
		updateResp, err := testCtx.client.Update(ctx, updateReq)
		require.NoError(t, err)
		// every update must advance the resource version
		require.NotEqual(t, createResp.Entity.ResourceVersion, updateResp.Entity.ResourceVersion)

		// Duplicate write (no change)
		/*
			writeDupRsp, err := testCtx.client.Update(ctx, updateReq)
			require.NoError(t, err)
			require.Nil(t, writeDupRsp.Error)
			require.Equal(t, entity.UpdateEntityResponse_UNCHANGED, writeDupRsp.Status)
			require.Equal(t, updateResp.Entity.ResourceVersion, writeDupRsp.Entity.ResourceVersion)
			require.Equal(t, updateResp.Entity.ETag, writeDupRsp.Entity.ETag)
		*/

		body3 := []byte("{\"name\":\"John3\"}")
		writeReq3 := &entity.UpdateEntityRequest{
			Entity: &entity.Entity{
				Key:     testKey,
				Body:    body3,
				Message: "update3",
			},
		}
		writeResp3, err := testCtx.client.Update(ctx, writeReq3)
		require.NoError(t, err)
		require.Equal(t, entity.UpdateEntityResponse_UPDATED, writeResp3.Status)
		require.NotEqual(t, writeResp3.Entity.ResourceVersion, updateResp.Entity.ResourceVersion)

		// reading at version 0 returns the latest write
		latestMatcher := rawEntityMatcher{
			key:          testKey,
			createdRange: []time.Time{before, time.Now()},
			updatedRange: []time.Time{before, time.Now()},
			createdBy:    fakeUser,
			updatedBy:    fakeUser,
			body:         body3,
			version:      writeResp3.Entity.ResourceVersion,
		}
		readRespLatest, err := testCtx.client.Read(ctx, &entity.ReadEntityRequest{
			Key:             testKey,
			ResourceVersion: 0, // latest
			WithBody:        true,
		})
		require.NoError(t, err)
		requireEntityMatch(t, readRespLatest, latestMatcher)

		// reading at the create version returns the original body
		readRespFirstVer, err := testCtx.client.Read(ctx, &entity.ReadEntityRequest{
			Key:             testKey,
			ResourceVersion: createResp.Entity.ResourceVersion,
			WithBody:        true,
		})
		require.NoError(t, err)
		require.NotNil(t, readRespFirstVer)
		requireEntityMatch(t, readRespFirstVer, rawEntityMatcher{
			key:          testKey,
			createdRange: []time.Time{before, time.Now()},
			createdBy:    fakeUser,
			body:         body,
			version:      0,
		})

		// history is returned newest-first
		history, err := testCtx.client.History(ctx, &entity.EntityHistoryRequest{
			Key: testKey,
		})
		require.NoError(t, err)
		require.Equal(t, []*entity.Entity{
			writeResp3.Entity,
			updateResp.Entity,
			createResp.Entity,
		}, history.Versions)

		deleteResp, err := testCtx.client.Delete(ctx, &entity.DeleteEntityRequest{
			Key:             testKey,
			PreviousVersion: writeResp3.Entity.ResourceVersion,
		})
		require.NoError(t, err)
		require.Equal(t, deleteResp.Status, entity.DeleteEntityResponse_DELETED)
	})

	t.Run("should be able to list objects", func(t *testing.T) {
		// two entities per resource type, so sorting/paging can be observed
		w1, err := testCtx.client.Create(ctx, &entity.CreateEntityRequest{
			Entity: &entity.Entity{
				Key:  testKey + "1",
				Body: body,
			},
		})
		require.NoError(t, err)

		w2, err := testCtx.client.Create(ctx, &entity.CreateEntityRequest{
			Entity: &entity.Entity{
				Key:  testKey + "2",
				Body: body,
			},
		})
		require.NoError(t, err)

		w3, err := testCtx.client.Create(ctx, &entity.CreateEntityRequest{
			Entity: &entity.Entity{
				Key:  testKey2 + "3",
				Body: body,
			},
		})
		require.NoError(t, err)

		w4, err := testCtx.client.Create(ctx, &entity.CreateEntityRequest{
			Entity: &entity.Entity{
				Key:  testKey2 + "4",
				Body: body,
			},
		})
		require.NoError(t, err)

		resp, err := testCtx.client.List(ctx, &entity.EntityListRequest{
			Resource: []string{resource, resource2},
			WithBody: false,
		})
		require.NoError(t, err)
		require.NotNil(t, resp)

		names := make([]string, 0, len(resp.Results))
		kinds := make([]string, 0, len(resp.Results))
		version := make([]int64, 0, len(resp.Results))
		for _, res := range resp.Results {
			names = append(names, res.Name)
			kinds = append(kinds, res.Resource)
			version = append(version, res.ResourceVersion)
		}

		// default sort is by guid, so we ignore order
		require.ElementsMatch(t, []string{"my-test-entity1", "my-test-entity2", "my-test-entity3", "my-test-entity4"}, names)
		require.ElementsMatch(t, []string{"jsonobjs", "jsonobjs", "playlists", "playlists"}, kinds)
		require.ElementsMatch(t, []int64{
			w1.Entity.ResourceVersion,
			w2.Entity.ResourceVersion,
			w3.Entity.ResourceVersion,
			w4.Entity.ResourceVersion,
		}, version)

		// sorted by name
		resp, err = testCtx.client.List(ctx, &entity.EntityListRequest{
			Resource: []string{resource, resource2},
			WithBody: false,
			Sort:     []string{"name"},
		})
		require.NoError(t, err)
		require.NotNil(t, resp)
		require.Equal(t, 4, len(resp.Results))
		require.Equal(t, "my-test-entity1", resp.Results[0].Name)
		require.Equal(t, "my-test-entity2", resp.Results[1].Name)
		require.Equal(t, "my-test-entity3", resp.Results[2].Name)
		require.Equal(t, "my-test-entity4", resp.Results[3].Name)
		require.Equal(t, "jsonobjs", resp.Results[0].Resource)
		require.Equal(t, "jsonobjs", resp.Results[1].Resource)
		require.Equal(t, "playlists", resp.Results[2].Resource)
		require.Equal(t, "playlists", resp.Results[3].Resource)

		// sorted by name desc
		resp, err = testCtx.client.List(ctx, &entity.EntityListRequest{
			Resource: []string{resource, resource2},
			WithBody: false,
			Sort:     []string{"name_desc"},
		})
		require.NoError(t, err)
		require.NotNil(t, resp)
		require.Equal(t, 4, len(resp.Results))
		require.Equal(t, "my-test-entity1", resp.Results[3].Name)
		require.Equal(t, "my-test-entity2", resp.Results[2].Name)
		require.Equal(t, "my-test-entity3", resp.Results[1].Name)
		require.Equal(t, "my-test-entity4", resp.Results[0].Name)
		require.Equal(t, "jsonobjs", resp.Results[3].Resource)
		require.Equal(t, "jsonobjs", resp.Results[2].Resource)
		require.Equal(t, "playlists", resp.Results[1].Resource)
		require.Equal(t, "playlists", resp.Results[0].Resource)

		// with limit
		resp, err = testCtx.client.List(ctx, &entity.EntityListRequest{
			Resource: []string{resource, resource2},
			WithBody: false,
			Limit:    2,
			Sort:     []string{"name"},
		})
		require.NoError(t, err)
		require.NotNil(t, resp)
		require.Equal(t, 2, len(resp.Results))
		require.Equal(t, "my-test-entity1", resp.Results[0].Name)
		require.Equal(t, "my-test-entity2", resp.Results[1].Name)

		// with limit & continue
		resp, err = testCtx.client.List(ctx, &entity.EntityListRequest{
			Resource:      []string{resource, resource2},
			WithBody:      false,
			Limit:         2,
			NextPageToken: resp.NextPageToken,
			Sort:          []string{"name"},
		})
		require.NoError(t, err)
		require.NotNil(t, resp)
		require.Equal(t, 2, len(resp.Results))
		require.Equal(t, "my-test-entity3", resp.Results[0].Name)
		require.Equal(t, "my-test-entity4", resp.Results[1].Name)

		// Again with only one kind
		respKind1, err := testCtx.client.List(ctx, &entity.EntityListRequest{
			Resource: []string{resource},
			Sort:     []string{"name"},
		})
		require.NoError(t, err)
		names = make([]string, 0, len(respKind1.Results))
		kinds = make([]string, 0, len(respKind1.Results))
		version = make([]int64, 0, len(respKind1.Results))
		for _, res := range respKind1.Results {
			names = append(names, res.Name)
			kinds = append(kinds, res.Resource)
			version = append(version, res.ResourceVersion)
		}
		require.Equal(t, []string{"my-test-entity1", "my-test-entity2"}, names)
		require.Equal(t, []string{"jsonobjs", "jsonobjs"}, kinds)
		require.Equal(t, []int64{
			w1.Entity.ResourceVersion,
			w2.Entity.ResourceVersion,
		}, version)
	})

	t.Run("should be able to filter objects based on their labels", func(t *testing.T) {
		_, err := testCtx.client.Create(ctx, &entity.CreateEntityRequest{
			Entity: &entity.Entity{
				Key:  "/dashboards.grafana.app/dashboards/default/blue-green",
				Body: []byte(dashboardWithTagsBlueGreen),
				Labels: map[string]string{
					"blue":  "",
					"green": "",
				},
			},
		})
		require.NoError(t, err)

		_, err = testCtx.client.Create(ctx, &entity.CreateEntityRequest{
			Entity: &entity.Entity{
				Key:  "/dashboards.grafana.app/dashboards/default/red-green",
				Body: []byte(dashboardWithTagsRedGreen),
				Labels: map[string]string{
					"red":   "",
					"green": "",
				},
			},
		})
		require.NoError(t, err)

		// single-label filter: only the red-green dashboard matches
		resp, err := testCtx.client.List(ctx, &entity.EntityListRequest{
			Key:      []string{"/dashboards.grafana.app/dashboards/default"},
			WithBody: false,
			Labels: map[string]string{
				"red": "",
			},
		})
		require.NoError(t, err)
		require.NotNil(t, resp)
		require.Len(t, resp.Results, 1)
		require.Equal(t, resp.Results[0].Name, "red-green")

		// multi-label filter is a conjunction (AND)
		resp, err = testCtx.client.List(ctx, &entity.EntityListRequest{
			Key:      []string{"/dashboards.grafana.app/dashboards/default"},
			WithBody: false,
			Labels: map[string]string{
				"red":   "",
				"green": "",
			},
		})
		require.NoError(t, err)
		require.NotNil(t, resp)
		require.Len(t, resp.Results, 1)
		require.Equal(t, resp.Results[0].Name, "red-green")

		// label values must match too, not just keys
		resp, err = testCtx.client.List(ctx, &entity.EntityListRequest{
			Key:      []string{"/dashboards.grafana.app/dashboards/default"},
			WithBody: false,
			Labels: map[string]string{
				"red": "invalid",
			},
		})
		require.NoError(t, err)
		require.NotNil(t, resp)
		require.Len(t, resp.Results, 0)

		// shared label matches both dashboards
		resp, err = testCtx.client.List(ctx, &entity.EntityListRequest{
			Key:      []string{"/dashboards.grafana.app/dashboards/default"},
			WithBody: false,
			Labels: map[string]string{
				"green": "",
			},
		})
		require.NoError(t, err)
		require.NotNil(t, resp)
		require.Len(t, resp.Results, 2)

		// unknown label matches nothing
		resp, err = testCtx.client.List(ctx, &entity.EntityListRequest{
			Key:      []string{"/dashboards.grafana.app/dashboards/default"},
			WithBody: false,
			Labels: map[string]string{
				"yellow": "",
			},
		})
		require.NoError(t, err)
		require.NotNil(t, resp)
		require.Len(t, resp.Results, 0)
	})
}

View File

@ -1,39 +0,0 @@
{
"tags": [
"blue",
"green"
],
"editable": true,
"fiscalYearStartMonth": 0,
"graphTooltip": 0,
"id": 221,
"links": [],
"liveNow": false,
"panels": [
{
"gridPos": {
"h": 1,
"w": 24,
"x": 0,
"y": 8
},
"id": 8,
"title": "Row title",
"type": "row"
}
],
"schemaVersion": 36,
"templating": {
"list": []
},
"time": {
"from": "now-6h",
"to": "now"
},
"timepicker": {},
"timezone": "",
"title": "special ds",
"uid": "mocpwtR4k",
"version": 1,
"weekStart": ""
}

View File

@ -1,39 +0,0 @@
{
"tags": [
"red",
"green"
],
"editable": true,
"fiscalYearStartMonth": 0,
"graphTooltip": 0,
"id": 221,
"links": [],
"liveNow": false,
"panels": [
{
"gridPos": {
"h": 1,
"w": 24,
"x": 0,
"y": 8
},
"id": 8,
"title": "Row title",
"type": "row"
}
],
"schemaVersion": 36,
"templating": {
"list": []
},
"time": {
"from": "now-6h",
"to": "now"
},
"timepicker": {},
"timezone": "",
"title": "special ds",
"uid": "mocpwtR4k",
"version": 1,
"weekStart": ""
}

View File

@ -7,10 +7,9 @@ import (
"time"
"github.com/go-sql-driver/mysql"
"github.com/grafana/grafana/pkg/storage/unified/sql/db"
"go.opentelemetry.io/otel/trace"
"xorm.io/xorm"
"github.com/grafana/grafana/pkg/services/store/entity/db"
)
func getEngineMySQL(getter *sectionGetter, _ trace.Tracer) (*xorm.Engine, error) {

View File

@ -5,6 +5,13 @@ import (
"database/sql"
)
const (
DriverPostgres = "postgres"
DriverMySQL = "mysql"
DriverSQLite = "sqlite"
DriverSQLite3 = "sqlite3"
)
// DBProvider provides access to a SQL Database.
type DBProvider interface {
// Init initializes the SQL Database, running migrations if needed. It is