Storage: Unified Storage based on Entity API (#71977)

* first round of entityapi updates

- quote column names and clean up insert/update queries
- replace grn with guid
- streamline table structure

fixes

streamline entity history

move EntitySummary into proto

remove EntitySummary

add guid to json

fix tests

change DB_Uuid to DB_NVarchar

fix folder test

convert interface to any

more cleanup

start entity store under grafana-apiserver dskit target

CRUD working, kind of

rough cut of wiring entity api to kube-apiserver

fake grafana user in context

add key to entity

list working

revert unnecessary changes

move entity storage files to their own package, clean up

use accessor to read/write grafana annotations

implement separate Create and Update functions

* go mod tidy

* switch from Kind to resource

* basic grpc storage server

* basic support for grpc entity store

* don't connect to database unless it's needed, pass user identity over grpc

* support getting user from k8s context, fix some mysql issues

* assign owner to snowflake dependency

* switch from ulid to uuid for guids

* cleanup, rename Search to List

* remove entityListResult

* EntityAPI: remove extra user abstraction (#79033)

* remove extra user abstraction

* add test stub (incomplete)

* move grpc context setup into client wrapper, fix lint issue

* remove unused constants

* remove custom json stuff

* basic list filtering, add todo

* change target to storage-server, allow entityStore flag in prod mode

* fix issue with Update

* EntityAPI: make test work, need to resolve expected differences (#79123)

* make test work, need to resolve expected differences

* remove the fields not supported by legacy

* sanitize out the bits legacy does not support

* sanitize out the bits legacy does not support

---------

Co-authored-by: Ryan McKinley <ryantxu@gmail.com>

* update feature toggle generated files

* remove unused http headers

* update feature flag strategy

* devmode

* update readme

* spelling

* readme

---------

Co-authored-by: Ryan McKinley <ryantxu@gmail.com>
This commit is contained in:
Dan Cech 2023-12-06 21:21:21 +01:00 committed by GitHub
parent 07915703fe
commit c4c9bfaf2e
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
42 changed files with 4358 additions and 2389 deletions

View File

@ -172,11 +172,12 @@ Experimental features might be changed or removed without prior notice.
The following toggles require explicitly setting Grafana's [app mode]({{< relref "../_index.md#app_mode" >}}) to 'development' before you can enable this feature toggle. These features tend to be experimental.
| Feature toggle name | Description |
| ------------------------- | -------------------------------------------------------------------------------------------- |
| `entityStore` | SQL-based entity store (requires storage flag also) |
| `externalServiceAuth` | Starts an OAuth2 authentication provider for external services |
| `idForwarding` | Generate signed id token for identity that can be forwarded to plugins and external services |
| `externalServiceAccounts` | Automatic service account and token setup for plugins |
| `panelTitleSearchInV1` | Enable searching for dashboards using panel title in search v1 |
| `ssoSettingsApi` | Enables the SSO settings API |
| Feature toggle name | Description |
| ------------------------------------- | -------------------------------------------------------------------------------------------- |
| `unifiedStorage` | SQL-based k8s storage |
| `externalServiceAuth` | Starts an OAuth2 authentication provider for external services |
| `grafanaAPIServerEnsureKubectlAccess` | Start an additional https handler and write kubectl options |
| `idForwarding` | Generate signed id token for identity that can be forwarded to plugins and external services |
| `externalServiceAccounts` | Automatic service account and token setup for plugins |
| `panelTitleSearchInV1` | Enable searching for dashboards using panel title in search v1 |
| `ssoSettingsApi` | Enables the SSO settings API |

4
go.mod
View File

@ -123,7 +123,7 @@ require (
gopkg.in/mail.v2 v2.3.1 // @grafana/backend-platform
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // @grafana/alerting-squad-backend
xorm.io/builder v0.3.6 // indirect; @grafana/backend-platform
xorm.io/builder v0.3.6 // @grafana/backend-platform
xorm.io/core v0.7.3 // @grafana/backend-platform
xorm.io/xorm v0.8.2 // @grafana/alerting-squad-backend
)
@ -175,7 +175,7 @@ require (
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.1-0.20191002090509-6af20e3a5340 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-msgpack v0.5.5 // indirect
github.com/hashicorp/go-multierror v1.1.1 // indirect; @grafana/alerting-squad
github.com/hashicorp/go-multierror v1.1.1 // @grafana/alerting-squad
github.com/hashicorp/go-sockaddr v1.0.2 // indirect
github.com/hashicorp/golang-lru v0.6.0 // indirect
github.com/hashicorp/yamux v0.1.1 // indirect

View File

@ -43,7 +43,7 @@ export interface FeatureToggles {
topnav?: boolean;
dockedMegaMenu?: boolean;
grpcServer?: boolean;
entityStore?: boolean;
unifiedStorage?: boolean;
cloudWatchCrossAccountQuerying?: boolean;
redshiftAsyncQueryDataSupport?: boolean;
athenaAsyncQueryDataSupport?: boolean;
@ -105,6 +105,7 @@ export interface FeatureToggles {
metricsSummary?: boolean;
grafanaAPIServer?: boolean;
grafanaAPIServerWithExperimentalAPIs?: boolean;
grafanaAPIServerEnsureKubectlAccess?: boolean;
featureToggleAdminPage?: boolean;
awsAsyncQueryCaching?: boolean;
splitScopes?: boolean;

View File

@ -6,10 +6,12 @@ const (
Core string = "core"
GrafanaAPIServer string = "grafana-apiserver"
StorageServer string = "storage-server"
)
var dependencyMap = map[string][]string{
GrafanaAPIServer: {},
StorageServer: {},
Core: {},
All: {Core},
}

View File

@ -3,6 +3,7 @@ package modules
import (
"context"
"errors"
"strings"
"github.com/grafana/dskit/modules"
"github.com/grafana/dskit/services"
@ -92,7 +93,7 @@ func (m *service) Run(ctx context.Context) error {
listener := newServiceListener(m.log, m)
m.serviceManager.AddListener(listener)
m.log.Debug("Starting module service manager")
m.log.Debug("Starting module service manager", "targets", strings.Join(m.targets, ","))
// wait until a service fails or stop signal was received
err = m.serviceManager.StartAsync(ctx)
if err != nil {
@ -135,7 +136,7 @@ func (m *service) RegisterModule(name string, fn initFn) {
}
// RegisterInvisibleModule registers an invisible module with the dskit module manager.
// Invisible modules are not visible to the user, and are intendent to be used as dependencies.
// Invisible modules are not visible to the user, and are intended to be used as dependencies.
func (m *service) RegisterInvisibleModule(name string, fn initFn) {
m.moduleManager.RegisterModule(name, fn, modules.UserInvisibleModule)
}

View File

@ -15,6 +15,7 @@ import (
"github.com/grafana/grafana/pkg/infra/log"
"github.com/grafana/grafana/pkg/modules"
"github.com/grafana/grafana/pkg/services/featuremgmt"
storageServer "github.com/grafana/grafana/pkg/services/store/entity/server"
"github.com/grafana/grafana/pkg/setting"
)
@ -129,6 +130,10 @@ func (s *ModuleServer) Run() error {
// s.log.Debug("apiserver feature is disabled")
//}
m.RegisterModule(modules.StorageServer, func() (services.Service, error) {
return storageServer.ProvideService(s.cfg, s.features)
})
m.RegisterModule(modules.All, nil)
return m.Run(s.context)

View File

@ -134,6 +134,7 @@ import (
"github.com/grafana/grafana/pkg/services/star/starimpl"
"github.com/grafana/grafana/pkg/services/stats/statsimpl"
"github.com/grafana/grafana/pkg/services/store"
entityDB "github.com/grafana/grafana/pkg/services/store/entity/db"
"github.com/grafana/grafana/pkg/services/store/entity/sqlstash"
"github.com/grafana/grafana/pkg/services/store/kind"
"github.com/grafana/grafana/pkg/services/store/resolver"
@ -343,6 +344,8 @@ var wireBasicSet = wire.NewSet(
grpcserver.ProvideReflectionService,
interceptors.ProvideAuthenticator,
kind.ProvideService, // The registry of known kinds
entityDB.ProvideEntityDB,
wire.Bind(new(sqlstash.EntityDB), new(*entityDB.EntityDB)),
sqlstash.ProvideSQLEntityServer,
resolver.ProvideEntityReferenceResolver,
teamimpl.ProvideService,

View File

@ -222,10 +222,11 @@ var (
Created: time.Date(2022, time.September, 27, 12, 0, 0, 0, time.UTC),
},
{
Name: "entityStore",
Description: "SQL-based entity store (requires storage flag also)",
Name: "unifiedStorage",
Description: "SQL-based k8s storage",
Stage: FeatureStageExperimental,
RequiresDevMode: true,
RequiresRestart: true, // new SQL tables created
Owner: grafanaAppPlatformSquad,
Created: time.Date(2022, time.December, 1, 12, 0, 0, 0, time.UTC),
},
@ -738,6 +739,15 @@ var (
Owner: grafanaAppPlatformSquad,
Created: time.Date(2023, time.October, 6, 12, 0, 0, 0, time.UTC),
},
{
Name: "grafanaAPIServerEnsureKubectlAccess",
Description: "Start an additional https handler and write kubectl options",
Stage: FeatureStageExperimental,
RequiresDevMode: true,
RequiresRestart: true,
Owner: grafanaAppPlatformSquad,
Created: time.Date(2023, time.December, 6, 12, 0, 0, 0, time.UTC),
},
{
Name: "featureToggleAdminPage",
Description: "Enable admin page for managing feature toggles from the Grafana front-end",

View File

@ -24,7 +24,7 @@ dataConnectionsConsole,GA,@grafana/plugins-platform-backend,2022-06-01,false,fal
topnav,deprecated,@grafana/grafana-frontend-platform,2022-06-20,false,false,false,false
dockedMegaMenu,experimental,@grafana/grafana-frontend-platform,2023-09-18,false,false,false,true
grpcServer,preview,@grafana/grafana-app-platform-squad,2022-09-27,false,false,false,false
entityStore,experimental,@grafana/grafana-app-platform-squad,2022-12-01,true,false,false,false
unifiedStorage,experimental,@grafana/grafana-app-platform-squad,2022-12-01,true,false,true,false
cloudWatchCrossAccountQuerying,GA,@grafana/aws-datasources,2022-11-28,false,false,false,false
redshiftAsyncQueryDataSupport,GA,@grafana/aws-datasources,2022-08-27,false,false,false,false
athenaAsyncQueryDataSupport,GA,@grafana/aws-datasources,2022-08-27,false,false,false,true
@ -86,6 +86,7 @@ traceQLStreaming,experimental,@grafana/observability-traces-and-profiling,2023-0
metricsSummary,experimental,@grafana/observability-traces-and-profiling,2023-08-28,false,false,false,true
grafanaAPIServer,experimental,@grafana/grafana-app-platform-squad,2023-07-14,false,false,false,false
grafanaAPIServerWithExperimentalAPIs,experimental,@grafana/grafana-app-platform-squad,2023-10-06,false,false,false,false
grafanaAPIServerEnsureKubectlAccess,experimental,@grafana/grafana-app-platform-squad,2023-12-06,true,false,true,false
featureToggleAdminPage,experimental,@grafana/grafana-operator-experience-squad,2023-07-18,false,false,true,false
awsAsyncQueryCaching,preview,@grafana/aws-datasources,2023-07-21,false,false,false,false
splitScopes,preview,@grafana/identity-access-team,2023-07-21,false,false,true,false

1 Name Stage Owner Created requiresDevMode RequiresLicense RequiresRestart FrontendOnly
24 topnav deprecated @grafana/grafana-frontend-platform 2022-06-20 false false false false
25 dockedMegaMenu experimental @grafana/grafana-frontend-platform 2023-09-18 false false false true
26 grpcServer preview @grafana/grafana-app-platform-squad 2022-09-27 false false false false
27 entityStore unifiedStorage experimental @grafana/grafana-app-platform-squad 2022-12-01 true false false true false
28 cloudWatchCrossAccountQuerying GA @grafana/aws-datasources 2022-11-28 false false false false
29 redshiftAsyncQueryDataSupport GA @grafana/aws-datasources 2022-08-27 false false false false
30 athenaAsyncQueryDataSupport GA @grafana/aws-datasources 2022-08-27 false false false true
86 metricsSummary experimental @grafana/observability-traces-and-profiling 2023-08-28 false false false true
87 grafanaAPIServer experimental @grafana/grafana-app-platform-squad 2023-07-14 false false false false
88 grafanaAPIServerWithExperimentalAPIs experimental @grafana/grafana-app-platform-squad 2023-10-06 false false false false
89 grafanaAPIServerEnsureKubectlAccess experimental @grafana/grafana-app-platform-squad 2023-12-06 true false true false
90 featureToggleAdminPage experimental @grafana/grafana-operator-experience-squad 2023-07-18 false false true false
91 awsAsyncQueryCaching preview @grafana/aws-datasources 2023-07-21 false false false false
92 splitScopes preview @grafana/identity-access-team 2023-07-21 false false true false

View File

@ -107,9 +107,9 @@ const (
// Run the GRPC server
FlagGrpcServer = "grpcServer"
// FlagEntityStore
// SQL-based entity store (requires storage flag also)
FlagEntityStore = "entityStore"
// FlagUnifiedStorage
// SQL-based k8s storage
FlagUnifiedStorage = "unifiedStorage"
// FlagCloudWatchCrossAccountQuerying
// Enables cross-account querying in CloudWatch datasources
@ -355,6 +355,10 @@ const (
// Register experimental APIs with the k8s API server
FlagGrafanaAPIServerWithExperimentalAPIs = "grafanaAPIServerWithExperimentalAPIs"
// FlagGrafanaAPIServerEnsureKubectlAccess
// Start an additional https handler and write kubectl options
FlagGrafanaAPIServerEnsureKubectlAccess = "grafanaAPIServerEnsureKubectlAccess"
// FlagFeatureToggleAdminPage
// Enable admin page for managing feature toggles from the Grafana front-end
FlagFeatureToggleAdminPage = "featureToggleAdminPage"

View File

@ -22,10 +22,7 @@ import (
func TestFeatureToggleFiles(t *testing.T) {
legacyNames := map[string]bool{
"httpclientprovider_azure_auth": true,
"service-accounts": true,
"database_metrics": true,
"live-service-web-worker": true,
"live-service-web-worker": true,
}
t.Run("check registry constraints", func(t *testing.T) {

View File

@ -3,8 +3,6 @@
## Basic Setup
```ini
app_mode = development
[feature_toggles]
grafanaAPIServer = true
kubernetesPlaylists = true
@ -55,7 +53,19 @@ data/grafana-apiserver
### `kubectl` access
From the root of the Grafanaa repository, run the following:
For kubectl to work, grafana needs to run over https. To simplify development, you can use:
```ini
app_mode = development
[feature_toggles]
grafanaAPIServer = true
grafanaAPIServerEnsureKubectlAccess = true
kubernetesPlaylists = true
```
This will create a development kubeconfig and start a parallel ssl listener. It can be registered by
navigating to the root grafana folder, then running:
```bash
export KUBECONFIG=$PWD/data/grafana-apiserver/grafana.kubeconfig
kubectl api-resources
@ -64,3 +74,6 @@ kubectl api-resources
### Grafana API Access
The Kubernetes compatible API can be accessed using existing Grafana AuthN at: [http://localhost:3000/apis](http://localhost:3000/apis).
The equivalent openapi docs can be seen in [http://localhost:3000/swagger](http://localhost:3000/swagger),
select the relevant API from the dropdown in the upper right.

View File

@ -47,7 +47,7 @@ func newConfig(cfg *setting.Cfg, features featuremgmt.FeatureToggles) *config {
return &config{
enabled: features.IsEnabledGlobally(featuremgmt.FlagGrafanaAPIServer),
devMode: cfg.Env == setting.Dev,
devMode: features.IsEnabledGlobally(featuremgmt.FlagGrafanaAPIServerEnsureKubectlAccess),
dataPath: filepath.Join(cfg.DataPath, "grafana-apiserver"),
ip: ip,
port: port,

View File

@ -15,6 +15,8 @@ import (
"github.com/go-logr/logr"
"github.com/grafana/dskit/services"
"golang.org/x/mod/semver"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials/insecure"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
@ -36,22 +38,29 @@ import (
"github.com/grafana/grafana/pkg/api/routing"
"github.com/grafana/grafana/pkg/infra/appcontext"
"github.com/grafana/grafana/pkg/infra/db"
"github.com/grafana/grafana/pkg/infra/tracing"
"github.com/grafana/grafana/pkg/middleware"
"github.com/grafana/grafana/pkg/modules"
"github.com/grafana/grafana/pkg/registry"
contextmodel "github.com/grafana/grafana/pkg/services/contexthandler/model"
"github.com/grafana/grafana/pkg/services/featuremgmt"
entitystorage "github.com/grafana/grafana/pkg/services/grafana-apiserver/storage/entity"
filestorage "github.com/grafana/grafana/pkg/services/grafana-apiserver/storage/file"
"github.com/grafana/grafana/pkg/services/store/entity"
entityDB "github.com/grafana/grafana/pkg/services/store/entity/db"
"github.com/grafana/grafana/pkg/services/store/entity/sqlstash"
"github.com/grafana/grafana/pkg/setting"
)
type StorageType string
const (
StorageTypeFile StorageType = "file"
StorageTypeEtcd StorageType = "etcd"
StorageTypeLegacy StorageType = "legacy"
StorageTypeFile StorageType = "file"
StorageTypeEtcd StorageType = "etcd"
StorageTypeLegacy StorageType = "legacy"
StorageTypeUnified StorageType = "unified"
StorageTypeUnifiedGrpc StorageType = "unified-grpc"
)
var (
@ -107,9 +116,13 @@ type service struct {
config *config
restConfig *clientrest.Config
cfg *setting.Cfg
features featuremgmt.FeatureToggles
stopCh chan struct{}
stoppedCh chan error
db db.DB
rr routing.RouteRegister
handler http.Handler
builders []APIGroupBuilder
@ -125,14 +138,18 @@ func ProvideService(
rr routing.RouteRegister,
authz authorizer.Authorizer,
tracing *tracing.TracingService,
db db.DB,
) (*service, error) {
s := &service{
config: newConfig(cfg, features),
cfg: cfg,
features: features,
rr: rr,
stopCh: make(chan struct{}),
builders: []APIGroupBuilder{},
authorizer: authz,
tracing: tracing,
db: db, // For Unified storage
}
// This will be used when running as a dskit service
@ -154,17 +171,11 @@ func ProvideService(
req.URL.Path = "/"
}
//TODO: add support for the existing MetricsEndpointBasicAuth config option
// TODO: add support for the existing MetricsEndpointBasicAuth config option
if req.URL.Path == "/apiserver-metrics" {
req.URL.Path = "/metrics"
}
ctx := req.Context()
signedInUser := appcontext.MustUser(ctx)
req.Header.Set("X-Remote-User", strconv.FormatInt(signedInUser.UserID, 10))
req.Header.Set("X-Remote-Group", "grafana")
resp := responsewriter.WrapForHTTP1Or2(c.Resp)
s.handler.ServeHTTP(resp, req)
}
@ -253,7 +264,8 @@ func (s *service) start(ctx context.Context) error {
}
}
if s.config.storageType == StorageTypeEtcd {
switch s.config.storageType {
case StorageTypeEtcd:
o.Etcd.StorageConfig.Transport.ServerList = s.config.etcdServers
if err := o.Etcd.Validate(); len(err) > 0 {
return err[0]
@ -261,10 +273,45 @@ func (s *service) start(ctx context.Context) error {
if err := o.Etcd.ApplyTo(&serverConfig.Config); err != nil {
return err
}
}
if s.config.storageType == StorageTypeFile {
case StorageTypeUnified:
if !s.features.IsEnabledGlobally(featuremgmt.FlagUnifiedStorage) {
return fmt.Errorf("unified storage requires the unifiedStorage feature flag (and app_mode = development)")
}
eDB, err := entityDB.ProvideEntityDB(s.db, s.cfg, s.features)
if err != nil {
return err
}
store, err := sqlstash.ProvideSQLEntityServer(eDB)
if err != nil {
return err
}
serverConfig.Config.RESTOptionsGetter = entitystorage.NewRESTOptionsGetter(s.cfg, store, nil)
case StorageTypeUnifiedGrpc:
// Create a connection to the gRPC server
// TODO: support configuring the gRPC server address
conn, err := grpc.Dial("localhost:10000", grpc.WithTransportCredentials(insecure.NewCredentials()))
if err != nil {
return err
}
// TODO: determine when to close the connection, we cannot defer it here
// defer conn.Close()
// Create a client instance
store := entity.NewEntityStoreClientWrapper(conn)
serverConfig.Config.RESTOptionsGetter = entitystorage.NewRESTOptionsGetter(s.cfg, store, nil)
case StorageTypeFile:
serverConfig.RESTOptionsGetter = filestorage.NewRESTOptionsGetter(s.config.dataPath, o.Etcd.StorageConfig)
case StorageTypeLegacy:
// do nothing?
}
serverConfig.Authorization.Authorizer = s.authorizer

View File

@ -0,0 +1,91 @@
// SPDX-License-Identifier: AGPL-3.0-only
package entity
import (
"encoding/json"
"path"
"time"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apiserver/pkg/registry/generic"
"k8s.io/apiserver/pkg/storage"
"k8s.io/apiserver/pkg/storage/storagebackend"
"k8s.io/apiserver/pkg/storage/storagebackend/factory"
flowcontrolrequest "k8s.io/apiserver/pkg/util/flowcontrol/request"
"k8s.io/client-go/tools/cache"
entityStore "github.com/grafana/grafana/pkg/services/store/entity"
"github.com/grafana/grafana/pkg/setting"
)
var _ generic.RESTOptionsGetter = (*RESTOptionsGetter)(nil)
// RESTOptionsGetter produces apiserver RESTOptions whose storage backend is
// the Grafana entity store instead of etcd.
type RESTOptionsGetter struct {
	cfg   *setting.Cfg                  // Grafana config; used to derive a unique connection string
	store entityStore.EntityStoreServer // entity store that backs all reads and writes
	Codec runtime.Codec                 // codec used to encode/decode stored objects
}
// NewRESTOptionsGetter builds a RESTOptionsGetter backed by the given entity
// store, using codec for object serialization.
func NewRESTOptionsGetter(cfg *setting.Cfg, store entityStore.EntityStoreServer, codec runtime.Codec) *RESTOptionsGetter {
	getter := &RESTOptionsGetter{
		cfg:   cfg,
		store: store,
		Codec: codec,
	}
	return getter
}
// GetRESTOptions returns the generic.RESTOptions the apiserver uses for the
// given group/resource, wiring its storage decorator to the entity store.
func (f *RESTOptionsGetter) GetRESTOptions(resource schema.GroupResource) (generic.RESTOptions, error) {
	// build connection string to uniquely identify the storage backend
	// (a hash of the [entity_api] config section, with env overrides applied)
	connectionInfo, err := json.Marshal(f.cfg.SectionWithEnvOverrides("entity_api").KeysHash())
	if err != nil {
		return generic.RESTOptions{}, err
	}
	storageConfig := &storagebackend.ConfigForResource{
		Config: storagebackend.Config{
			Type:   "custom",
			Prefix: "",
			Transport: storagebackend.TransportConfig{
				// Not a real server address: carries the connection identity string.
				ServerList: []string{
					string(connectionInfo),
				},
			},
			Paging:                    false,
			Codec:                     f.Codec,
			EncodeVersioner:           nil,
			Transformer:               nil,
			CompactionInterval:        0,
			CountMetricPollPeriod:     0,
			DBMetricPollInterval:      0,
			HealthcheckTimeout:        0,
			ReadycheckTimeout:         0,
			StorageObjectCountTracker: nil,
		},
		GroupResource: resource,
	}
	ret := generic.RESTOptions{
		StorageConfig: storageConfig,
		// Decorator replaces the default etcd storage with the entity-backed
		// Storage implementation for every resource served via this getter.
		Decorator: func(
			config *storagebackend.ConfigForResource,
			resourcePrefix string,
			keyFunc func(obj runtime.Object) (string, error),
			newFunc func() runtime.Object,
			newListFunc func() runtime.Object,
			getAttrsFunc storage.AttrFunc,
			trigger storage.IndexerFuncs,
			indexers *cache.Indexers,
		) (storage.Interface, factory.DestroyFunc, error) {
			return NewStorage(config, resource, f.store, f.Codec, keyFunc, newFunc, newListFunc, getAttrsFunc)
		},
		DeleteCollectionWorkers:   0,
		EnableGarbageCollection:   false,
		ResourcePrefix:            path.Join(storageConfig.Prefix, resource.Group, resource.Resource),
		CountMetricPollPeriod:     1 * time.Second,
		StorageObjectCountTracker: flowcontrolrequest.NewStorageObjectCountTracker(),
	}
	return ret, nil
}

View File

@ -0,0 +1,390 @@
// SPDX-License-Identifier: AGPL-3.0-only
// Provenance-includes-location: https://github.com/kubernetes-sigs/apiserver-runtime/blob/main/pkg/experimental/storage/filepath/jsonfile_rest.go
// Provenance-includes-license: Apache-2.0
// Provenance-includes-copyright: The Kubernetes Authors.
package entity
import (
"context"
"errors"
"fmt"
"reflect"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/conversion"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/apiserver/pkg/endpoints/request"
"k8s.io/apiserver/pkg/storage"
"k8s.io/apiserver/pkg/storage/storagebackend"
"k8s.io/apiserver/pkg/storage/storagebackend/factory"
entityStore "github.com/grafana/grafana/pkg/services/store/entity"
"github.com/grafana/grafana/pkg/util"
)
var _ storage.Interface = (*Storage)(nil)
const MaxUpdateAttempts = 1
// Storage implements storage.Interface, persisting resources in the Grafana
// entity store (via EntityStoreServer) rather than etcd.
type Storage struct {
	config       *storagebackend.ConfigForResource // apiserver storage config for this resource
	store        entityStore.EntityStoreServer     // backing entity store
	gr           schema.GroupResource              // group/resource this storage serves
	codec        runtime.Codec                     // encodes/decodes runtime objects
	keyFunc      func(obj runtime.Object) (string, error)
	newFunc      func() runtime.Object // allocates an empty single object
	newListFunc  func() runtime.Object // allocates an empty list object
	getAttrsFunc storage.AttrFunc
	// trigger      storage.IndexerFuncs
	// indexers     *cache.Indexers
	// watchSet     *WatchSet
}
// NewStorage constructs an entity-backed storage.Interface for gr.
// The returned DestroyFunc is nil because no background resources are held.
func NewStorage(
	config *storagebackend.ConfigForResource,
	gr schema.GroupResource,
	store entityStore.EntityStoreServer,
	codec runtime.Codec,
	keyFunc func(obj runtime.Object) (string, error),
	newFunc func() runtime.Object,
	newListFunc func() runtime.Object,
	getAttrsFunc storage.AttrFunc,
) (storage.Interface, factory.DestroyFunc, error) {
	s := &Storage{
		config:       config,
		gr:           gr,
		store:        store,
		codec:        codec,
		keyFunc:      keyFunc,
		newFunc:      newFunc,
		newListFunc:  newListFunc,
		getAttrsFunc: getAttrsFunc,
	}
	return s, nil, nil
}
// Create adds a new object at a key unless it already exists. 'ttl' is time-to-live
// in seconds (0 means forever). If no error is returned and out is not nil, out will be
// set to the read value from database.
func (s *Storage) Create(ctx context.Context, key string, obj runtime.Object, out runtime.Object, ttl uint64) error {
	// Request info (verb/group/resource) is required to build the entity payload.
	requestInfo, ok := request.RequestInfoFrom(ctx)
	if !ok {
		return apierrors.NewInternalError(fmt.Errorf("could not get request info"))
	}
	// Strip fields that must not be persisted on create (e.g. resourceVersion).
	if err := s.Versioner().PrepareObjectForStorage(obj); err != nil {
		return err
	}
	metaAccessor, err := meta.Accessor(obj)
	if err != nil {
		return err
	}
	// Replace the default name generation strategy:
	// when generateName is set, mint a short UID, rewrite the key with it,
	// and clear generateName so the object carries its final name.
	if metaAccessor.GetGenerateName() != "" {
		k, err := ParseKey(key)
		if err != nil {
			return err
		}
		k.Name = util.GenerateShortUID()
		key = k.String()
		metaAccessor.SetName(k.Name)
		metaAccessor.SetGenerateName("")
	}
	e, err := resourceToEntity(key, obj, requestInfo)
	if err != nil {
		return err
	}
	req := &entityStore.CreateEntityRequest{
		Entity: e,
	}
	rsp, err := s.store.Create(ctx, req)
	if err != nil {
		return err
	}
	// Any status other than CREATED means the store did not treat this as a create.
	if rsp.Status != entityStore.CreateEntityResponse_CREATED {
		return fmt.Errorf("this was not a create operation... (%s)", rsp.Status.String())
	}
	// Decode the stored entity back into the caller-supplied 'out' object.
	err = entityToResource(rsp.Entity, out)
	if err != nil {
		return apierrors.NewInternalError(err)
	}
	/*
		s.watchSet.notifyWatchers(watch.Event{
			Object: out.DeepCopyObject(),
			Type:   watch.Added,
		})
	*/
	return nil
}
// Delete removes the entity at key and decodes the deleted value into out.
// When preconditions carry a ResourceVersion, the delete is guarded by it.
// NOTE(review): validateDeletion and cachedExistingObject are currently
// unused by this backend.
func (s *Storage) Delete(ctx context.Context, key string, out runtime.Object, preconditions *storage.Preconditions, validateDeletion storage.ValidateObjectFunc, cachedExistingObject runtime.Object) error {
	grn, grnErr := keyToGRN(key)
	if grnErr != nil {
		return apierrors.NewInternalError(grnErr)
	}
	// Only guard the delete with a version when the caller supplied one.
	var previousVersion string
	if preconditions != nil && preconditions.ResourceVersion != nil {
		previousVersion = *preconditions.ResourceVersion
	}
	deleteReq := &entityStore.DeleteEntityRequest{
		GRN:             grn,
		PreviousVersion: previousVersion,
	}
	rsp, err := s.store.Delete(ctx, deleteReq)
	if err != nil {
		return err
	}
	if convErr := entityToResource(rsp.Entity, out); convErr != nil {
		return apierrors.NewInternalError(convErr)
	}
	return nil
}
// Watch is part of storage.Interface but is not supported by the
// entity-backed storage; callers always receive a MethodNotSupported error.
func (s *Storage) Watch(ctx context.Context, key string, opts storage.ListOptions) (watch.Interface, error) {
	notSupported := apierrors.NewMethodNotSupported(schema.GroupResource{}, "watch")
	return nil, notSupported
}
// Get reads the entity stored at key and decodes it into objPtr.
// When the entity does not exist: if opts.IgnoreNotFound is set, objPtr is
// left as the zero object and nil is returned; otherwise a NotFound error is
// returned.
func (s *Storage) Get(ctx context.Context, key string, opts storage.GetOptions, objPtr runtime.Object) error {
	readReq := &entityStore.ReadEntityRequest{
		Key:         key,
		WithMeta:    true,
		WithBody:    true,
		WithStatus:  true,
		WithSummary: true,
	}
	readRsp, err := s.store.Read(ctx, readReq)
	if err != nil {
		return err
	}
	// A nil GRN on the response means no entity exists at this key.
	if readRsp.GRN == nil {
		if !opts.IgnoreNotFound {
			return apierrors.NewNotFound(s.gr, key)
		}
		return nil
	}
	if convErr := entityToResource(readRsp, objPtr); convErr != nil {
		return apierrors.NewInternalError(convErr)
	}
	return nil
}
// GetList unmarshalls objects found at key into a *List api object (an object
// that satisfies runtime.IsList definition).
// If 'opts.Recursive' is false, 'key' is used as an exact match. If `opts.Recursive'
// is true, 'key' is used as a prefix.
// The returned contents may be delayed, but it is guaranteed that they will
// match 'opts.ResourceVersion' according 'opts.ResourceVersionMatch'.
func (s *Storage) GetList(ctx context.Context, key string, opts storage.ListOptions, listObj runtime.Object) error {
	// Locate the Items slice inside the list object so results can be appended.
	listPtr, err := meta.GetItemsPtr(listObj)
	if err != nil {
		return err
	}
	v, err := conversion.EnforcePtr(listPtr)
	if err != nil {
		return err
	}
	rsp, err := s.store.List(ctx, &entityStore.EntityListRequest{
		Key:           []string{key},
		WithBody:      true,
		WithLabels:    true,
		WithFields:    true,
		NextPageToken: opts.Predicate.Continue,
		Limit:         opts.Predicate.Limit,
		// TODO push label/field matching down to storage
	})
	if err != nil {
		return apierrors.NewInternalError(err)
	}
	for _, r := range rsp.Results {
		// Decode each entity into a fresh object of the list's element type.
		res := s.newFunc()
		err := entityToResource(r, res)
		if err != nil {
			return apierrors.NewInternalError(err)
		}
		// TODO filter in storage
		// Predicate matching runs client-side until filtering is pushed down.
		matches, err := opts.Predicate.Matches(res)
		if err != nil {
			return apierrors.NewInternalError(err)
		}
		if !matches {
			continue
		}
		v.Set(reflect.Append(v, reflect.ValueOf(res).Elem()))
	}
	listAccessor, err := meta.ListAccessor(listObj)
	if err != nil {
		return err
	}
	// Propagate the continue token so clients can request the next page.
	if rsp.NextPageToken != "" {
		listAccessor.SetContinue(rsp.NextPageToken)
	}
	return nil
}
// GuaranteedUpdate implements the read-modify-write contract of
// storage.Interface: it loads the object at key into destination, applies
// tryUpdate, and persists the result, re-attempting the full cycle up to
// MaxUpdateAttempts times while it keeps failing.
func (s *Storage) GuaranteedUpdate(
	ctx context.Context,
	key string,
	destination runtime.Object,
	ignoreNotFound bool,
	preconditions *storage.Preconditions,
	tryUpdate storage.UpdateFunc,
	cachedExistingObject runtime.Object,
) error {
	var lastErr error
	for attempt := 0; attempt < MaxUpdateAttempts; attempt++ {
		lastErr = s.guaranteedUpdate(ctx, key, destination, ignoreNotFound, preconditions, tryUpdate, cachedExistingObject)
		if lastErr == nil {
			return nil
		}
	}
	// All attempts failed; surface the error from the final attempt.
	return lastErr
}
// guaranteedUpdate performs a single read-modify-write cycle: fetch the
// current object into destination, apply tryUpdate, then persist the result
// to the entity store, optionally guarded by a resourceVersion precondition.
func (s *Storage) guaranteedUpdate(
	ctx context.Context,
	key string,
	destination runtime.Object,
	ignoreNotFound bool,
	preconditions *storage.Preconditions,
	tryUpdate storage.UpdateFunc,
	cachedExistingObject runtime.Object,
) error {
	requestInfo, ok := request.RequestInfoFrom(ctx)
	if !ok {
		return apierrors.NewInternalError(fmt.Errorf("could not get request info"))
	}
	// Load the currently stored version into destination.
	err := s.Get(ctx, key, storage.GetOptions{}, destination)
	if err != nil {
		return err
	}
	res := &storage.ResponseMeta{}
	updatedObj, _, err := tryUpdate(destination, *res)
	if err != nil {
		var statusErr *apierrors.StatusError
		if errors.As(err, &statusErr) {
			// For now, forbidden may come from a mutation handler
			if statusErr.ErrStatus.Reason == metav1.StatusReasonForbidden {
				return statusErr
			}
		}
		return apierrors.NewInternalError(fmt.Errorf("could not successfully update object. key=%s, err=%s", key, err.Error()))
	}
	e, err := resourceToEntity(key, updatedObj, requestInfo)
	if err != nil {
		return err
	}
	// e.GRN.ResourceKind = destination.GetObjectKind().GroupVersionKind().Kind
	// Optimistic concurrency: pass along the caller-supplied resourceVersion, if any.
	previousVersion := ""
	if preconditions != nil && preconditions.ResourceVersion != nil {
		previousVersion = *preconditions.ResourceVersion
	}
	req := &entityStore.UpdateEntityRequest{
		Entity:          e,
		PreviousVersion: previousVersion,
	}
	rsp, err := s.store.Update(ctx, req)
	if err != nil {
		return err // continue???
	}
	if rsp.Status == entityStore.UpdateEntityResponse_UNCHANGED {
		return nil // destination is already set
	}
	// Decode the stored result back into destination for the caller.
	err = entityToResource(rsp.Entity, destination)
	if err != nil {
		return apierrors.NewInternalError(err)
	}
	/*
		s.watchSet.notifyWatchers(watch.Event{
			Object: destination.DeepCopyObject(),
			Type:   watch.Modified,
		})
	*/
	return nil
}
// Count returns number of different entries under the key (generally being path prefix).
// NOTE(review): not implemented — always reports zero entries with no error.
func (s *Storage) Count(key string) (int64, error) {
	return 0, nil
}
// Versioner returns the resource-version strategy; this storage uses the
// standard k8s APIObjectVersioner.
func (s *Storage) Versioner() storage.Versioner {
	return &storage.APIObjectVersioner{}
}
// RequestWatchProgress is a no-op here; watch progress notification is not
// implemented for this backend.
func (s *Storage) RequestWatchProgress(ctx context.Context) error {
	return nil
}

View File

@ -0,0 +1,263 @@
package entity
import (
"encoding/json"
"fmt"
"reflect"
"strconv"
"strings"
"time"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apiserver/pkg/endpoints/request"
"github.com/grafana/grafana/pkg/infra/grn"
"github.com/grafana/grafana/pkg/kinds"
entityStore "github.com/grafana/grafana/pkg/services/store/entity"
)
// Key identifies an entity stored under an etcd-style path of the form
// /<group>/<resource>/<namespace>/<name>(/<subresource>).
type Key struct {
	Group       string
	Resource    string
	Namespace   string
	Name        string
	Subresource string
}

// ParseKey splits an etcd-style key into its components. The key must have
// the shape /<group>/<resource>/<namespace>/<name> with an optional trailing
// /<subresource>; anything else is an error.
func ParseKey(key string) (*Key, error) {
	// /<group>/<resource>/<namespace>/<name>(/<subresource>)
	parts := strings.SplitN(key, "/", 6)
	if len(parts) != 5 && len(parts) != 6 {
		// Use a %s verb rather than concatenating the key into the format
		// string: a literal '%' in the key would otherwise corrupt the
		// message (go vet printf: non-constant format string).
		return nil, fmt.Errorf("invalid key (expecting 4 or 5 parts): %s", key)
	}
	if parts[0] != "" {
		return nil, fmt.Errorf("invalid key (expecting leading slash): %s", key)
	}
	k := &Key{
		Group:     parts[1],
		Resource:  parts[2],
		Namespace: parts[3],
		Name:      parts[4],
	}
	if len(parts) == 6 {
		k.Subresource = parts[5]
	}
	return k, nil
}
// String renders the key back into its etcd-style path form, appending the
// subresource segment only when one is present.
func (k *Key) String() string {
	s := fmt.Sprintf("/%s/%s/%s/%s", k.Group, k.Resource, k.Namespace, k.Name)
	if k.Subresource != "" {
		s += "/" + k.Subresource
	}
	return s
}
// IsEqual reports whether every component of k matches other.
// A nil other never matches (previously this would panic on nil).
func (k *Key) IsEqual(other *Key) bool {
	if other == nil {
		return false
	}
	return k.Group == other.Group &&
		k.Resource == other.Resource &&
		k.Namespace == other.Namespace &&
		k.Name == other.Name &&
		k.Subresource == other.Subresource
}
// TenantID derives the numeric tenant/org id from the namespace.
// "default" maps to org 1; otherwise the namespace must look like
// "org-<n>" or "tenant-<n>".
func (k *Key) TenantID() (int64, error) {
	if k.Namespace == "default" {
		return 1, nil
	}
	parts := strings.Split(k.Namespace, "-")
	validPrefix := len(parts) == 2 && (parts[0] == "org" || parts[0] == "tenant")
	if !validPrefix {
		return 0, fmt.Errorf("invalid namespace, expected org|tenant-${#}")
	}
	id, err := strconv.ParseInt(parts[1], 10, 64)
	if err != nil {
		return 0, fmt.Errorf("invalid namespace, expected number")
	}
	return id, nil
}
// ToGRN converts the key into a GRN, resolving the tenant id from the
// namespace. A subresource is folded into the kind as "resource/sub".
func (k *Key) ToGRN() (*grn.GRN, error) {
	tenantID, err := k.TenantID()
	if err != nil {
		return nil, err
	}
	kind := k.Resource
	if k.Subresource != "" {
		kind = k.Resource + "/" + k.Subresource
	}
	return &grn.GRN{
		ResourceGroup:      k.Group,
		ResourceKind:       kind,
		ResourceIdentifier: k.Name,
		TenantID:           tenantID,
	}, nil
}
// keyToGRN parses an etcd-style key and converts it into a GRN.
func keyToGRN(key string) (*grn.GRN, error) {
	parsed, err := ParseKey(key)
	if err != nil {
		return nil, err
	}
	return parsed.ToGRN()
}
// entityToResource copies an entity-store Entity into the supplied k8s
// runtime.Object. Metadata is applied through the meta accessors; Spec and
// Status payloads are unmarshalled into same-named struct fields via
// reflection (TODO: replace reflection with a typed interface).
func entityToResource(rsp *entityStore.Entity, res runtime.Object) error {
	var err error
	metaAccessor, err := meta.Accessor(res)
	if err != nil {
		return err
	}
	if rsp.GRN == nil {
		return fmt.Errorf("invalid entity, missing GRN")
	}
	// Apply the stored raw k8s metadata first; explicit fields below win.
	if len(rsp.Meta) > 0 {
		err = json.Unmarshal(rsp.Meta, res)
		if err != nil {
			return err
		}
	}
	metaAccessor.SetName(rsp.GRN.ResourceIdentifier)
	if rsp.GRN.TenantID != 1 {
		metaAccessor.SetNamespace(fmt.Sprintf("tenant-%d", rsp.GRN.TenantID))
	} else {
		metaAccessor.SetNamespace("default") // org 1
	}
	metaAccessor.SetUID(types.UID(rsp.Guid))
	metaAccessor.SetResourceVersion(rsp.Version)
	// CreatedAt is epoch milliseconds. time.UnixMilli is defined as the same
	// sec/nsec split the previous manual math did, and keeps this consistent
	// with the UpdatedAt/Origin handling below.
	metaAccessor.SetCreationTimestamp(metav1.NewTime(time.UnixMilli(rsp.CreatedAt)))
	grafanaAccessor := kinds.MetaAccessor(metaAccessor)
	if rsp.Folder != "" {
		grafanaAccessor.SetFolder(rsp.Folder)
	}
	if rsp.CreatedBy != "" {
		grafanaAccessor.SetCreatedBy(rsp.CreatedBy)
	}
	if rsp.UpdatedBy != "" {
		grafanaAccessor.SetUpdatedBy(rsp.UpdatedBy)
	}
	if rsp.UpdatedAt != 0 {
		updatedAt := time.UnixMilli(rsp.UpdatedAt).UTC()
		grafanaAccessor.SetUpdatedTimestamp(&updatedAt)
	}
	grafanaAccessor.SetSlug(rsp.Slug)
	if rsp.Origin != nil {
		originTime := time.UnixMilli(rsp.Origin.Time).UTC()
		grafanaAccessor.SetOriginInfo(&kinds.ResourceOriginInfo{
			Name: rsp.Origin.Source,
			Key:  rsp.Origin.Key,
			// Path: rsp.Origin.Path,
			Timestamp: &originTime,
		})
	}
	if len(rsp.Labels) > 0 {
		metaAccessor.SetLabels(rsp.Labels)
	}
	// TODO fields?
	// Unmarshal the body into the Spec field, if the concrete type has one.
	if len(rsp.Body) > 0 {
		spec := reflect.ValueOf(res).Elem().FieldByName("Spec")
		if spec != (reflect.Value{}) && spec.CanSet() {
			err = json.Unmarshal(rsp.Body, spec.Addr().Interface())
			if err != nil {
				return err
			}
		}
	}
	if len(rsp.Status) > 0 {
		status := reflect.ValueOf(res).Elem().FieldByName("Status")
		if status != (reflect.Value{}) && status.CanSet() {
			err = json.Unmarshal(rsp.Status, status.Addr().Interface())
			if err != nil {
				return err
			}
		}
	}
	return nil
}
// resourceToEntity converts a k8s runtime.Object into an entity-store Entity
// keyed by the etcd-style key. Object metadata is captured via the meta
// accessors; Spec and Status fields are serialized with reflection.
func resourceToEntity(key string, res runtime.Object, requestInfo *request.RequestInfo) (*entityStore.Entity, error) {
	metaAccessor, err := meta.Accessor(res)
	if err != nil {
		return nil, err
	}
	// Derive the GRN (tenant, kind, identifier) from the key path.
	g, err := keyToGRN(key)
	if err != nil {
		return nil, err
	}
	grafanaAccessor := kinds.MetaAccessor(metaAccessor)
	rsp := &entityStore.Entity{
		GRN:          g,
		GroupVersion: requestInfo.APIVersion,
		Key:          key,
		Name:         metaAccessor.GetName(),
		Guid:         string(metaAccessor.GetUID()),
		Version:      metaAccessor.GetResourceVersion(),
		Folder:       grafanaAccessor.GetFolder(),
		CreatedAt:    metaAccessor.GetCreationTimestamp().Time.UnixMilli(),
		CreatedBy:    grafanaAccessor.GetCreatedBy(),
		UpdatedBy:    grafanaAccessor.GetUpdatedBy(),
		Slug:         grafanaAccessor.GetSlug(),
		Origin: &entityStore.EntityOriginInfo{
			Source: grafanaAccessor.GetOriginName(),
			Key:    grafanaAccessor.GetOriginKey(),
			// Path: grafanaAccessor.GetOriginPath(),
		},
		Labels: metaAccessor.GetLabels(),
	}
	// Timestamps are stored as epoch milliseconds; nil means "not set".
	if t := grafanaAccessor.GetUpdatedTimestamp(); t != nil {
		rsp.UpdatedAt = t.UnixMilli()
	}
	if t := grafanaAccessor.GetOriginTimestamp(); t != nil {
		rsp.Origin.Time = t.UnixMilli()
	}
	// Persist the k8s object metadata separately from the body.
	rsp.Meta, err = json.Marshal(meta.AsPartialObjectMetadata(metaAccessor))
	if err != nil {
		return nil, err
	}
	// TODO: store entire object in body?
	// Serialize the Spec field (if the concrete type has one) as the body.
	spec := reflect.ValueOf(res).Elem().FieldByName("Spec")
	if spec != (reflect.Value{}) {
		rsp.Body, err = json.Marshal(spec.Interface())
		if err != nil {
			return nil, err
		}
	}
	status := reflect.ValueOf(res).Elem().FieldByName("Status")
	if status != (reflect.Value{}) {
		rsp.Status, err = json.Marshal(status.Interface())
		if err != nil {
			return nil, err
		}
	}
	return rsp, nil
}

View File

@ -578,7 +578,7 @@ type InitTestDBOpt struct {
var featuresEnabledDuringTests = []string{
featuremgmt.FlagPanelTitleSearch,
featuremgmt.FlagEntityStore,
featuremgmt.FlagUnifiedStorage,
}
// InitTestDBWithMigration initializes the test DB given custom migrations.

View File

@ -0,0 +1,110 @@
package entity
import (
context "context"
"strconv"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
status "google.golang.org/grpc/status"
"github.com/grafana/grafana/pkg/infra/appcontext"
)
// Compile-time check that the wrapper satisfies the server-side interface.
var _ EntityStoreServer = (*entityStoreClientWrapper)(nil)

// entityStoreClientWrapper adapts the generated EntityStoreClient so it can
// be used through the EntityStoreServer interface; each call attaches the
// current user's identity to outgoing gRPC metadata (see wrapContext).
type entityStoreClientWrapper struct {
	EntityStoreClient
}
// Read attaches caller identity to the context and delegates to the client.
func (c *entityStoreClientWrapper) Read(ctx context.Context, in *ReadEntityRequest) (*Entity, error) {
	outCtx, err := c.wrapContext(ctx)
	if err != nil {
		return nil, err
	}
	return c.EntityStoreClient.Read(outCtx, in)
}
// BatchRead attaches caller identity to the context and delegates to the client.
func (c *entityStoreClientWrapper) BatchRead(ctx context.Context, in *BatchReadEntityRequest) (*BatchReadEntityResponse, error) {
	outCtx, err := c.wrapContext(ctx)
	if err != nil {
		return nil, err
	}
	return c.EntityStoreClient.BatchRead(outCtx, in)
}
// Write attaches caller identity to the context and delegates to the client.
func (c *entityStoreClientWrapper) Write(ctx context.Context, in *WriteEntityRequest) (*WriteEntityResponse, error) {
	outCtx, err := c.wrapContext(ctx)
	if err != nil {
		return nil, err
	}
	return c.EntityStoreClient.Write(outCtx, in)
}
// Create attaches caller identity to the context and delegates to the client.
func (c *entityStoreClientWrapper) Create(ctx context.Context, in *CreateEntityRequest) (*CreateEntityResponse, error) {
	outCtx, err := c.wrapContext(ctx)
	if err != nil {
		return nil, err
	}
	return c.EntityStoreClient.Create(outCtx, in)
}
// Update attaches caller identity to the context and delegates to the client.
func (c *entityStoreClientWrapper) Update(ctx context.Context, in *UpdateEntityRequest) (*UpdateEntityResponse, error) {
	outCtx, err := c.wrapContext(ctx)
	if err != nil {
		return nil, err
	}
	return c.EntityStoreClient.Update(outCtx, in)
}
// Delete attaches caller identity to the context and delegates to the client.
func (c *entityStoreClientWrapper) Delete(ctx context.Context, in *DeleteEntityRequest) (*DeleteEntityResponse, error) {
	outCtx, err := c.wrapContext(ctx)
	if err != nil {
		return nil, err
	}
	return c.EntityStoreClient.Delete(outCtx, in)
}
// History attaches caller identity to the context and delegates to the client.
func (c *entityStoreClientWrapper) History(ctx context.Context, in *EntityHistoryRequest) (*EntityHistoryResponse, error) {
	outCtx, err := c.wrapContext(ctx)
	if err != nil {
		return nil, err
	}
	return c.EntityStoreClient.History(outCtx, in)
}
// List attaches caller identity to the context and delegates to the client.
func (c *entityStoreClientWrapper) List(ctx context.Context, in *EntityListRequest) (*EntityListResponse, error) {
	outCtx, err := c.wrapContext(ctx)
	if err != nil {
		return nil, err
	}
	return c.EntityStoreClient.List(outCtx, in)
}
// Watch is not supported through this client wrapper yet.
func (c *entityStoreClientWrapper) Watch(*EntityWatchRequest, EntityStore_WatchServer) error {
	return status.Errorf(codes.Unimplemented, "method Watch not implemented")
}
// wrapContext copies the signed-in user's identity out of the request
// context and into outgoing gRPC metadata so the storage server can
// reconstruct the caller on its side.
func (c *entityStoreClientWrapper) wrapContext(ctx context.Context) (context.Context, error) {
	user, err := appcontext.User(ctx)
	if err != nil {
		return nil, err
	}
	// set grpc metadata into the context to pass to the grpc server
	md := metadata.Pairs(
		"grafana-idtoken", user.IDToken,
		"grafana-userid", strconv.FormatInt(user.UserID, 10),
		"grafana-orgid", strconv.FormatInt(user.OrgID, 10),
		"grafana-login", user.Login,
	)
	return metadata.NewOutgoingContext(ctx, md), nil
}
// TEMPORARY... while we split this into a new service (see below)
// AdminWrite attaches caller identity to the context and delegates to the client.
func (c *entityStoreClientWrapper) AdminWrite(ctx context.Context, in *AdminWriteEntityRequest) (*WriteEntityResponse, error) {
	outCtx, err := c.wrapContext(ctx)
	if err != nil {
		return nil, err
	}
	return c.EntityStoreClient.AdminWrite(outCtx, in)
}
// NewEntityStoreClientWrapper builds an EntityStoreServer backed by a gRPC
// connection to a remote entity store.
func NewEntityStoreClientWrapper(cc grpc.ClientConnInterface) EntityStoreServer {
	client := &entityStoreClient{cc}
	return &entityStoreClientWrapper{client}
}

View File

@ -0,0 +1,153 @@
package db
import (
"fmt"
"strings"
"time"
"github.com/jmoiron/sqlx"
"xorm.io/xorm"
"github.com/grafana/grafana/pkg/infra/db"
"github.com/grafana/grafana/pkg/services/featuremgmt"
// "github.com/grafana/grafana/pkg/services/sqlstore"
"github.com/grafana/grafana/pkg/services/sqlstore/session"
"github.com/grafana/grafana/pkg/services/store/entity/migrations"
"github.com/grafana/grafana/pkg/services/store/entity/sqlstash"
"github.com/grafana/grafana/pkg/setting"
"github.com/grafana/grafana/pkg/util"
)
// Compile-time check that *EntityDB implements sqlstash.EntityDB.
var _ sqlstash.EntityDB = (*EntityDB)(nil)

// ProvideEntityDB is the dependency-injection provider for EntityDB.
// No database connection is opened here; it is established lazily in GetEngine.
func ProvideEntityDB(db db.DB, cfg *setting.Cfg, features featuremgmt.FeatureToggles) (*EntityDB, error) {
	return &EntityDB{
		db:       db,
		cfg:      cfg,
		features: features,
	}, nil
}
// EntityDB manages the (lazily opened) database connection used by the
// entity store.
type EntityDB struct {
	db       db.DB                      // fallback Grafana DB, used when no explicit [entity_api] settings exist
	features featuremgmt.FeatureToggles // passed through to the entity-store migrations
	engine   *xorm.Engine               // cached engine, populated by GetEngine
	cfg      *setting.Cfg
}
// Init eagerly establishes (and migrates) the database connection.
func (db *EntityDB) Init() error {
	if _, err := db.GetEngine(); err != nil {
		return err
	}
	return nil
}
// GetEngine lazily creates (and caches) the xorm engine. If the [entity_api]
// config section specifies explicit connection settings, a dedicated
// connection is opened; otherwise the engine is borrowed from the main
// Grafana database. Entity-store migrations run on the first successful call.
// NOTE(review): this lazy init is not guarded by a mutex — confirm callers
// are serialized or add locking.
func (db *EntityDB) GetEngine() (*xorm.Engine, error) {
	if db.engine != nil {
		return db.engine, nil
	}
	var engine *xorm.Engine
	var err error
	cfgSection := db.cfg.SectionWithEnvOverrides("entity_api")
	dbType := cfgSection.Key("db_type").MustString("")
	// if explicit connection settings are provided, use them
	if dbType != "" {
		dbHost := cfgSection.Key("db_host").MustString("")
		dbName := cfgSection.Key("db_name").MustString("")
		dbUser := cfgSection.Key("db_user").MustString("")
		dbPass := cfgSection.Key("db_pass").MustString("")
		if dbType == "postgres" {
			// TODO: support all postgres connection options
			dbSslMode := cfgSection.Key("db_sslmode").MustString("disable")
			addr, err := util.SplitHostPortDefault(dbHost, "127.0.0.1", "5432")
			if err != nil {
				return nil, fmt.Errorf("invalid host specifier '%s': %w", dbHost, err)
			}
			connectionString := fmt.Sprintf(
				"user=%s password=%s host=%s port=%s dbname=%s sslmode=%s", // sslcert=%s sslkey=%s sslrootcert=%s",
				dbUser, dbPass, addr.Host, addr.Port, dbName, dbSslMode, // ss.dbCfg.ClientCertPath, ss.dbCfg.ClientKeyPath, ss.dbCfg.CaCertPath
			)
			engine, err = xorm.NewEngine("postgres", connectionString)
			if err != nil {
				return nil, err
			}
			// NOTE(review): this session flag looks CockroachDB-specific —
			// confirm a plain Postgres server accepts it, otherwise this
			// branch will always error out.
			_, err = engine.Exec("SET SESSION enable_experimental_alter_column_type_general=true")
			if err != nil {
				return nil, err
			}
		} else if dbType == "mysql" {
			// TODO: support all mysql connection options
			// Unix-socket hosts start with "/"; everything else is TCP.
			protocol := "tcp"
			if strings.HasPrefix(dbHost, "/") {
				protocol = "unix"
			}
			connectionString := fmt.Sprintf("%s:%s@%s(%s)/%s?collation=utf8mb4_unicode_ci&allowNativePasswords=true&clientFoundRows=true",
				dbUser, dbPass, protocol, dbHost, dbName)
			engine, err = xorm.NewEngine("mysql", connectionString)
			if err != nil {
				return nil, err
			}
			engine.SetMaxOpenConns(0)
			engine.SetMaxIdleConns(2)
			engine.SetConnMaxLifetime(time.Second * time.Duration(14400))
			// Ping the server so misconfiguration fails fast.
			_, err = engine.Exec("SELECT 1")
			if err != nil {
				return nil, err
			}
		} else {
			// TODO: sqlite support
			return nil, fmt.Errorf("invalid db type specified: %s", dbType)
		}
		// configure sql logging
		debugSQL := cfgSection.Key("log_queries").MustBool(false)
		if !debugSQL {
			engine.SetLogger(&xorm.DiscardLogger{})
		} else {
			// add stack to database calls to be able to see what repository initiated queries. Top 7 items from the stack as they are likely in the xorm library.
			// engine.SetLogger(sqlstore.NewXormLogger(log.LvlInfo, log.WithSuffix(log.New("sqlstore.xorm"), log.CallerContextKey, log.StackCaller(log.DefaultCallerDepth))))
			engine.ShowSQL(true)
			engine.ShowExecTime(true)
		}
		// otherwise, try to use the grafana db connection
	} else {
		if db.db == nil {
			return nil, fmt.Errorf("no db connection provided")
		}
		engine = db.db.GetEngine()
	}
	db.engine = engine
	// Run entity-store migrations; on failure drop the cached engine so a
	// later call can retry.
	if err := migrations.MigrateEntityStore(db, db.features); err != nil {
		db.engine = nil
		return nil, err
	}
	return db.engine, nil
}
// GetSession returns a session-based wrapper around the underlying sql.DB,
// initializing the engine on first use.
func (db *EntityDB) GetSession() (*session.SessionDB, error) {
	engine, err := db.GetEngine()
	if err != nil {
		return nil, err
	}
	sqlxDB := sqlx.NewDb(engine.DB().DB, engine.DriverName())
	return session.GetSession(sqlxDB), nil
}
// GetCfg exposes the Grafana configuration this EntityDB was built with.
func (db *EntityDB) GetCfg() *setting.Cfg {
	return db.cfg
}

View File

@ -25,6 +25,14 @@ func (i fakeEntityStore) Write(ctx context.Context, r *entity.WriteEntityRequest
return nil, fmt.Errorf("unimplemented")
}
func (i fakeEntityStore) Create(ctx context.Context, r *entity.CreateEntityRequest) (*entity.CreateEntityResponse, error) {
return nil, fmt.Errorf("unimplemented")
}
func (i fakeEntityStore) Update(ctx context.Context, r *entity.UpdateEntityRequest) (*entity.UpdateEntityResponse, error) {
return nil, fmt.Errorf("unimplemented")
}
func (i fakeEntityStore) Read(ctx context.Context, r *entity.ReadEntityRequest) (*entity.Entity, error) {
return nil, fmt.Errorf("unimplemented")
}
@ -41,10 +49,14 @@ func (i fakeEntityStore) History(ctx context.Context, r *entity.EntityHistoryReq
return nil, fmt.Errorf("unimplemented")
}
func (i fakeEntityStore) Search(ctx context.Context, r *entity.EntitySearchRequest) (*entity.EntitySearchResponse, error) {
func (i fakeEntityStore) List(ctx context.Context, r *entity.EntityListRequest) (*entity.EntityListResponse, error) {
return nil, fmt.Errorf("unimplemented")
}
func (i fakeEntityStore) Watch(*entity.EntityWatchRequest, entity.EntityStore_WatchServer) error {
return fmt.Errorf("unimplemented")
}
func (i fakeEntityStore) FindReferences(ctx context.Context, r *entity.ReferenceRequest) (*entity.EntityListResponse, error) {
return nil, fmt.Errorf("unimplemented")
}

File diff suppressed because it is too large Load Diff

View File

@ -7,41 +7,75 @@ import "pkg/infra/grn/grn.proto";
// The canonical entity/document data -- this represents the raw bytes and storage level metadata
message Entity {
// Entity identifier -- tenant_id, kind, uid
grn.GRN GRN = 1;
// The version will change when the entity is saved. It is not necessarily sortable
// Globally unique ID set by the system. This can not be set explicitly
string guid = 1;
// The resourceVersion -- it will change whenever anything on the object is saved
string version = 2;
// Time in epoch milliseconds that the entity was created
int64 created_at = 3;
// Entity identifier -- tenant_id, kind, uid
grn.GRN GRN = 3;
// Time in epoch milliseconds that the entity was updated
int64 updated_at = 4;
// group version of the entity
string group_version = 23;
// Who created the entity
string created_by = 5;
// k8s key value
string key = 22;
// Who updated the entity
string updated_by = 6;
// The folder UID
string folder = 4;
// The folder UID (not stored in the body)
string folder = 7;
// Raw meta from k8s
bytes meta = 5;
// MD5 digest of the body
string ETag = 8;
// Raw bytes of the storage entity. The kind will determine what is a valid payload
bytes body = 6;
// k8s style status (ignored for now)
bytes status = 7;
// the friendly name of the entity
string name = 8;
// Content Length
int64 size = 9;
// Raw bytes of the storage entity. The kind will determine what is a valid payload
bytes body = 10;
// MD5 digest of the body
string ETag = 10;
// Entity summary as JSON
bytes summary_json = 11;
// Time in epoch milliseconds that the entity was created
int64 created_at = 11;
// Who created the entity
string created_by = 12;
// Time in epoch milliseconds that the entity was updated
int64 updated_at = 13;
// Who updated the entity
string updated_by = 14;
// External location info
EntityOriginInfo origin = 12;
EntityOriginInfo origin = 15;
// human-readable description of the entity
string description = 16;
// URL safe version of the name. It will be unique within the folder
string slug = 17;
// Commit message (optional)
string message = 18;
// Key value pairs. Tags are are represented as keys with empty values
map<string,string> labels = 19;
// Optional field values. The schema will define and document possible values for a given kind
map<string, string> fields = 20;
// When errors exist
repeated EntityErrorInfo errors = 21;
}
// This stores additional metadata for entities that were synced from external systems
@ -69,29 +103,6 @@ message EntityErrorInfo {
bytes details_json = 3;
}
// This is a subset of Entity that does not include body or sync info
message EntityVersionInfo {
// The version will change when the entity is saved. It is not necessarily sortable
string version = 1;
// Time in epoch milliseconds that the entity was updated
int64 updated_at = 2;
// Who updated the entity
string updated_by = 3;
// Content Length
int64 size = 4;
// MD5 digest of the body
string ETag = 5;
// optional "save" or "commit" message
//
// NOTE: currently managed by the dashboard_version table, and will be returned from a "history" command
string comment = 6;
}
//-----------------------------------------------
// Get request/response
//-----------------------------------------------
@ -100,14 +111,22 @@ message ReadEntityRequest {
// Entity identifier
grn.GRN GRN = 1;
// Fetch an explicit version
string key = 7;
// Fetch an explicit version (default is latest)
string version = 2;
// Include the full meta bytes
bool with_meta = 3;
// Include the full body bytes
bool with_body = 3;
bool with_body = 4;
// Include the status bytes (ignored for now)
bool with_status = 5;
// Include derived summary metadata
bool with_summary = 4;
bool with_summary = 6;
}
//------------------------------------------------------
@ -127,85 +146,38 @@ message BatchReadEntityResponse {
//-----------------------------------------------
message WriteEntityRequest {
// Entity identifier
grn.GRN GRN = 1;
// Where to save the entity (empty will leave it unchanged)
string folder = 2;
// The raw entity body
bytes body = 3;
// Message that can be seen when exploring entity history
string comment = 4;
// Entity details
Entity entity = 1;
// Used for optimistic locking. If missing, the previous version will be replaced regardless
string previous_version = 5;
string previous_version = 2;
}
// This operation is useful when syncing a resource from external sources
// that have more accurate metadata information (git, or an archive).
// This process can bypass the forced checks that
// This process can bypass the forced checks that
message AdminWriteEntityRequest {
// Entity identifier
grn.GRN GRN = 1;
// Where to save the entity (empty will leave it unchanged)
string folder = 2;
// The raw entity body
bytes body = 3;
// Message that can be seen when exploring entity history
string comment = 4;
// Time in epoch milliseconds that the entity was created
// Optional, if 0 it will use the current time
int64 created_at = 5;
// Time in epoch milliseconds that the entity was updated
// Optional, if empty it will use the current user
int64 updated_at = 6;
// Who created the entity
// Optional, if 0 it will use the current time
string created_by = 7;
// Who updated the entity
// Optional, if empty it will use the current user
string updated_by = 8;
// An explicit version identifier
// Optional, if set, this will overwrite/define an explicit version
string version = 9;
// Entity details
Entity entity = 1;
// Used for optimistic locking. If missing, the previous version will be replaced regardless
// This may not be used along with an explicit version in the request
string previous_version = 10;
string previous_version = 2;
// Request that all previous versions are removed from the history
// This will make sense for systems that manage history explicitly externally
bool clear_history = 11;
// Optionally define where the entity came from
EntityOriginInfo origin = 12;
bool clear_history = 3;
}
message WriteEntityResponse {
// Error info -- if exists, the save did not happen
EntityErrorInfo error = 1;
// Entity identifier
grn.GRN GRN = 2;
// Entity details with the body removed
EntityVersionInfo entity = 3;
// Entity summary as JSON
bytes summary_json = 4;
// Entity details
Entity entity = 2;
// Status code
Status status = 5;
Status status = 3;
// Status enumeration
enum Status {
@ -216,6 +188,62 @@ message WriteEntityResponse {
}
}
//-----------------------------------------------
// Create request/response
//-----------------------------------------------
message CreateEntityRequest {
// Entity details
Entity entity = 1;
}
message CreateEntityResponse {
// Error info -- if exists, the save did not happen
EntityErrorInfo error = 1;
// Entity details
Entity entity = 2;
// Status code
Status status = 3;
// Status enumeration
enum Status {
ERROR = 0;
CREATED = 1;
}
}
//-----------------------------------------------
// Update request/response
//-----------------------------------------------
message UpdateEntityRequest {
// Entity details
Entity entity = 1;
// Used for optimistic locking. If missing, the previous version will be replaced regardless
string previous_version = 2;
}
message UpdateEntityResponse {
// Error info -- if exists, the save did not happen
EntityErrorInfo error = 1;
// Entity details
Entity entity = 2;
// Status code
Status status = 3;
// Status enumeration
enum Status {
ERROR = 0;
UPDATED = 1;
UNCHANGED = 2;
}
}
//-----------------------------------------------
// Delete request/response
//-----------------------------------------------
@ -224,12 +252,28 @@ message DeleteEntityRequest {
// Entity identifier
grn.GRN GRN = 1;
// Used for optimistic locking. If missing, the previous version will be replaced regardless
string previous_version = 3;
string key = 3;
// Used for optimistic locking. If missing, the current version will be deleted regardless
string previous_version = 2;
}
message DeleteEntityResponse {
bool OK = 1;
// Error info -- if exists, the delete did not happen
EntityErrorInfo error = 1;
// Entity details
Entity entity = 2;
// Status code
Status status = 3;
// Status enumeration
enum Status {
ERROR = 0;
DELETED = 1;
NOTFOUND = 2;
}
}
//-----------------------------------------------
@ -241,7 +285,7 @@ message EntityHistoryRequest {
grn.GRN GRN = 1;
// Maximum number of items to return
int64 limit = 3;
int64 limit = 3;
// Starting from the requested page
string next_page_token = 5;
@ -252,7 +296,7 @@ message EntityHistoryResponse {
grn.GRN GRN = 1;
// Entity metadata without the raw bytes
repeated EntityVersionInfo versions = 2;
repeated Entity versions = 2;
// More results exist... pass this in the next request
string next_page_token = 3;
@ -263,12 +307,12 @@ message EntityHistoryResponse {
// List request/response
//-----------------------------------------------
message EntitySearchRequest {
message EntityListRequest {
// Starting from the requested page (other query parameters must match!)
string next_page_token = 1;
// Maximum number of items to return
int64 limit = 2;
int64 limit = 2;
// Free text query string -- mileage may vary :)
string query = 3;
@ -276,6 +320,9 @@ message EntitySearchRequest {
// limit to a specific kind (empty is all)
repeated string kind = 4;
// limit to a specific key
repeated string key = 11;
// Limit results to items in a specific folder
string folder = 5;
@ -295,54 +342,22 @@ message EntitySearchRequest {
bool with_fields = 10;
}
// Search result metadata for each entity
message EntitySearchResult {
// Entity identifier
grn.GRN GRN = 1;
message ReferenceRequest {
// Starting from the requested page (other query parameters must match!)
string next_page_token = 1;
// The current version of this entity
string version = 2;
// Maximum number of items to return
int64 limit = 2;
// Content Length
int64 size = 3;
// Free text query string -- mileage may vary :)
string kind = 3;
// Time in epoch milliseconds that the entity was updated
int64 updated_at = 4;
// Who updated the entity
string updated_by = 5;
// Optionally include the full entity body
bytes body = 6;
//----------------------------------------
// Derived from body in the summary
//----------------------------------------
// Always included
string name = 7;
// Always included
string description = 8;
// The structured labels
map<string,string> labels = 9;
// Folder UID
string folder = 10;
// Slugified name
string slug = 11;
// Optionally include extracted JSON
bytes fields_json = 12;
// EntityErrorInfo in json
bytes error_json = 13;
// Free text query string -- mileage may vary :)
string uid = 4;
}
message EntitySearchResponse {
repeated EntitySearchResult results = 1;
message EntityListResponse {
repeated Entity results = 1;
// More results exist... pass this in the next request
string next_page_token = 2;
@ -353,9 +368,9 @@ message EntitySearchResponse {
//-----------------------------------------------
message EntityWatchRequest {
// Timestamp of last changes. Empty will default to
int64 since = 1;
// Timestamp of last changes. Empty will default to
int64 since = 1;
// Watch specific entities
repeated grn.GRN GRN = 2;
@ -380,7 +395,7 @@ message EntityWatchRequest {
message EntityWatchResponse {
// Timestamp the event was sent
int64 timestamp = 1;
int64 timestamp = 1;
// List of entities with the same action
repeated Entity entity = 2;
@ -398,47 +413,47 @@ message EntityWatchResponse {
message EntitySummary {
string UID = 1;
string kind = 2;
string kind = 2;
string name = 3;
string description = 4;
string name = 3;
string description = 4;
// Key value pairs. Tags are are represented as keys with empty values
map<string,string> labels = 5;
// Key value pairs. Tags are are represented as keys with empty values
map<string,string> labels = 5;
// Parent folder UID
string folder = 6;
// Parent folder UID
string folder = 6;
// URL safe version of the name. It will be unique within the folder
string slug = 7;
// URL safe version of the name. It will be unique within the folder
string slug = 7;
// When errors exist
EntityErrorInfo error = 8;
// When errors exist
EntityErrorInfo error = 8;
// Optional field values. The schema will define and document possible values for a given kind
map<string, string> fields = 9;
// Optional field values. The schema will define and document possible values for a given kind
map<string, string> fields = 9;
// eg: panels within dashboard
repeated EntitySummary nested = 10;
// eg: panels within dashboard
repeated EntitySummary nested = 10;
// Optional references to external things
repeated EntityExternalReference references = 11;
// Optional references to external things
repeated EntityExternalReference references = 11;
}
message EntityExternalReference {
// Category of dependency
// eg: datasource, plugin, runtime
string family = 1;
// Category of dependency
// eg: datasource, plugin, runtime
string family = 1;
// datasource > prometheus|influx|...
// plugin > panel | datasource
// runtime > transformer
string type = 2;
// datasource > prometheus|influx|...
// plugin > panel | datasource
// runtime > transformer
string type = 2;
// datasource > UID
// plugin > plugin identifier
// runtime > name lookup
string identifier = 3;
// datasource > UID
// plugin > plugin identifier
// runtime > name lookup
string identifier = 3;
}
@ -451,11 +466,13 @@ service EntityStore {
rpc Read(ReadEntityRequest) returns (Entity);
rpc BatchRead(BatchReadEntityRequest) returns (BatchReadEntityResponse);
rpc Write(WriteEntityRequest) returns (WriteEntityResponse);
rpc Create(CreateEntityRequest) returns (CreateEntityResponse);
rpc Update(UpdateEntityRequest) returns (UpdateEntityResponse);
rpc Delete(DeleteEntityRequest) returns (DeleteEntityResponse);
rpc History(EntityHistoryRequest) returns (EntityHistoryResponse);
rpc Search(EntitySearchRequest) returns (EntitySearchResponse);
rpc List(EntityListRequest) returns (EntityListResponse);
rpc Watch(EntityWatchRequest) returns (stream EntityWatchResponse);
// TEMPORARY... while we split this into a new service (see below)
rpc AdminWrite(AdminWriteEntityRequest) returns (WriteEntityResponse);
}

View File

@ -1,7 +1,7 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.3.0
// - protoc v4.24.4
// - protoc v4.23.4
// source: entity.proto
package entity
@ -22,9 +22,11 @@ const (
EntityStore_Read_FullMethodName = "/entity.EntityStore/Read"
EntityStore_BatchRead_FullMethodName = "/entity.EntityStore/BatchRead"
EntityStore_Write_FullMethodName = "/entity.EntityStore/Write"
EntityStore_Create_FullMethodName = "/entity.EntityStore/Create"
EntityStore_Update_FullMethodName = "/entity.EntityStore/Update"
EntityStore_Delete_FullMethodName = "/entity.EntityStore/Delete"
EntityStore_History_FullMethodName = "/entity.EntityStore/History"
EntityStore_Search_FullMethodName = "/entity.EntityStore/Search"
EntityStore_List_FullMethodName = "/entity.EntityStore/List"
EntityStore_Watch_FullMethodName = "/entity.EntityStore/Watch"
EntityStore_AdminWrite_FullMethodName = "/entity.EntityStore/AdminWrite"
)
@ -36,9 +38,11 @@ type EntityStoreClient interface {
Read(ctx context.Context, in *ReadEntityRequest, opts ...grpc.CallOption) (*Entity, error)
BatchRead(ctx context.Context, in *BatchReadEntityRequest, opts ...grpc.CallOption) (*BatchReadEntityResponse, error)
Write(ctx context.Context, in *WriteEntityRequest, opts ...grpc.CallOption) (*WriteEntityResponse, error)
Create(ctx context.Context, in *CreateEntityRequest, opts ...grpc.CallOption) (*CreateEntityResponse, error)
Update(ctx context.Context, in *UpdateEntityRequest, opts ...grpc.CallOption) (*UpdateEntityResponse, error)
Delete(ctx context.Context, in *DeleteEntityRequest, opts ...grpc.CallOption) (*DeleteEntityResponse, error)
History(ctx context.Context, in *EntityHistoryRequest, opts ...grpc.CallOption) (*EntityHistoryResponse, error)
Search(ctx context.Context, in *EntitySearchRequest, opts ...grpc.CallOption) (*EntitySearchResponse, error)
List(ctx context.Context, in *EntityListRequest, opts ...grpc.CallOption) (*EntityListResponse, error)
Watch(ctx context.Context, in *EntityWatchRequest, opts ...grpc.CallOption) (EntityStore_WatchClient, error)
// TEMPORARY... while we split this into a new service (see below)
AdminWrite(ctx context.Context, in *AdminWriteEntityRequest, opts ...grpc.CallOption) (*WriteEntityResponse, error)
@ -79,6 +83,24 @@ func (c *entityStoreClient) Write(ctx context.Context, in *WriteEntityRequest, o
return out, nil
}
// Create invokes the EntityStore/Create unary RPC and returns the decoded response.
func (c *entityStoreClient) Create(ctx context.Context, in *CreateEntityRequest, opts ...grpc.CallOption) (*CreateEntityResponse, error) {
	resp := new(CreateEntityResponse)
	if err := c.cc.Invoke(ctx, EntityStore_Create_FullMethodName, in, resp, opts...); err != nil {
		return nil, err
	}
	return resp, nil
}
// Update invokes the EntityStore/Update unary RPC and returns the decoded response.
func (c *entityStoreClient) Update(ctx context.Context, in *UpdateEntityRequest, opts ...grpc.CallOption) (*UpdateEntityResponse, error) {
	resp := new(UpdateEntityResponse)
	if err := c.cc.Invoke(ctx, EntityStore_Update_FullMethodName, in, resp, opts...); err != nil {
		return nil, err
	}
	return resp, nil
}
func (c *entityStoreClient) Delete(ctx context.Context, in *DeleteEntityRequest, opts ...grpc.CallOption) (*DeleteEntityResponse, error) {
out := new(DeleteEntityResponse)
err := c.cc.Invoke(ctx, EntityStore_Delete_FullMethodName, in, out, opts...)
@ -97,9 +119,9 @@ func (c *entityStoreClient) History(ctx context.Context, in *EntityHistoryReques
return out, nil
}
func (c *entityStoreClient) Search(ctx context.Context, in *EntitySearchRequest, opts ...grpc.CallOption) (*EntitySearchResponse, error) {
out := new(EntitySearchResponse)
err := c.cc.Invoke(ctx, EntityStore_Search_FullMethodName, in, out, opts...)
func (c *entityStoreClient) List(ctx context.Context, in *EntityListRequest, opts ...grpc.CallOption) (*EntityListResponse, error) {
out := new(EntityListResponse)
err := c.cc.Invoke(ctx, EntityStore_List_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
@ -154,9 +176,11 @@ type EntityStoreServer interface {
Read(context.Context, *ReadEntityRequest) (*Entity, error)
BatchRead(context.Context, *BatchReadEntityRequest) (*BatchReadEntityResponse, error)
Write(context.Context, *WriteEntityRequest) (*WriteEntityResponse, error)
Create(context.Context, *CreateEntityRequest) (*CreateEntityResponse, error)
Update(context.Context, *UpdateEntityRequest) (*UpdateEntityResponse, error)
Delete(context.Context, *DeleteEntityRequest) (*DeleteEntityResponse, error)
History(context.Context, *EntityHistoryRequest) (*EntityHistoryResponse, error)
Search(context.Context, *EntitySearchRequest) (*EntitySearchResponse, error)
List(context.Context, *EntityListRequest) (*EntityListResponse, error)
Watch(*EntityWatchRequest, EntityStore_WatchServer) error
// TEMPORARY... while we split this into a new service (see below)
AdminWrite(context.Context, *AdminWriteEntityRequest) (*WriteEntityResponse, error)
@ -175,14 +199,20 @@ func (UnimplementedEntityStoreServer) BatchRead(context.Context, *BatchReadEntit
// Write always fails with codes.Unimplemented; embed a real implementation to override.
func (UnimplementedEntityStoreServer) Write(context.Context, *WriteEntityRequest) (*WriteEntityResponse, error) {
	return nil, status.Error(codes.Unimplemented, "method Write not implemented")
}
// Create always fails with codes.Unimplemented; embed a real implementation to override.
func (UnimplementedEntityStoreServer) Create(context.Context, *CreateEntityRequest) (*CreateEntityResponse, error) {
	return nil, status.Error(codes.Unimplemented, "method Create not implemented")
}
// Update always fails with codes.Unimplemented; embed a real implementation to override.
func (UnimplementedEntityStoreServer) Update(context.Context, *UpdateEntityRequest) (*UpdateEntityResponse, error) {
	return nil, status.Error(codes.Unimplemented, "method Update not implemented")
}
// Delete always fails with codes.Unimplemented; embed a real implementation to override.
func (UnimplementedEntityStoreServer) Delete(context.Context, *DeleteEntityRequest) (*DeleteEntityResponse, error) {
	return nil, status.Error(codes.Unimplemented, "method Delete not implemented")
}
// History always fails with codes.Unimplemented; embed a real implementation to override.
func (UnimplementedEntityStoreServer) History(context.Context, *EntityHistoryRequest) (*EntityHistoryResponse, error) {
	return nil, status.Error(codes.Unimplemented, "method History not implemented")
}
func (UnimplementedEntityStoreServer) Search(context.Context, *EntitySearchRequest) (*EntitySearchResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method Search not implemented")
func (UnimplementedEntityStoreServer) List(context.Context, *EntityListRequest) (*EntityListResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method List not implemented")
}
func (UnimplementedEntityStoreServer) Watch(*EntityWatchRequest, EntityStore_WatchServer) error {
return status.Errorf(codes.Unimplemented, "method Watch not implemented")
@ -256,6 +286,42 @@ func _EntityStore_Write_Handler(srv interface{}, ctx context.Context, dec func(i
return interceptor(ctx, in, info, handler)
}
// _EntityStore_Create_Handler decodes a CreateEntityRequest and dispatches it to
// the server implementation, routing through the unary interceptor when one is set.
func _EntityStore_Create_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	req := new(CreateEntityRequest)
	if err := dec(req); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(EntityStoreServer).Create(ctx, req)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: EntityStore_Create_FullMethodName,
	}
	wrapped := func(ctx context.Context, r interface{}) (interface{}, error) {
		return srv.(EntityStoreServer).Create(ctx, r.(*CreateEntityRequest))
	}
	return interceptor(ctx, req, info, wrapped)
}
// _EntityStore_Update_Handler decodes an UpdateEntityRequest and dispatches it to
// the server implementation, routing through the unary interceptor when one is set.
func _EntityStore_Update_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	req := new(UpdateEntityRequest)
	if err := dec(req); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(EntityStoreServer).Update(ctx, req)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: EntityStore_Update_FullMethodName,
	}
	wrapped := func(ctx context.Context, r interface{}) (interface{}, error) {
		return srv.(EntityStoreServer).Update(ctx, r.(*UpdateEntityRequest))
	}
	return interceptor(ctx, req, info, wrapped)
}
func _EntityStore_Delete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(DeleteEntityRequest)
if err := dec(in); err != nil {
@ -292,20 +358,20 @@ func _EntityStore_History_Handler(srv interface{}, ctx context.Context, dec func
return interceptor(ctx, in, info, handler)
}
func _EntityStore_Search_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(EntitySearchRequest)
func _EntityStore_List_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(EntityListRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(EntityStoreServer).Search(ctx, in)
return srv.(EntityStoreServer).List(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: EntityStore_Search_FullMethodName,
FullMethod: EntityStore_List_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(EntityStoreServer).Search(ctx, req.(*EntitySearchRequest))
return srv.(EntityStoreServer).List(ctx, req.(*EntityListRequest))
}
return interceptor(ctx, in, info, handler)
}
@ -368,6 +434,14 @@ var EntityStore_ServiceDesc = grpc.ServiceDesc{
MethodName: "Write",
Handler: _EntityStore_Write_Handler,
},
{
MethodName: "Create",
Handler: _EntityStore_Create_Handler,
},
{
MethodName: "Update",
Handler: _EntityStore_Update_Handler,
},
{
MethodName: "Delete",
Handler: _EntityStore_Delete_Handler,
@ -377,8 +451,8 @@ var EntityStore_ServiceDesc = grpc.ServiceDesc{
Handler: _EntityStore_History_Handler,
},
{
MethodName: "Search",
Handler: _EntityStore_Search_Handler,
MethodName: "List",
Handler: _EntityStore_List_Handler,
},
{
MethodName: "AdminWrite",

View File

@ -1,303 +0,0 @@
package entity
import (
"encoding/base64"
"encoding/json"
"fmt"
"unsafe"
jsoniter "github.com/json-iterator/go"
"github.com/grafana/grafana/pkg/infra/grn"
)
// init registers the custom jsoniter codecs for the entity proto types so that
// any marshal/unmarshal through jsoniter uses them instead of reflection.
// Entity gets both an encoder and a decoder; the other two are encode-only.
func init() { //nolint:gochecknoinits
	jsoniter.RegisterTypeEncoder("entity.EntitySearchResult", &searchResultCodec{})
	jsoniter.RegisterTypeEncoder("entity.WriteEntityResponse", &writeResponseCodec{})
	jsoniter.RegisterTypeEncoder("entity.Entity", &rawEntityCodec{})
	jsoniter.RegisterTypeDecoder("entity.Entity", &rawEntityCodec{})
}
// writeRawJson emits val verbatim when it is already valid JSON; anything else
// is written as a JSON-quoted string.
func writeRawJson(stream *jsoniter.Stream, val []byte) {
	if !json.Valid(val) {
		stream.WriteString(string(val))
		return
	}
	_, _ = stream.Write(val)
}
// Unlike the standard JSON marshal, this will write bytes as JSON when it can
type rawEntityCodec struct{}

// MarshalJSON renders the entity through jsoniter, which dispatches to the
// registered rawEntityCodec encoder.
func (obj *Entity) MarshalJSON() ([]byte, error) {
	cfg := jsoniter.ConfigCompatibleWithStandardLibrary
	return cfg.Marshal(obj)
}
// UnmarshalJSON will read JSON into a Entity.
// It rejects a nil receiver, then streams the bytes through readEntity and
// surfaces any parse error recorded on the iterator.
func (obj *Entity) UnmarshalJSON(b []byte) error {
	if obj == nil {
		// fixed typo in the error message: "objcet" -> "object"
		return fmt.Errorf("unexpected nil for raw object")
	}
	iter := jsoniter.ParseBytes(jsoniter.ConfigDefault, b)
	readEntity(iter, obj)
	return iter.Error
}
// IsEmpty reports whether the entity carries neither an identifier nor a body.
func (codec *rawEntityCodec) IsEmpty(ptr unsafe.Pointer) bool {
	ent := (*Entity)(ptr)
	return ent.GRN == nil && ent.Body == nil
}
// Encode writes the entity as a JSON object. "GRN" is always emitted first;
// every other field is written only when it has a non-zero value, with
// stream.WriteMore inserting the comma separator before each additional field
// (so the order of these branches is load-bearing).
func (codec *rawEntityCodec) Encode(ptr unsafe.Pointer, stream *jsoniter.Stream) {
	obj := (*Entity)(ptr)
	stream.WriteObjectStart()
	stream.WriteObjectField("GRN")
	stream.WriteVal(obj.GRN)
	if obj.Version != "" {
		stream.WriteMore()
		stream.WriteObjectField("version")
		stream.WriteString(obj.Version)
	}
	if obj.CreatedAt > 0 {
		stream.WriteMore()
		stream.WriteObjectField("createdAt")
		stream.WriteInt64(obj.CreatedAt)
	}
	if obj.UpdatedAt > 0 {
		stream.WriteMore()
		stream.WriteObjectField("updatedAt")
		stream.WriteInt64(obj.UpdatedAt)
	}
	if obj.CreatedBy != "" {
		stream.WriteMore()
		stream.WriteObjectField("createdBy")
		stream.WriteString(obj.CreatedBy)
	}
	if obj.UpdatedBy != "" {
		stream.WriteMore()
		stream.WriteObjectField("updatedBy")
		stream.WriteString(obj.UpdatedBy)
	}
	if obj.Folder != "" {
		stream.WriteMore()
		stream.WriteObjectField("folder")
		stream.WriteString(obj.Folder)
	}
	if obj.Body != nil {
		stream.WriteMore()
		// A body that is already valid JSON is inlined under "body"; any other
		// byte payload is base64-encoded under "body_base64" so the output
		// stays valid JSON. readEntity reverses both forms.
		if json.Valid(obj.Body) {
			stream.WriteObjectField("body")
			stream.WriteRaw(string(obj.Body)) // works for strings
		} else {
			sEnc := base64.StdEncoding.EncodeToString(obj.Body)
			stream.WriteObjectField("body_base64")
			stream.WriteString(sEnc) // works for strings
		}
	}
	if len(obj.SummaryJson) > 0 {
		stream.WriteMore()
		stream.WriteObjectField("summary")
		writeRawJson(stream, obj.SummaryJson)
	}
	if obj.ETag != "" {
		stream.WriteMore()
		stream.WriteObjectField("etag")
		stream.WriteString(obj.ETag)
	}
	if obj.Size > 0 {
		stream.WriteMore()
		stream.WriteObjectField("size")
		stream.WriteInt64(obj.Size)
	}
	if obj.Origin != nil {
		stream.WriteMore()
		stream.WriteObjectField("origin")
		stream.WriteVal(obj.Origin)
	}
	stream.WriteObjectEnd()
}
// Decode resets the target Entity to its zero value, then fills it from the
// iterator via readEntity.
func (codec *rawEntityCodec) Decode(ptr unsafe.Pointer, iter *jsoniter.Iterator) {
	entity := (*Entity)(ptr)
	*entity = Entity{} // drop any previous contents before reading
	readEntity(iter, entity)
}
// readEntity populates raw from the iterator, one top-level JSON field at a
// time. Unknown fields are reported as errors and abort the read; errors are
// recorded on the iterator (checked by the caller via iter.Error).
func readEntity(iter *jsoniter.Iterator, raw *Entity) {
	for l1Field := iter.ReadObject(); l1Field != ""; l1Field = iter.ReadObject() {
		switch l1Field {
		case "GRN":
			raw.GRN = &grn.GRN{}
			iter.ReadVal(raw.GRN)
		case "updatedAt":
			raw.UpdatedAt = iter.ReadInt64()
		case "updatedBy":
			raw.UpdatedBy = iter.ReadString()
		case "createdAt":
			raw.CreatedAt = iter.ReadInt64()
		case "createdBy":
			raw.CreatedBy = iter.ReadString()
		case "size":
			raw.Size = iter.ReadInt64()
		case "etag":
			raw.ETag = iter.ReadString()
		case "version":
			raw.Version = iter.ReadString()
		case "folder":
			raw.Folder = iter.ReadString()
		case "origin":
			raw.Origin = &EntityOriginInfo{}
			iter.ReadVal(raw.Origin)
		case "summary":
			// Decode to interface{} and re-marshal to capture the raw JSON
			// bytes; note this normalizes formatting/key order.
			var val interface{}
			iter.ReadVal(&val) // ??? is there a smarter way to just keep the underlying bytes without read+marshal
			body, err := json.Marshal(val)
			if err != nil {
				iter.ReportError("raw entity", "error reading summary body")
				return
			}
			raw.SummaryJson = body
		case "body":
			var val interface{}
			iter.ReadVal(&val) // ??? is there a smarter way to just keep the underlying bytes without read+marshal
			body, err := json.Marshal(val)
			if err != nil {
				iter.ReportError("raw entity", "error creating json from body")
				return
			}
			raw.Body = body
		case "body_base64":
			// Counterpart of the encoder's non-JSON branch: the body was
			// base64-encoded, so decode it back to raw bytes.
			val := iter.ReadString()
			body, err := base64.StdEncoding.DecodeString(val)
			if err != nil {
				iter.ReportError("raw entity", "error decoding base64 body")
				return
			}
			raw.Body = body
		default:
			iter.ReportError("raw object", "unexpected field: "+l1Field)
			return
		}
	}
}
// Unlike the standard JSON marshal, this will write bytes as JSON when it can
type searchResultCodec struct{}

// MarshalJSON delegates to jsoniter so the registered searchResultCodec is used.
func (obj *EntitySearchResult) MarshalJSON() ([]byte, error) {
	cfg := jsoniter.ConfigCompatibleWithStandardLibrary
	return cfg.Marshal(obj)
}

// IsEmpty reports whether the result carries neither an identifier nor a body.
func (codec *searchResultCodec) IsEmpty(ptr unsafe.Pointer) bool {
	res := (*EntitySearchResult)(ptr)
	return res.GRN == nil && res.Body == nil
}
// Encode writes the search result as a JSON object. "GRN" is always first;
// every other field is emitted only when set, with WriteMore inserting the
// comma separator, so branch order determines field order.
func (codec *searchResultCodec) Encode(ptr unsafe.Pointer, stream *jsoniter.Stream) {
	obj := (*EntitySearchResult)(ptr)
	stream.WriteObjectStart()
	stream.WriteObjectField("GRN")
	stream.WriteVal(obj.GRN)
	if obj.Name != "" {
		stream.WriteMore()
		stream.WriteObjectField("name")
		stream.WriteString(obj.Name)
	}
	if obj.Description != "" {
		stream.WriteMore()
		stream.WriteObjectField("description")
		stream.WriteString(obj.Description)
	}
	if obj.Size > 0 {
		stream.WriteMore()
		stream.WriteObjectField("size")
		stream.WriteInt64(obj.Size)
	}
	if obj.UpdatedAt > 0 {
		stream.WriteMore()
		stream.WriteObjectField("updatedAt")
		stream.WriteInt64(obj.UpdatedAt)
	}
	if obj.UpdatedBy != "" {
		stream.WriteMore()
		stream.WriteObjectField("updatedBy")
		stream.WriteVal(obj.UpdatedBy)
	}
	if obj.Body != nil {
		stream.WriteMore()
		if json.Valid(obj.Body) {
			// valid JSON is inlined verbatim
			stream.WriteObjectField("body")
			_, _ = stream.Write(obj.Body) // works for strings
		} else {
			// NOTE(review): WriteVal on []byte presumably emits a base64
			// string under "body_base64" — confirm against jsoniter behavior.
			stream.WriteObjectField("body_base64")
			stream.WriteVal(obj.Body) // works for strings
		}
	}
	if obj.Labels != nil {
		stream.WriteMore()
		stream.WriteObjectField("labels")
		stream.WriteVal(obj.Labels)
	}
	if obj.ErrorJson != nil {
		stream.WriteMore()
		stream.WriteObjectField("error")
		writeRawJson(stream, obj.ErrorJson)
	}
	if obj.FieldsJson != nil {
		stream.WriteMore()
		stream.WriteObjectField("fields")
		writeRawJson(stream, obj.FieldsJson)
	}
	stream.WriteObjectEnd()
}
// Unlike the standard JSON marshal, this will write bytes as JSON when it can
type writeResponseCodec struct{}

// MarshalJSON delegates to jsoniter so the registered writeResponseCodec is used.
func (obj *WriteEntityResponse) MarshalJSON() ([]byte, error) {
	cfg := jsoniter.ConfigCompatibleWithStandardLibrary
	return cfg.Marshal(obj)
}

// IsEmpty reports whether the response pointer itself is nil.
func (codec *writeResponseCodec) IsEmpty(ptr unsafe.Pointer) bool {
	resp := (*WriteEntityResponse)(ptr)
	return resp == nil
}
// Encode writes the write-response as a JSON object. "status" (the enum's
// string form) is always first; the remaining fields are emitted only when
// set, with WriteMore inserting the comma separator.
func (codec *writeResponseCodec) Encode(ptr unsafe.Pointer, stream *jsoniter.Stream) {
	obj := (*WriteEntityResponse)(ptr)
	stream.WriteObjectStart()
	stream.WriteObjectField("status")
	stream.WriteString(obj.Status.String())
	if obj.Error != nil {
		stream.WriteMore()
		stream.WriteObjectField("error")
		stream.WriteVal(obj.Error)
	}
	if obj.GRN != nil {
		stream.WriteMore()
		stream.WriteObjectField("GRN")
		stream.WriteVal(obj.GRN)
	}
	if obj.Entity != nil {
		stream.WriteMore()
		stream.WriteObjectField("entity")
		stream.WriteVal(obj.Entity)
	}
	if len(obj.SummaryJson) > 0 {
		stream.WriteMore()
		stream.WriteObjectField("summary")
		writeRawJson(stream, obj.SummaryJson)
	}
	stream.WriteObjectEnd()
}

View File

@ -1,50 +0,0 @@
package entity
import (
"encoding/json"
"testing"
"github.com/stretchr/testify/require"
"github.com/grafana/grafana/pkg/infra/grn"
)
// TestRawEncoders round-trips an Entity through the custom JSON codec and
// checks the emitted shape.
func TestRawEncoders(t *testing.T) {
	payload, err := json.Marshal(map[string]any{
		"hello": "world",
		"field": 1.23,
	})
	require.NoError(t, err)

	src := &Entity{
		GRN: &grn.GRN{
			ResourceIdentifier: "a",
			ResourceKind:       "b",
		},
		Version: "c",
		ETag:    "d",
		Body:    payload,
	}

	out, err := json.MarshalIndent(src, "", " ")
	require.NoError(t, err)

	require.JSONEq(t, `{
	"GRN": {
	"ResourceKind": "b",
	"ResourceIdentifier": "a"
	},
	"version": "c",
	"body": {
	"field": 1.23,
	"hello": "world"
	},
	"etag": "d"
	}`, string(out))

	// the encoded form must decode back without error
	clone := &Entity{} // renamed from `copy`, which shadowed the builtin
	require.NoError(t, json.Unmarshal(out, clone))
}

View File

@ -6,64 +6,116 @@ import (
"github.com/grafana/grafana/pkg/services/sqlstore/migrator"
)
// getLatinPathColumn builds a non-nullable NVARCHAR(1024) column definition
// for path-like values.
func getLatinPathColumn(name string) *migrator.Column {
	col := migrator.Column{
		Name:     name,
		Type:     migrator.DB_NVarchar,
		Length:   1024,
		Nullable: false,
		IsLatin:  true, // only used in MySQL
	}
	return &col
}
func initEntityTables(mg *migrator.Migrator) string {
marker := "Initialize entity tables (v005)" // changing this key wipe+rewrite everything
mg.AddMigration(marker, &migrator.RawSQLMigration{})
func initEntityTables(mg *migrator.Migrator) {
grnLength := 256 // len(tenant)~8 + len(kind)!16 + len(kind)~128 = 256
tables := []migrator.Table{}
tables = append(tables, migrator.Table{
Name: "entity",
Columns: []*migrator.Column{
// Object ID (OID) will be unique across all objects/instances
// uuid5( tenant_id, kind + uid )
{Name: "grn", Type: migrator.DB_NVarchar, Length: grnLength, Nullable: false, IsPrimaryKey: true},
// primary identifier
{Name: "guid", Type: migrator.DB_NVarchar, Length: 36, Nullable: false, IsPrimaryKey: true},
{Name: "version", Type: migrator.DB_NVarchar, Length: 128, Nullable: false},
// The entity identifier
{Name: "tenant_id", Type: migrator.DB_BigInt, Nullable: false},
{Name: "kind", Type: migrator.DB_NVarchar, Length: 255, Nullable: false},
{Name: "uid", Type: migrator.DB_NVarchar, Length: 40, Nullable: false},
{Name: "folder", Type: migrator.DB_NVarchar, Length: 40, Nullable: false},
{Name: "slug", Type: migrator.DB_NVarchar, Length: 189, Nullable: false}, // from title
{Name: "key", Type: migrator.DB_Text, Nullable: false},
{Name: "group", Type: migrator.DB_NVarchar, Length: 190, Nullable: false},
{Name: "group_version", Type: migrator.DB_NVarchar, Length: 190, Nullable: false},
{Name: "kind", Type: migrator.DB_NVarchar, Length: 190, Nullable: false},
{Name: "uid", Type: migrator.DB_NVarchar, Length: 190, Nullable: false},
{Name: "folder", Type: migrator.DB_NVarchar, Length: 190, Nullable: false}, // uid of folder
// The raw entity body (any byte array)
{Name: "body", Type: migrator.DB_LongBlob, Nullable: true}, // null when nested or remote
{Name: "meta", Type: migrator.DB_Text, Nullable: true}, // raw meta object from k8s (with standard stuff removed)
{Name: "body", Type: migrator.DB_LongText, Nullable: true}, // null when nested or remote
{Name: "status", Type: migrator.DB_Text, Nullable: true}, // raw status object
{Name: "size", Type: migrator.DB_BigInt, Nullable: false},
{Name: "etag", Type: migrator.DB_NVarchar, Length: 32, Nullable: false, IsLatin: true}, // md5(body)
{Name: "version", Type: migrator.DB_NVarchar, Length: 128, Nullable: false},
// Who changed what when -- We should avoid JOINs with other tables in the database
{Name: "updated_at", Type: migrator.DB_BigInt, Nullable: false},
// Who changed what when
{Name: "created_at", Type: migrator.DB_BigInt, Nullable: false},
{Name: "updated_by", Type: migrator.DB_NVarchar, Length: 190, Nullable: false},
{Name: "created_by", Type: migrator.DB_NVarchar, Length: 190, Nullable: false},
{Name: "updated_at", Type: migrator.DB_BigInt, Nullable: false},
{Name: "updated_by", Type: migrator.DB_NVarchar, Length: 190, Nullable: false},
// Mark objects with origin metadata
{Name: "origin", Type: migrator.DB_NVarchar, Length: 40, Nullable: false},
getLatinPathColumn("origin_key"), // index with length 1024
{Name: "origin_key", Type: migrator.DB_Text, Nullable: false},
{Name: "origin_ts", Type: migrator.DB_BigInt, Nullable: false},
// Summary data (always extracted from the `body` column)
{Name: "name", Type: migrator.DB_NVarchar, Length: 255, Nullable: false},
{Name: "description", Type: migrator.DB_NVarchar, Length: 255, Nullable: true},
{Name: "labels", Type: migrator.DB_Text, Nullable: true}, // JSON object
{Name: "fields", Type: migrator.DB_Text, Nullable: true}, // JSON object
{Name: "errors", Type: migrator.DB_Text, Nullable: true}, // JSON object
// Metadata
{Name: "name", Type: migrator.DB_NVarchar, Length: 190, Nullable: false},
{Name: "slug", Type: migrator.DB_NVarchar, Length: 190, Nullable: false}, // from title
{Name: "description", Type: migrator.DB_Text, Nullable: true},
// Commit message
{Name: "message", Type: migrator.DB_Text, Nullable: false}, // defaults to empty string
{Name: "labels", Type: migrator.DB_Text, Nullable: true}, // JSON object
{Name: "fields", Type: migrator.DB_Text, Nullable: true}, // JSON object
{Name: "errors", Type: migrator.DB_Text, Nullable: true}, // JSON object
},
Indices: []*migrator.Index{
{Cols: []string{"kind"}},
{Cols: []string{"folder"}},
{Cols: []string{"uid"}},
{Cols: []string{"tenant_id", "kind", "uid"}, Type: migrator.UniqueIndex},
// {Cols: []string{"tenant_id", "folder", "slug"}, Type: migrator.UniqueIndex},
{Cols: []string{"folder"}, Type: migrator.IndexType},
},
})
tables = append(tables, migrator.Table{
Name: "entity_history",
Columns: []*migrator.Column{
// only difference from entity table is that we store multiple versions of the same entity
// so we have a unique index on guid+version instead of guid as primary key
{Name: "guid", Type: migrator.DB_NVarchar, Length: 36, Nullable: false},
{Name: "version", Type: migrator.DB_NVarchar, Length: 128, Nullable: false},
// The entity identifier
{Name: "tenant_id", Type: migrator.DB_BigInt, Nullable: false},
{Name: "key", Type: migrator.DB_Text, Nullable: false},
{Name: "group", Type: migrator.DB_NVarchar, Length: 190, Nullable: false},
{Name: "group_version", Type: migrator.DB_NVarchar, Length: 190, Nullable: false},
{Name: "kind", Type: migrator.DB_NVarchar, Length: 190, Nullable: false},
{Name: "uid", Type: migrator.DB_NVarchar, Length: 190, Nullable: false},
{Name: "folder", Type: migrator.DB_NVarchar, Length: 190, Nullable: false}, // uid of folder
{Name: "access", Type: migrator.DB_Text, Nullable: true}, // JSON object
// The raw entity body (any byte array)
{Name: "meta", Type: migrator.DB_Text, Nullable: true}, // raw meta object from k8s (with standard stuff removed)
{Name: "body", Type: migrator.DB_LongText, Nullable: true}, // null when nested or remote
{Name: "status", Type: migrator.DB_Text, Nullable: true}, // raw status object
{Name: "size", Type: migrator.DB_BigInt, Nullable: false},
{Name: "etag", Type: migrator.DB_NVarchar, Length: 32, Nullable: false, IsLatin: true}, // md5(body)
// Who changed what when
{Name: "created_at", Type: migrator.DB_BigInt, Nullable: false},
{Name: "created_by", Type: migrator.DB_NVarchar, Length: 190, Nullable: false},
{Name: "updated_at", Type: migrator.DB_BigInt, Nullable: false},
{Name: "updated_by", Type: migrator.DB_NVarchar, Length: 190, Nullable: false},
// Mark objects with origin metadata
{Name: "origin", Type: migrator.DB_NVarchar, Length: 40, Nullable: false},
{Name: "origin_key", Type: migrator.DB_Text, Nullable: false},
{Name: "origin_ts", Type: migrator.DB_BigInt, Nullable: false},
// Metadata
{Name: "name", Type: migrator.DB_NVarchar, Length: 190, Nullable: false},
{Name: "slug", Type: migrator.DB_NVarchar, Length: 190, Nullable: false}, // from title
{Name: "description", Type: migrator.DB_Text, Nullable: true},
// Commit message
{Name: "message", Type: migrator.DB_Text, Nullable: false}, // defaults to empty string
{Name: "labels", Type: migrator.DB_Text, Nullable: true}, // JSON object
{Name: "fields", Type: migrator.DB_Text, Nullable: true}, // JSON object
{Name: "errors", Type: migrator.DB_Text, Nullable: true}, // JSON object
},
Indices: []*migrator.Index{
{Cols: []string{"guid", "version"}, Type: migrator.UniqueIndex},
{Cols: []string{"tenant_id", "kind", "uid", "version"}, Type: migrator.UniqueIndex},
},
})
@ -71,33 +123,26 @@ func initEntityTables(mg *migrator.Migrator) {
tables = append(tables, migrator.Table{
Name: "entity_folder",
Columns: []*migrator.Column{
{Name: "grn", Type: migrator.DB_NVarchar, Length: grnLength, Nullable: false, IsPrimaryKey: true},
{Name: "tenant_id", Type: migrator.DB_BigInt, Nullable: false},
{Name: "uid", Type: migrator.DB_NVarchar, Length: 40, Nullable: false},
getLatinPathColumn("slug_path"), ///slug/slug/slug/
{Name: "tree", Type: migrator.DB_Text, Nullable: false}, // JSON []{uid, title}
{Name: "depth", Type: migrator.DB_Int, Nullable: false}, // starts at 1
{Name: "left", Type: migrator.DB_Int, Nullable: false}, // MPTT
{Name: "right", Type: migrator.DB_Int, Nullable: false}, // MPTT
{Name: "detached", Type: migrator.DB_Bool, Nullable: false}, // a parent folder was not found
},
Indices: []*migrator.Index{
{Cols: []string{"tenant_id", "uid"}, Type: migrator.UniqueIndex},
// {Cols: []string{"tenant_id", "slug_path"}, Type: migrator.UniqueIndex},
{Name: "guid", Type: migrator.DB_NVarchar, Length: 36, Nullable: false, IsPrimaryKey: true},
{Name: "slug_path", Type: migrator.DB_Text, Nullable: false}, // /slug/slug/slug/
{Name: "tree", Type: migrator.DB_Text, Nullable: false}, // JSON []{uid, title}
{Name: "depth", Type: migrator.DB_Int, Nullable: false}, // starts at 1
{Name: "lft", Type: migrator.DB_Int, Nullable: false}, // MPTT
{Name: "rgt", Type: migrator.DB_Int, Nullable: false}, // MPTT
{Name: "detached", Type: migrator.DB_Bool, Nullable: false}, // a parent folder was not found
},
})
tables = append(tables, migrator.Table{
Name: "entity_labels",
Columns: []*migrator.Column{
{Name: "grn", Type: migrator.DB_NVarchar, Length: grnLength, Nullable: false},
{Name: "label", Type: migrator.DB_NVarchar, Length: 191, Nullable: false},
{Name: "value", Type: migrator.DB_NVarchar, Length: 1024, Nullable: false},
{Name: "parent_grn", Type: migrator.DB_NVarchar, Length: grnLength, Nullable: true},
{Name: "guid", Type: migrator.DB_NVarchar, Length: 36, Nullable: false},
{Name: "label", Type: migrator.DB_NVarchar, Length: 190, Nullable: false},
{Name: "value", Type: migrator.DB_Text, Nullable: false},
},
Indices: []*migrator.Index{
{Cols: []string{"grn", "label"}, Type: migrator.UniqueIndex},
{Cols: []string{"parent_grn"}, Type: migrator.IndexType},
{Cols: []string{"guid", "label"}, Type: migrator.UniqueIndex},
},
})
@ -105,78 +150,24 @@ func initEntityTables(mg *migrator.Migrator) {
Name: "entity_ref",
Columns: []*migrator.Column{
// Source:
{Name: "grn", Type: migrator.DB_NVarchar, Length: grnLength, Nullable: false},
{Name: "parent_grn", Type: migrator.DB_NVarchar, Length: grnLength, Nullable: true},
{Name: "guid", Type: migrator.DB_NVarchar, Length: 36, Nullable: false},
// Address (defined in the body, not resolved, may be invalid and change)
{Name: "family", Type: migrator.DB_NVarchar, Length: 255, Nullable: false},
{Name: "type", Type: migrator.DB_NVarchar, Length: 255, Nullable: true},
{Name: "id", Type: migrator.DB_NVarchar, Length: 1024, Nullable: true},
{Name: "family", Type: migrator.DB_NVarchar, Length: 190, Nullable: false},
{Name: "type", Type: migrator.DB_NVarchar, Length: 190, Nullable: true},
{Name: "id", Type: migrator.DB_NVarchar, Length: 190, Nullable: true},
// Runtime calcs (will depend on the system state)
{Name: "resolved_ok", Type: migrator.DB_Bool, Nullable: false},
{Name: "resolved_to", Type: migrator.DB_NVarchar, Length: 40, Nullable: false},
{Name: "resolved_warning", Type: migrator.DB_NVarchar, Length: 255, Nullable: false},
{Name: "resolved_to", Type: migrator.DB_NVarchar, Length: 36, Nullable: false},
{Name: "resolved_warning", Type: migrator.DB_Text, Nullable: false},
{Name: "resolved_time", Type: migrator.DB_DateTime, Nullable: false}, // resolution cache timestamp
},
Indices: []*migrator.Index{
{Cols: []string{"grn"}, Type: migrator.IndexType},
{Cols: []string{"guid"}, Type: migrator.IndexType},
{Cols: []string{"family"}, Type: migrator.IndexType},
{Cols: []string{"type"}, Type: migrator.IndexType},
{Cols: []string{"resolved_to"}, Type: migrator.IndexType},
{Cols: []string{"parent_grn"}, Type: migrator.IndexType},
},
})
tables = append(tables, migrator.Table{
Name: "entity_history",
Columns: []*migrator.Column{
{Name: "grn", Type: migrator.DB_NVarchar, Length: grnLength, Nullable: false},
{Name: "version", Type: migrator.DB_NVarchar, Length: 128, Nullable: false},
// Raw bytes
{Name: "body", Type: migrator.DB_LongBlob, Nullable: false},
{Name: "size", Type: migrator.DB_BigInt, Nullable: false},
{Name: "etag", Type: migrator.DB_NVarchar, Length: 32, Nullable: false, IsLatin: true}, // md5(body)
// Who changed what when
{Name: "updated_at", Type: migrator.DB_BigInt, Nullable: false},
{Name: "updated_by", Type: migrator.DB_NVarchar, Length: 190, Nullable: false},
// Commit message
{Name: "message", Type: migrator.DB_Text, Nullable: false}, // defaults to empty string
},
Indices: []*migrator.Index{
{Cols: []string{"grn", "version"}, Type: migrator.UniqueIndex},
{Cols: []string{"updated_by"}, Type: migrator.IndexType},
},
})
tables = append(tables, migrator.Table{
Name: "entity_nested",
Columns: []*migrator.Column{
{Name: "grn", Type: migrator.DB_NVarchar, Length: grnLength, Nullable: false, IsPrimaryKey: true},
{Name: "parent_grn", Type: migrator.DB_NVarchar, Length: grnLength, Nullable: false},
// The entity identifier
{Name: "tenant_id", Type: migrator.DB_BigInt, Nullable: false},
{Name: "kind", Type: migrator.DB_NVarchar, Length: 255, Nullable: false},
{Name: "uid", Type: migrator.DB_NVarchar, Length: 40, Nullable: false},
{Name: "folder", Type: migrator.DB_NVarchar, Length: 40, Nullable: false},
// Summary data (always extracted from the `body` column)
{Name: "name", Type: migrator.DB_NVarchar, Length: 255, Nullable: false},
{Name: "description", Type: migrator.DB_NVarchar, Length: 255, Nullable: true},
{Name: "labels", Type: migrator.DB_Text, Nullable: true}, // JSON object
{Name: "fields", Type: migrator.DB_Text, Nullable: true}, // JSON object
{Name: "errors", Type: migrator.DB_Text, Nullable: true}, // JSON object
},
Indices: []*migrator.Index{
{Cols: []string{"parent_grn"}},
{Cols: []string{"kind"}},
{Cols: []string{"folder"}},
{Cols: []string{"uid"}},
{Cols: []string{"tenant_id", "kind", "uid"}, Type: migrator.UniqueIndex},
},
})
@ -189,8 +180,5 @@ func initEntityTables(mg *migrator.Migrator) {
}
}
mg.AddMigration("set path collation on entity table", migrator.NewRawSQLMigration("").
// MySQL `utf8mb4_unicode_ci` collation is set in `mysql_dialect.go`
// SQLite uses a `BINARY` collation by default
Postgres("ALTER TABLE entity_folder ALTER COLUMN slug_path TYPE VARCHAR(1024) COLLATE \"C\";")) // Collate C - sorting done based on character code byte values
return marker
}

View File

@ -4,69 +4,73 @@ import (
"context"
"fmt"
"github.com/grafana/grafana/pkg/infra/db"
"github.com/grafana/grafana/pkg/services/featuremgmt"
"github.com/grafana/grafana/pkg/services/sqlstore"
"github.com/grafana/grafana/pkg/services/sqlstore/migrator"
"github.com/grafana/grafana/pkg/services/sqlstore/session"
"github.com/grafana/grafana/pkg/setting"
"github.com/grafana/grafana/pkg/services/store/entity/sqlstash"
)
func MigrateEntityStore(xdb db.DB, features featuremgmt.FeatureToggles) error {
func MigrateEntityStore(db sqlstash.EntityDB, features featuremgmt.FeatureToggles) error {
// Skip if feature flag is not enabled
if !features.IsEnabledGlobally(featuremgmt.FlagEntityStore) {
if !features.IsEnabledGlobally(featuremgmt.FlagUnifiedStorage) {
return nil
}
// Migrations depend on upstream xorm implementations
sql, ok := xdb.(*sqlstore.SQLStore)
if !ok {
return nil
engine, err := db.GetEngine()
if err != nil {
return err
}
// !!! This should not run in production!
// The object store SQL schema is still in active development and this
// will only be called when the feature toggle is enabled
// this check should not be necessary, but is added as an extra check
if setting.Env == setting.Prod {
return nil
}
marker := "Initialize entity tables (v0)" // changing this key wipe+rewrite everything
mg := migrator.NewScopedMigrator(sql.GetEngine(), sql.Cfg, "entity")
mg := migrator.NewScopedMigrator(engine, db.GetCfg(), "entity")
mg.AddCreateMigration()
mg.AddMigration(marker, &migrator.RawSQLMigration{})
initEntityTables(mg)
marker := initEntityTables(mg)
// While this feature is under development, we can completly wipe and recreate
// The initial plan is to keep the source of truth in existing SQL tables, and mirrot it
// to a kubernetes model. Once the kubernetes model needs to be preserved,
// this code should be removed
log, err := mg.GetMigrationLog()
exists, err := engine.IsTableExist("entity_migration_log")
if err != nil {
return err
}
_, found := log[marker]
if !found && len(log) > 0 {
// Remove the migration log (and potential other orphan tables)
tables := []string{"entity_migration_log"}
ctx := context.Background()
err = sql.GetSqlxSession().WithTransaction(ctx, func(tx *session.SessionTx) error {
for _, t := range tables {
_, err := tx.Exec(ctx, fmt.Sprintf("DROP TABLE IF EXISTS %s", t))
if err != nil {
return err
}
}
return nil
})
if exists {
log, err := mg.GetMigrationLog()
if err != nil {
return err
}
_, found := log[marker]
if !found && len(log) > 0 {
// Remove the migration log (and potential other orphan tables)
tables := []string{"entity_migration_log"}
ctx := context.Background()
sess, err := db.GetSession()
if err != nil {
return err
}
err = sess.WithTransaction(ctx, func(tx *session.SessionTx) error {
for _, t := range tables {
_, err := tx.Exec(ctx, fmt.Sprintf("DROP TABLE IF EXISTS %s", t))
if err != nil {
return err
}
}
return nil
})
if err != nil {
return err
}
// remove old entries from in-memory log
for id := range log {
mg.RemoveMigrationLogs(id)
}
}
}
return mg.Start(
features.IsEnabledGlobally(featuremgmt.FlagMigrationLocking),
sql.GetMigrationLockAttemptTimeout())
0)
}

View File

@ -0,0 +1,52 @@
package server
import (
"fmt"
"net"
"strconv"
// "github.com/grafana/grafana/pkg/services/featuremgmt"
"github.com/grafana/grafana/pkg/setting"
)
type config struct {
enabled bool
devMode bool
ip net.IP
port int
host string
apiURL string
logLevel int
}
func newConfig(cfg *setting.Cfg) *config {
defaultLogLevel := 0
// TODO
ip := net.ParseIP(cfg.HTTPAddr)
apiURL := cfg.AppURL
port, err := strconv.Atoi(cfg.HTTPPort)
if err != nil {
port = 3001
}
if cfg.Env == setting.Dev {
defaultLogLevel = 10
port = 3001
ip = net.ParseIP("127.0.0.1")
apiURL = fmt.Sprintf("https://%s:%d", ip, port)
}
host := fmt.Sprintf("%s:%d", ip, port)
return &config{
enabled: true, // cfg.IsFeatureToggleEnabled(featuremgmt.FlagGrafanaStorageServer),
devMode: cfg.Env == setting.Dev,
ip: ip,
port: port,
host: host,
logLevel: cfg.SectionWithEnvOverrides("storage-server").Key("log_level").MustInt(defaultLogLevel),
apiURL: apiURL,
}
}

View File

@ -0,0 +1,196 @@
package server
import (
"context"
"fmt"
"strconv"
"github.com/go-jose/go-jose/v3/jwt"
"github.com/grafana/dskit/services"
"github.com/prometheus/client_golang/prometheus"
"google.golang.org/grpc/metadata"
"github.com/grafana/grafana/pkg/infra/appcontext"
"github.com/grafana/grafana/pkg/infra/tracing"
"github.com/grafana/grafana/pkg/modules"
"github.com/grafana/grafana/pkg/registry"
"github.com/grafana/grafana/pkg/services/featuremgmt"
"github.com/grafana/grafana/pkg/services/grpcserver"
"github.com/grafana/grafana/pkg/services/grpcserver/interceptors"
"github.com/grafana/grafana/pkg/services/store/entity"
entityDB "github.com/grafana/grafana/pkg/services/store/entity/db"
"github.com/grafana/grafana/pkg/services/store/entity/sqlstash"
"github.com/grafana/grafana/pkg/services/user"
"github.com/grafana/grafana/pkg/setting"
)
var (
_ Service = (*service)(nil)
_ registry.BackgroundService = (*service)(nil)
_ registry.CanBeDisabled = (*service)(nil)
)
func init() {
// do nothing
}
type Service interface {
services.NamedService
registry.BackgroundService
registry.CanBeDisabled
}
type service struct {
*services.BasicService
config *config
cfg *setting.Cfg
features featuremgmt.FeatureToggles
stopCh chan struct{}
stoppedCh chan error
handler grpcserver.Provider
tracing *tracing.TracingService
authenticator interceptors.Authenticator
}
type Authenticator struct{}
func (f *Authenticator) Authenticate(ctx context.Context) (context.Context, error) {
md, ok := metadata.FromIncomingContext(ctx)
if !ok {
return nil, fmt.Errorf("no metadata found")
}
// TODO: use id token instead of these fields
login := md.Get("grafana-login")[0]
if login == "" {
return nil, fmt.Errorf("no login found in context")
}
userID, err := strconv.ParseInt(md.Get("grafana-userid")[0], 10, 64)
if err != nil {
return nil, fmt.Errorf("invalid user id: %w", err)
}
orgID, err := strconv.ParseInt(md.Get("grafana-orgid")[0], 10, 64)
if err != nil {
return nil, fmt.Errorf("invalid org id: %w", err)
}
// TODO: validate id token
idToken := md.Get("grafana-idtoken")[0]
if idToken == "" {
return nil, fmt.Errorf("no id token found in context")
}
jwtToken, err := jwt.ParseSigned(idToken)
if err != nil {
return nil, fmt.Errorf("invalid id token: %w", err)
}
claims := jwt.Claims{}
err = jwtToken.UnsafeClaimsWithoutVerification(&claims)
if err != nil {
return nil, fmt.Errorf("invalid id token: %w", err)
}
// fmt.Printf("JWT CLAIMS: %+v\n", claims)
return appcontext.WithUser(ctx, &user.SignedInUser{
Login: login,
UserID: userID,
OrgID: orgID,
}), nil
}
var _ interceptors.Authenticator = (*Authenticator)(nil)
func ProvideService(
cfg *setting.Cfg,
features featuremgmt.FeatureToggles,
) (*service, error) {
tracing, err := tracing.ProvideService(cfg)
if err != nil {
return nil, err
}
authn := &Authenticator{}
s := &service{
config: newConfig(cfg),
cfg: cfg,
features: features,
stopCh: make(chan struct{}),
authenticator: authn,
tracing: tracing,
}
// This will be used when running as a dskit service
s.BasicService = services.NewBasicService(s.start, s.running, nil).WithName(modules.StorageServer)
return s, nil
}
func (s *service) IsDisabled() bool {
return !s.config.enabled
}
// Run is an adapter for the BackgroundService interface.
func (s *service) Run(ctx context.Context) error {
if err := s.start(ctx); err != nil {
return err
}
return s.running(ctx)
}
func (s *service) start(ctx context.Context) error {
// TODO: use wire
// TODO: support using grafana db connection?
eDB, err := entityDB.ProvideEntityDB(nil, s.cfg, s.features)
if err != nil {
return err
}
err = eDB.Init()
if err != nil {
return err
}
store, err := sqlstash.ProvideSQLEntityServer(eDB)
if err != nil {
return err
}
s.handler, err = grpcserver.ProvideService(s.cfg, s.features, s.authenticator, s.tracing, prometheus.DefaultRegisterer)
if err != nil {
return err
}
entity.RegisterEntityStoreServer(s.handler.GetServer(), store)
err = s.handler.Run(ctx)
if err != nil {
return err
}
return nil
}
func (s *service) running(ctx context.Context) error {
// skip waiting for the server in prod mode
if !s.config.devMode {
<-ctx.Done()
return nil
}
select {
case err := <-s.stoppedCh:
if err != nil {
return err
}
case <-ctx.Done():
close(s.stopCh)
}
return nil
}

View File

@ -4,18 +4,19 @@ import (
"context"
"encoding/json"
"github.com/grafana/grafana/pkg/infra/grn"
"github.com/grafana/grafana/pkg/services/sqlstore/session"
"github.com/grafana/grafana/pkg/services/store/entity"
)
type folderInfo struct {
UID string `json:"uid"`
Name string `json:"name"` // original display name
Slug string `json:"slug"` // full slug
Guid string `json:"guid"`
UID string `json:"uid"`
Name string `json:"name"` // original display name
SlugPath string `json:"slug"` // full slug path
// original slug
originalSlug string
Slug string `json:"-"`
depth int32
left int32
@ -33,51 +34,48 @@ type folderInfo struct {
// This will replace all entries in `entity_folder`
// This is pretty heavy weight, but it does give us a sorted folder list
// NOTE: this could be done async with a mutex/lock? reconciler pattern
func updateFolderTree(ctx context.Context, tx *session.SessionTx, tenant int64) error {
_, err := tx.Exec(ctx, "DELETE FROM entity_folder WHERE tenant_id=?", tenant)
func updateFolderTree(ctx context.Context, tx *session.SessionTx, tenantId int64) error {
_, err := tx.Exec(ctx, "DELETE FROM entity_folder WHERE tenant_id=?", tenantId)
if err != nil {
return err
}
query := "SELECT guid,uid,folder,name,slug" +
" FROM entity" +
" WHERE kind=? AND tenant_id=?" +
" ORDER BY slug asc"
args := []interface{}{entity.StandardKindFolder, tenantId}
all := []*folderInfo{}
rows, err := tx.Query(ctx, "SELECT uid,folder,name,slug FROM entity WHERE kind=? AND tenant_id=? ORDER BY slug asc;",
entity.StandardKindFolder, tenant)
rows, err := tx.Query(ctx, query, args...)
if err != nil {
return err
}
defer func() { _ = rows.Close() }()
for rows.Next() {
folder := folderInfo{
children: []*folderInfo{},
}
err = rows.Scan(&folder.UID, &folder.parentUID, &folder.Name, &folder.originalSlug)
err = rows.Scan(&folder.Guid, &folder.UID, &folder.parentUID, &folder.Name, &folder.Slug)
if err != nil {
break
return err
}
all = append(all, &folder)
}
errClose := rows.Close()
// TODO: Use some kind of multi-error.
// Until then, we want to prioritize errors coming from the .Scan
// over those coming from .Close.
if err != nil {
return err
}
if errClose != nil {
return errClose
}
root, lost, err := buildFolderTree(all)
if err != nil {
return err
}
err = insertFolderInfo(ctx, tx, tenant, root, false)
err = insertFolderInfo(ctx, tx, tenantId, root, false)
if err != nil {
return err
}
for _, folder := range lost {
err = insertFolderInfo(ctx, tx, tenant, folder, true)
err = insertFolderInfo(ctx, tx, tenantId, folder, true)
if err != nil {
return err
}
@ -123,9 +121,9 @@ func setMPTTOrder(folder *folderInfo, stack []*folderInfo, idx int32) (int32, er
folder.stack = stack
if folder.depth > 0 {
folder.Slug = "/"
folder.SlugPath = "/"
for _, f := range stack {
folder.Slug += f.originalSlug + "/"
folder.SlugPath += f.Slug + "/"
}
}
@ -139,17 +137,16 @@ func setMPTTOrder(folder *folderInfo, stack []*folderInfo, idx int32) (int32, er
return folder.right, nil
}
func insertFolderInfo(ctx context.Context, tx *session.SessionTx, tenant int64, folder *folderInfo, isDetached bool) error {
func insertFolderInfo(ctx context.Context, tx *session.SessionTx, tenantId int64, folder *folderInfo, isDetached bool) error {
js, _ := json.Marshal(folder.stack)
grn2 := grn.GRN{TenantID: tenant, ResourceKind: entity.StandardKindFolder, ResourceIdentifier: folder.UID}
_, err := tx.Exec(ctx,
`INSERT INTO entity_folder `+
"(grn, tenant_id, uid, slug_path, tree, depth, left, right, detached) "+
"(guid, tenant_id, uid, slug_path, tree, depth, lft, rgt, detached) "+
`VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)`,
grn2.ToGRNString(),
tenant,
folder.Guid,
tenantId,
folder.UID,
folder.Slug,
folder.SlugPath,
string(js),
folder.depth,
folder.left,
@ -161,7 +158,7 @@ func insertFolderInfo(ctx context.Context, tx *session.SessionTx, tenant int64,
}
for _, sub := range folder.children {
err := insertFolderInfo(ctx, tx, tenant, sub, isDetached)
err := insertFolderInfo(ctx, tx, tenantId, sub, isDetached)
if err != nil {
return err
}

View File

@ -12,9 +12,9 @@ import (
func TestFolderSupport(t *testing.T) {
root, lost, err := buildFolderTree([]*folderInfo{
{UID: "A", parentUID: "", Name: "A", originalSlug: "a"},
{UID: "AA", parentUID: "A", Name: "AA", originalSlug: "aa"},
{UID: "B", parentUID: "", Name: "B", originalSlug: "b"},
{Guid: "GA", UID: "A", parentUID: "", Name: "A", Slug: "a"},
{Guid: "GAA", UID: "AA", parentUID: "A", Name: "AA", Slug: "aa"},
{Guid: "GB", UID: "B", parentUID: "", Name: "B", Slug: "b"},
})
require.NoError(t, err)
require.NotNil(t, root)
@ -51,7 +51,7 @@ func appendFolder(folder *folderInfo, frame *data.Frame) {
frame.AppendRow(
folder.UID,
folder.Name,
folder.Slug,
folder.SlugPath,
folder.depth,
folder.left,
folder.right,

File diff suppressed because it is too large Load Diff

View File

@ -1,116 +0,0 @@
package sqlstash
import (
"encoding/json"
"github.com/grafana/grafana/pkg/infra/grn"
"github.com/grafana/grafana/pkg/services/store/entity"
)
type summarySupport struct {
model *entity.EntitySummary
name string
description *string // null or empty
slug *string // null or empty
labels *string
fields *string
errors *string // should not allow saving with this!
marshaled []byte
// metadata for nested objects
parent_grn *grn.GRN
folder string
isNested bool // set when this is for a nested item
}
func newSummarySupport(summary *entity.EntitySummary) (*summarySupport, error) {
var err error
var js []byte
s := &summarySupport{
model: summary,
}
if summary != nil {
s.marshaled, err = json.Marshal(summary)
if err != nil {
return s, err
}
s.name = summary.Name
if summary.Description != "" {
s.description = &summary.Description
}
if summary.Slug != "" {
s.slug = &summary.Slug
}
if len(summary.Labels) > 0 {
js, err = json.Marshal(summary.Labels)
if err != nil {
return s, err
}
str := string(js)
s.labels = &str
}
if len(summary.Fields) > 0 {
js, err = json.Marshal(summary.Fields)
if err != nil {
return s, err
}
str := string(js)
s.fields = &str
}
if summary.Error != nil {
js, err = json.Marshal(summary.Error)
if err != nil {
return s, err
}
str := string(js)
s.errors = &str
}
}
return s, err
}
func (s summarySupport) toEntitySummary() (*entity.EntitySummary, error) {
var err error
summary := &entity.EntitySummary{
Name: s.name,
}
if s.description != nil {
summary.Description = *s.description
}
if s.slug != nil {
summary.Slug = *s.slug
}
if s.labels != nil {
b := []byte(*s.labels)
err = json.Unmarshal(b, &summary.Labels)
if err != nil {
return summary, err
}
}
if s.fields != nil {
b := []byte(*s.fields)
err = json.Unmarshal(b, &summary.Fields)
if err != nil {
return summary, err
}
}
if s.errors != nil {
b := []byte(*s.errors)
err = json.Unmarshal(b, &summary.Error)
if err != nil {
return summary, err
}
}
return summary, err
}
func (s *summarySupport) getParentGRN() *string {
if s.isNested {
t := s.parent_grn.ToGRNString()
return &t
}
return nil
}

View File

@ -3,16 +3,16 @@
// Frame[0]
// Name:
// Dimensions: 7 Fields by 4 Rows
// +----------------+----------------+----------------+---------------+---------------+---------------+--------------------------------------------------------------------------------+
// | Name: UID | Name: name | Name: slug | Name: depth | Name: left | Name: right | Name: tree |
// | Labels: | Labels: | Labels: | Labels: | Labels: | Labels: | Labels: |
// | Type: []string | Type: []string | Type: []string | Type: []int32 | Type: []int32 | Type: []int32 | Type: []json.RawMessage |
// +----------------+----------------+----------------+---------------+---------------+---------------+--------------------------------------------------------------------------------+
// | | Root | | 0 | 1 | 8 | [] |
// | A | A | /a/ | 1 | 2 | 5 | [{"uid":"A","name":"A","slug":"/a/"}] |
// | AA | AA | /a/aa/ | 2 | 3 | 4 | [{"uid":"A","name":"A","slug":"/a/"},{"uid":"AA","name":"AA","slug":"/a/aa/"}] |
// | B | B | /b/ | 1 | 6 | 7 | [{"uid":"B","name":"B","slug":"/b/"}] |
// +----------------+----------------+----------------+---------------+---------------+---------------+--------------------------------------------------------------------------------+
// +----------------+----------------+----------------+---------------+---------------+---------------+---------------------------------------------------------------------------------------------------------+
// | Name: UID | Name: name | Name: slug | Name: depth | Name: left | Name: right | Name: tree |
// | Labels: | Labels: | Labels: | Labels: | Labels: | Labels: | Labels: |
// | Type: []string | Type: []string | Type: []string | Type: []int32 | Type: []int32 | Type: []int32 | Type: []json.RawMessage |
// +----------------+----------------+----------------+---------------+---------------+---------------+---------------------------------------------------------------------------------------------------------+
// | | Root | | 0 | 1 | 8 | [] |
// | A | A | /a/ | 1 | 2 | 5 | [{"guid":"GA","uid":"A","name":"A","slug":"/a/"}] |
// | AA | AA | /a/aa/ | 2 | 3 | 4 | [{"guid":"GA","uid":"A","name":"A","slug":"/a/"},{"guid":"GAA","uid":"AA","name":"AA","slug":"/a/aa/"}] |
// | B | B | /b/ | 1 | 6 | 7 | [{"guid":"GB","uid":"B","name":"B","slug":"/b/"}] |
// +----------------+----------------+----------------+---------------+---------------+---------------+---------------------------------------------------------------------------------------------------------+
//
//
// 🌟 This was machine generated. Do not edit. 🌟
@ -115,6 +115,7 @@
[],
[
{
"guid": "GA",
"uid": "A",
"name": "A",
"slug": "/a/"
@ -122,11 +123,13 @@
],
[
{
"guid": "GA",
"uid": "A",
"name": "A",
"slug": "/a/"
},
{
"guid": "GAA",
"uid": "AA",
"name": "AA",
"slug": "/a/aa/"
@ -134,6 +137,7 @@
],
[
{
"guid": "GB",
"uid": "B",
"name": "B",
"slug": "/b/"

View File

@ -5,7 +5,11 @@ import (
"encoding/hex"
)
func createContentsHash(contents []byte) string {
hash := md5.Sum(contents)
func createContentsHash(body []byte, meta []byte, status []byte) string {
h := md5.New()
_, _ = h.Write(meta)
_, _ = h.Write(body)
_, _ = h.Write(status)
hash := h.Sum(nil)
return hex.EncodeToString(hash[:])
}

View File

@ -64,7 +64,7 @@ func createTestContext(t *testing.T) testContext {
dir, path := testinfra.CreateGrafDir(t, testinfra.GrafanaOpts{
EnableFeatureToggles: []string{
featuremgmt.FlagGrpcServer,
featuremgmt.FlagEntityStore,
featuremgmt.FlagUnifiedStorage,
},
AppModeProduction: false, // required for migrations to run
GRPCServerAddress: "127.0.0.1:0", // :0 for choosing the port automatically

View File

@ -96,7 +96,7 @@ func requireEntityMatch(t *testing.T, obj *entity.Entity, m rawEntityMatcher) {
require.True(t, len(mismatches) == 0, mismatches)
}
func requireVersionMatch(t *testing.T, obj *entity.EntityVersionInfo, m objectVersionMatcher) {
func requireVersionMatch(t *testing.T, obj *entity.Entity, m objectVersionMatcher) {
t.Helper()
mismatches := ""
@ -154,9 +154,11 @@ func TestIntegrationEntityServer(t *testing.T) {
t.Run("should be able to read persisted objects", func(t *testing.T) {
before := time.Now()
writeReq := &entity.WriteEntityRequest{
GRN: testGrn,
Body: body,
Comment: "first entity!",
Entity: &entity.Entity{
GRN: testGrn,
Body: body,
Message: "first entity!",
},
}
writeResp, err := testCtx.client.Write(ctx, writeReq)
require.NoError(t, err)
@ -165,7 +167,7 @@ func TestIntegrationEntityServer(t *testing.T) {
updatedRange: []time.Time{before, time.Now()},
updatedBy: fakeUser,
version: &firstVersion,
comment: &writeReq.Comment,
comment: &writeReq.Entity.Message,
}
requireVersionMatch(t, writeResp.Entity, versionMatcher)
@ -175,7 +177,6 @@ func TestIntegrationEntityServer(t *testing.T) {
WithBody: true,
})
require.NoError(t, err)
require.Nil(t, readResp.SummaryJson)
require.NotNil(t, readResp)
foundGRN := readResp.GRN
@ -200,7 +201,7 @@ func TestIntegrationEntityServer(t *testing.T) {
PreviousVersion: writeResp.Entity.Version,
})
require.NoError(t, err)
require.True(t, deleteResp.OK)
require.Equal(t, deleteResp.Status, entity.DeleteEntityResponse_DELETED)
readRespAfterDelete, err := testCtx.client.Read(ctx, &entity.ReadEntityRequest{
GRN: testGrn,
@ -219,9 +220,11 @@ func TestIntegrationEntityServer(t *testing.T) {
}
writeReq1 := &entity.WriteEntityRequest{
GRN: testGrn,
Body: body,
Comment: "first entity!",
Entity: &entity.Entity{
GRN: testGrn,
Body: body,
Message: "first entity!",
},
}
writeResp1, err := testCtx.client.Write(ctx, writeReq1)
require.NoError(t, err)
@ -230,9 +233,11 @@ func TestIntegrationEntityServer(t *testing.T) {
body2 := []byte("{\"name\":\"John2\"}")
writeReq2 := &entity.WriteEntityRequest{
GRN: testGrn,
Body: body2,
Comment: "update1",
Entity: &entity.Entity{
GRN: testGrn,
Body: body2,
Message: "update1",
},
}
writeResp2, err := testCtx.client.Write(ctx, writeReq2)
require.NoError(t, err)
@ -248,9 +253,11 @@ func TestIntegrationEntityServer(t *testing.T) {
body3 := []byte("{\"name\":\"John3\"}")
writeReq3 := &entity.WriteEntityRequest{
GRN: testGrn,
Body: body3,
Comment: "update3",
Entity: &entity.Entity{
GRN: testGrn,
Body: body3,
Message: "update3",
},
}
writeResp3, err := testCtx.client.Write(ctx, writeReq3)
require.NoError(t, err)
@ -271,7 +278,6 @@ func TestIntegrationEntityServer(t *testing.T) {
WithBody: true,
})
require.NoError(t, err)
require.Nil(t, readRespLatest.SummaryJson)
requireEntityMatch(t, readRespLatest, latestMatcher)
readRespFirstVer, err := testCtx.client.Read(ctx, &entity.ReadEntityRequest{
@ -281,7 +287,6 @@ func TestIntegrationEntityServer(t *testing.T) {
})
require.NoError(t, err)
require.Nil(t, readRespFirstVer.SummaryJson)
require.NotNil(t, readRespFirstVer)
requireEntityMatch(t, readRespFirstVer, rawEntityMatcher{
grn: testGrn,
@ -297,7 +302,7 @@ func TestIntegrationEntityServer(t *testing.T) {
GRN: testGrn,
})
require.NoError(t, err)
require.Equal(t, []*entity.EntityVersionInfo{
require.Equal(t, []*entity.Entity{
writeResp3.Entity,
writeResp2.Entity,
writeResp1.Entity,
@ -308,58 +313,66 @@ func TestIntegrationEntityServer(t *testing.T) {
PreviousVersion: writeResp3.Entity.Version,
})
require.NoError(t, err)
require.True(t, deleteResp.OK)
require.Equal(t, deleteResp.Status, entity.DeleteEntityResponse_DELETED)
})
t.Run("should be able to search for objects", func(t *testing.T) {
t.Run("should be able to list objects", func(t *testing.T) {
uid2 := "uid2"
uid3 := "uid3"
uid4 := "uid4"
kind2 := entity.StandardKindPlaylist
w1, err := testCtx.client.Write(ctx, &entity.WriteEntityRequest{
GRN: testGrn,
Body: body,
Entity: &entity.Entity{
GRN: testGrn,
Body: body,
},
})
require.NoError(t, err)
w2, err := testCtx.client.Write(ctx, &entity.WriteEntityRequest{
GRN: &grn.GRN{
ResourceIdentifier: uid2,
ResourceKind: kind,
Entity: &entity.Entity{
GRN: &grn.GRN{
ResourceIdentifier: uid2,
ResourceKind: kind,
},
Body: body,
},
Body: body,
})
require.NoError(t, err)
w3, err := testCtx.client.Write(ctx, &entity.WriteEntityRequest{
GRN: &grn.GRN{
ResourceIdentifier: uid3,
ResourceKind: kind2,
Entity: &entity.Entity{
GRN: &grn.GRN{
ResourceIdentifier: uid3,
ResourceKind: kind2,
},
Body: body,
},
Body: body,
})
require.NoError(t, err)
w4, err := testCtx.client.Write(ctx, &entity.WriteEntityRequest{
GRN: &grn.GRN{
ResourceIdentifier: uid4,
ResourceKind: kind2,
Entity: &entity.Entity{
GRN: &grn.GRN{
ResourceIdentifier: uid4,
ResourceKind: kind2,
},
Body: body,
},
Body: body,
})
require.NoError(t, err)
search, err := testCtx.client.Search(ctx, &entity.EntitySearchRequest{
resp, err := testCtx.client.List(ctx, &entity.EntityListRequest{
Kind: []string{kind, kind2},
WithBody: false,
})
require.NoError(t, err)
require.NotNil(t, search)
uids := make([]string, 0, len(search.Results))
kinds := make([]string, 0, len(search.Results))
version := make([]string, 0, len(search.Results))
for _, res := range search.Results {
require.NotNil(t, resp)
uids := make([]string, 0, len(resp.Results))
kinds := make([]string, 0, len(resp.Results))
version := make([]string, 0, len(resp.Results))
for _, res := range resp.Results {
uids = append(uids, res.GRN.ResourceIdentifier)
kinds = append(kinds, res.GRN.ResourceKind)
version = append(version, res.Version)
@ -374,14 +387,14 @@ func TestIntegrationEntityServer(t *testing.T) {
}, version)
// Again with only one kind
searchKind1, err := testCtx.client.Search(ctx, &entity.EntitySearchRequest{
respKind1, err := testCtx.client.List(ctx, &entity.EntityListRequest{
Kind: []string{kind},
})
require.NoError(t, err)
uids = make([]string, 0, len(searchKind1.Results))
kinds = make([]string, 0, len(searchKind1.Results))
version = make([]string, 0, len(searchKind1.Results))
for _, res := range searchKind1.Results {
uids = make([]string, 0, len(respKind1.Results))
kinds = make([]string, 0, len(respKind1.Results))
version = make([]string, 0, len(respKind1.Results))
for _, res := range respKind1.Results {
uids = append(uids, res.GRN.ResourceIdentifier)
kinds = append(kinds, res.GRN.ResourceKind)
version = append(version, res.Version)
@ -397,24 +410,28 @@ func TestIntegrationEntityServer(t *testing.T) {
t.Run("should be able to filter objects based on their labels", func(t *testing.T) {
kind := entity.StandardKindDashboard
_, err := testCtx.client.Write(ctx, &entity.WriteEntityRequest{
GRN: &grn.GRN{
ResourceKind: kind,
ResourceIdentifier: "blue-green",
Entity: &entity.Entity{
GRN: &grn.GRN{
ResourceKind: kind,
ResourceIdentifier: "blue-green",
},
Body: []byte(dashboardWithTagsBlueGreen),
},
Body: []byte(dashboardWithTagsBlueGreen),
})
require.NoError(t, err)
_, err = testCtx.client.Write(ctx, &entity.WriteEntityRequest{
GRN: &grn.GRN{
ResourceKind: kind,
ResourceIdentifier: "red-green",
Entity: &entity.Entity{
GRN: &grn.GRN{
ResourceKind: kind,
ResourceIdentifier: "red-green",
},
Body: []byte(dashboardWithTagsRedGreen),
},
Body: []byte(dashboardWithTagsRedGreen),
})
require.NoError(t, err)
search, err := testCtx.client.Search(ctx, &entity.EntitySearchRequest{
resp, err := testCtx.client.List(ctx, &entity.EntityListRequest{
Kind: []string{kind},
WithBody: false,
WithLabels: true,
@ -423,11 +440,11 @@ func TestIntegrationEntityServer(t *testing.T) {
},
})
require.NoError(t, err)
require.NotNil(t, search)
require.Len(t, search.Results, 1)
require.Equal(t, search.Results[0].GRN.ResourceIdentifier, "red-green")
require.NotNil(t, resp)
require.Len(t, resp.Results, 1)
require.Equal(t, resp.Results[0].GRN.ResourceIdentifier, "red-green")
search, err = testCtx.client.Search(ctx, &entity.EntitySearchRequest{
resp, err = testCtx.client.List(ctx, &entity.EntityListRequest{
Kind: []string{kind},
WithBody: false,
WithLabels: true,
@ -437,11 +454,11 @@ func TestIntegrationEntityServer(t *testing.T) {
},
})
require.NoError(t, err)
require.NotNil(t, search)
require.Len(t, search.Results, 1)
require.Equal(t, search.Results[0].GRN.ResourceIdentifier, "red-green")
require.NotNil(t, resp)
require.Len(t, resp.Results, 1)
require.Equal(t, resp.Results[0].GRN.ResourceIdentifier, "red-green")
search, err = testCtx.client.Search(ctx, &entity.EntitySearchRequest{
resp, err = testCtx.client.List(ctx, &entity.EntityListRequest{
Kind: []string{kind},
WithBody: false,
WithLabels: true,
@ -450,10 +467,10 @@ func TestIntegrationEntityServer(t *testing.T) {
},
})
require.NoError(t, err)
require.NotNil(t, search)
require.Len(t, search.Results, 0)
require.NotNil(t, resp)
require.Len(t, resp.Results, 0)
search, err = testCtx.client.Search(ctx, &entity.EntitySearchRequest{
resp, err = testCtx.client.List(ctx, &entity.EntityListRequest{
Kind: []string{kind},
WithBody: false,
WithLabels: true,
@ -462,10 +479,10 @@ func TestIntegrationEntityServer(t *testing.T) {
},
})
require.NoError(t, err)
require.NotNil(t, search)
require.Len(t, search.Results, 2)
require.NotNil(t, resp)
require.Len(t, resp.Results, 2)
search, err = testCtx.client.Search(ctx, &entity.EntitySearchRequest{
resp, err = testCtx.client.List(ctx, &entity.EntityListRequest{
Kind: []string{kind},
WithBody: false,
WithLabels: true,
@ -474,7 +491,7 @@ func TestIntegrationEntityServer(t *testing.T) {
},
})
require.NoError(t, err)
require.NotNil(t, search)
require.Len(t, search.Results, 0)
require.NotNil(t, resp)
require.Len(t, resp.Results, 0)
})
}

View File

@ -3,10 +3,7 @@ package entity
// The admin request is a superset of write request features
func ToAdminWriteEntityRequest(req *WriteEntityRequest) *AdminWriteEntityRequest {
return &AdminWriteEntityRequest{
GRN: req.GRN,
Body: req.Body,
Folder: req.Folder,
Comment: req.Comment,
Entity: req.Entity,
PreviousVersion: req.PreviousVersion,
}
}

View File

@ -19,7 +19,6 @@ import (
"github.com/grafana/grafana/pkg/services/org"
"github.com/grafana/grafana/pkg/services/quota"
"github.com/grafana/grafana/pkg/services/sqlstore"
"github.com/grafana/grafana/pkg/services/store/entity/migrations"
"github.com/grafana/grafana/pkg/services/user"
"github.com/grafana/grafana/pkg/setting"
)
@ -104,10 +103,6 @@ func ProvideService(
grafanaStorageLogger.Warn("Error loading storage config", "error", err)
}
if err := migrations.MigrateEntityStore(sql, features); err != nil {
return nil, err
}
// always exists
globalRoots := []storageRuntime{
newDiskStorage(RootStorageMeta{

View File

@ -131,6 +131,11 @@ func (c *K8sResourceClient) SanitizeJSON(v *unstructured.Unstructured) string {
if anno["grafana.app/updatedTimestamp"] != "" {
anno["grafana.app/updatedTimestamp"] = "${updatedTimestamp}"
}
// Remove annotations that are not added by legacy storage
delete(anno, "grafana.app/originTimestamp")
delete(anno, "grafana.app/createdBy")
delete(anno, "grafana.app/updatedBy")
deep.SetAnnotations(anno)
copy := deep.Object
meta, ok := copy["metadata"].(map[string]any)

View File

@ -94,6 +94,19 @@ func TestPlaylist(t *testing.T) {
}))
})
t.Run("with dual write (unified storage)", func(t *testing.T) {
doPlaylistTests(t, apis.NewK8sTestHelper(t, testinfra.GrafanaOpts{
AppModeProduction: false, // required for unified storage
DisableAnonymous: true,
APIServerStorageType: "unified", // use the entity api tables
EnableFeatureToggles: []string{
featuremgmt.FlagUnifiedStorage,
featuremgmt.FlagGrafanaAPIServer,
featuremgmt.FlagKubernetesPlaylists, // Required so that legacy calls are also written
},
}))
})
t.Run("with dual write (etcd)", func(t *testing.T) {
// NOTE: running local etcd, that will be wiped clean!
t.Skip("local etcd testing")
@ -271,6 +284,7 @@ func doPlaylistTests(t *testing.T, helper *apis.K8sTestHelper) *apis.K8sTestHelp
Path: "/api/playlists/" + uid,
Body: []byte(legacyPayload),
}, &playlist.PlaylistDTO{})
require.Equal(t, 200, dtoResponse.Response.StatusCode)
require.Equal(t, uid, dtoResponse.Result.Uid)
require.Equal(t, "10m", dtoResponse.Result.Interval)
@ -280,12 +294,13 @@ func doPlaylistTests(t *testing.T, helper *apis.K8sTestHelper) *apis.K8sTestHelp
require.JSONEq(t, expectedResult, client.SanitizeJSON(found))
// Delete does not return anything
_ = apis.DoRequest(helper, apis.RequestParams{
deleteResponse := apis.DoRequest(helper, apis.RequestParams{
User: client.Args.User,
Method: http.MethodDelete,
Path: "/api/playlists/" + uid,
Body: []byte(legacyPayload),
}, &playlist.PlaylistDTO{}) // response is empty
require.Equal(t, 200, deleteResponse.Response.StatusCode)
found, err = client.Resource.Get(context.Background(), uid, metav1.GetOptions{})
statusError := helper.AsStatusError(err)