Storage: Unified Storage based on Entity API (#71977)

* first round of entityapi updates

- quote column names and clean up insert/update queries
- replace grn with guid
- streamline table structure

fixes

streamline entity history

move EntitySummary into proto

remove EntitySummary

add guid to json

fix tests

change DB_Uuid to DB_NVarchar

fix folder test

convert interface to any

more cleanup

start entity store under grafana-apiserver dskit target

CRUD working, kind of

rough cut of wiring entity api to kube-apiserver

fake grafana user in context

add key to entity

list working

revert unnecessary changes

move entity storage files to their own package, clean up

use accessor to read/write grafana annotations

implement separate Create and Update functions

* go mod tidy

* switch from Kind to resource

* basic grpc storage server

* basic support for grpc entity store

* don't connect to database unless it's needed, pass user identity over grpc

* support getting user from k8s context, fix some mysql issues

* assign owner to snowflake dependency

* switch from ulid to uuid for guids

* cleanup, rename Search to List

* remove entityListResult

* EntityAPI: remove extra user abstraction (#79033)

* remove extra user abstraction

* add test stub (but

* move grpc context setup into client wrapper, fix lint issue

* remove unused constants

* remove custom json stuff

* basic list filtering, add todo

* change target to storage-server, allow entityStore flag in prod mode

* fix issue with Update

* EntityAPI: make test work, need to resolve expected differences (#79123)

* make test work, need to resolve expected differences

* remove the fields not supported by legacy

* sanitize out the bits legacy does not support

* sanitize out the bits legacy does not support

---------

Co-authored-by: Ryan McKinley <ryantxu@gmail.com>

* update feature toggle generated files

* remove unused http headers

* update feature flag strategy

* devmode

* update readme

* spelling

* readme

---------

Co-authored-by: Ryan McKinley <ryantxu@gmail.com>
Dan Cech
2023-12-06 21:21:21 +01:00
committed by GitHub
parent 07915703fe
commit c4c9bfaf2e
42 changed files with 4358 additions and 2389 deletions
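
The storage-server target and the unifiedStorage feature flag referenced in the commit message map onto Grafana's standard feature-toggle configuration. A minimal sketch of enabling the flag for local testing, assuming the generated toggle name matches FlagUnifiedStorage used in the migration code below (the key names follow the usual [feature_toggles] convention and are illustrative, not taken from this change's readme):

; custom.ini -- illustrative only
[feature_toggles]
unifiedStorage = true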

View File

@@ -0,0 +1,110 @@
package entity
import (
context "context"
"strconv"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
status "google.golang.org/grpc/status"
"github.com/grafana/grafana/pkg/infra/appcontext"
)
var _ EntityStoreServer = (*entityStoreClientWrapper)(nil)
// wrapper for EntityStoreClient that implements EntityStore interface
type entityStoreClientWrapper struct {
EntityStoreClient
}
func (c *entityStoreClientWrapper) Read(ctx context.Context, in *ReadEntityRequest) (*Entity, error) {
ctx, err := c.wrapContext(ctx)
if err != nil {
return nil, err
}
return c.EntityStoreClient.Read(ctx, in)
}
func (c *entityStoreClientWrapper) BatchRead(ctx context.Context, in *BatchReadEntityRequest) (*BatchReadEntityResponse, error) {
ctx, err := c.wrapContext(ctx)
if err != nil {
return nil, err
}
return c.EntityStoreClient.BatchRead(ctx, in)
}
func (c *entityStoreClientWrapper) Write(ctx context.Context, in *WriteEntityRequest) (*WriteEntityResponse, error) {
ctx, err := c.wrapContext(ctx)
if err != nil {
return nil, err
}
return c.EntityStoreClient.Write(ctx, in)
}
func (c *entityStoreClientWrapper) Create(ctx context.Context, in *CreateEntityRequest) (*CreateEntityResponse, error) {
ctx, err := c.wrapContext(ctx)
if err != nil {
return nil, err
}
return c.EntityStoreClient.Create(ctx, in)
}
func (c *entityStoreClientWrapper) Update(ctx context.Context, in *UpdateEntityRequest) (*UpdateEntityResponse, error) {
ctx, err := c.wrapContext(ctx)
if err != nil {
return nil, err
}
return c.EntityStoreClient.Update(ctx, in)
}
func (c *entityStoreClientWrapper) Delete(ctx context.Context, in *DeleteEntityRequest) (*DeleteEntityResponse, error) {
ctx, err := c.wrapContext(ctx)
if err != nil {
return nil, err
}
return c.EntityStoreClient.Delete(ctx, in)
}
func (c *entityStoreClientWrapper) History(ctx context.Context, in *EntityHistoryRequest) (*EntityHistoryResponse, error) {
ctx, err := c.wrapContext(ctx)
if err != nil {
return nil, err
}
return c.EntityStoreClient.History(ctx, in)
}
func (c *entityStoreClientWrapper) List(ctx context.Context, in *EntityListRequest) (*EntityListResponse, error) {
ctx, err := c.wrapContext(ctx)
if err != nil {
return nil, err
}
return c.EntityStoreClient.List(ctx, in)
}
func (c *entityStoreClientWrapper) Watch(*EntityWatchRequest, EntityStore_WatchServer) error {
return status.Errorf(codes.Unimplemented, "method Watch not implemented")
}
func (c *entityStoreClientWrapper) wrapContext(ctx context.Context) (context.Context, error) {
user, err := appcontext.User(ctx)
if err != nil {
return nil, err
}
// set grpc metadata into the context to pass to the grpc server
ctx = metadata.NewOutgoingContext(ctx, metadata.Pairs(
"grafana-idtoken", user.IDToken,
"grafana-userid", strconv.FormatInt(user.UserID, 10),
"grafana-orgid", strconv.FormatInt(user.OrgID, 10),
"grafana-login", user.Login,
))
return ctx, nil
}
// TEMPORARY... while we split this into a new service (see below)
func (c *entityStoreClientWrapper) AdminWrite(ctx context.Context, in *AdminWriteEntityRequest) (*WriteEntityResponse, error) {
ctx, err := c.wrapContext(ctx)
if err != nil {
return nil, err
}
return c.EntityStoreClient.AdminWrite(ctx, in)
}
func NewEntityStoreClientWrapper(cc grpc.ClientConnInterface) EntityStoreServer {
return &entityStoreClientWrapper{&entityStoreClient{cc}}
}
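
For context, a minimal usage sketch of the wrapper above, assuming a locally reachable storage-server gRPC endpoint; the address, the placeholder key, and the helper name readEntity are illustrative and not part of this change:

package example

import (
    "context"

    "google.golang.org/grpc"
    "google.golang.org/grpc/credentials/insecure"

    "github.com/grafana/grafana/pkg/services/store/entity"
)

// readEntity dials a storage-server instance and issues a Read through the
// wrapper. The wrapper forwards the Grafana user identity found in ctx as
// gRPC metadata (grafana-userid, grafana-orgid, grafana-login, grafana-idtoken),
// so ctx must already carry a user (see appcontext.User above).
func readEntity(ctx context.Context) (*entity.Entity, error) {
    // address and credentials are placeholders for a local dev setup
    conn, err := grpc.Dial("localhost:10000", grpc.WithTransportCredentials(insecure.NewCredentials()))
    if err != nil {
        return nil, err
    }
    defer conn.Close()

    store := entity.NewEntityStoreClientWrapper(conn)
    return store.Read(ctx, &entity.ReadEntityRequest{
        Key:      "<entity key>", // placeholder
        WithBody: true,
    })
}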

View File

@@ -0,0 +1,153 @@
package db
import (
"fmt"
"strings"
"time"
"github.com/jmoiron/sqlx"
"xorm.io/xorm"
"github.com/grafana/grafana/pkg/infra/db"
"github.com/grafana/grafana/pkg/services/featuremgmt"
// "github.com/grafana/grafana/pkg/services/sqlstore"
"github.com/grafana/grafana/pkg/services/sqlstore/session"
"github.com/grafana/grafana/pkg/services/store/entity/migrations"
"github.com/grafana/grafana/pkg/services/store/entity/sqlstash"
"github.com/grafana/grafana/pkg/setting"
"github.com/grafana/grafana/pkg/util"
)
var _ sqlstash.EntityDB = (*EntityDB)(nil)
func ProvideEntityDB(db db.DB, cfg *setting.Cfg, features featuremgmt.FeatureToggles) (*EntityDB, error) {
return &EntityDB{
db: db,
cfg: cfg,
features: features,
}, nil
}
type EntityDB struct {
db db.DB
features featuremgmt.FeatureToggles
engine *xorm.Engine
cfg *setting.Cfg
}
func (db *EntityDB) Init() error {
_, err := db.GetEngine()
return err
}
func (db *EntityDB) GetEngine() (*xorm.Engine, error) {
if db.engine != nil {
return db.engine, nil
}
var engine *xorm.Engine
var err error
cfgSection := db.cfg.SectionWithEnvOverrides("entity_api")
dbType := cfgSection.Key("db_type").MustString("")
// if explicit connection settings are provided, use them
if dbType != "" {
dbHost := cfgSection.Key("db_host").MustString("")
dbName := cfgSection.Key("db_name").MustString("")
dbUser := cfgSection.Key("db_user").MustString("")
dbPass := cfgSection.Key("db_pass").MustString("")
if dbType == "postgres" {
// TODO: support all postgres connection options
dbSslMode := cfgSection.Key("db_sslmode").MustString("disable")
addr, err := util.SplitHostPortDefault(dbHost, "127.0.0.1", "5432")
if err != nil {
return nil, fmt.Errorf("invalid host specifier '%s': %w", dbHost, err)
}
connectionString := fmt.Sprintf(
"user=%s password=%s host=%s port=%s dbname=%s sslmode=%s", // sslcert=%s sslkey=%s sslrootcert=%s",
dbUser, dbPass, addr.Host, addr.Port, dbName, dbSslMode, // ss.dbCfg.ClientCertPath, ss.dbCfg.ClientKeyPath, ss.dbCfg.CaCertPath
)
engine, err = xorm.NewEngine("postgres", connectionString)
if err != nil {
return nil, err
}
_, err = engine.Exec("SET SESSION enable_experimental_alter_column_type_general=true")
if err != nil {
return nil, err
}
} else if dbType == "mysql" {
// TODO: support all mysql connection options
protocol := "tcp"
if strings.HasPrefix(dbHost, "/") {
protocol = "unix"
}
connectionString := fmt.Sprintf("%s:%s@%s(%s)/%s?collation=utf8mb4_unicode_ci&allowNativePasswords=true&clientFoundRows=true",
dbUser, dbPass, protocol, dbHost, dbName)
engine, err = xorm.NewEngine("mysql", connectionString)
if err != nil {
return nil, err
}
engine.SetMaxOpenConns(0)
engine.SetMaxIdleConns(2)
engine.SetConnMaxLifetime(time.Second * time.Duration(14400))
_, err = engine.Exec("SELECT 1")
if err != nil {
return nil, err
}
} else {
// TODO: sqlite support
return nil, fmt.Errorf("invalid db type specified: %s", dbType)
}
// configure sql logging
debugSQL := cfgSection.Key("log_queries").MustBool(false)
if !debugSQL {
engine.SetLogger(&xorm.DiscardLogger{})
} else {
// add stack to database calls to be able to see what repository initiated queries. Top 7 items from the stack as they are likely in the xorm library.
// engine.SetLogger(sqlstore.NewXormLogger(log.LvlInfo, log.WithSuffix(log.New("sqlstore.xorm"), log.CallerContextKey, log.StackCaller(log.DefaultCallerDepth))))
engine.ShowSQL(true)
engine.ShowExecTime(true)
}
// otherwise, try to use the grafana db connection
} else {
if db.db == nil {
return nil, fmt.Errorf("no db connection provided")
}
engine = db.db.GetEngine()
}
db.engine = engine
if err := migrations.MigrateEntityStore(db, db.features); err != nil {
db.engine = nil
return nil, err
}
return db.engine, nil
}
func (db *EntityDB) GetSession() (*session.SessionDB, error) {
engine, err := db.GetEngine()
if err != nil {
return nil, err
}
return session.GetSession(sqlx.NewDb(engine.DB().DB, engine.DriverName())), nil
}
func (db *EntityDB) GetCfg() *setting.Cfg {
return db.cfg
}
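
The connection settings read above live in an [entity_api] section of the Grafana configuration. A hedged sketch of that section for MySQL, using only the keys this code reads (all values are placeholders):

; illustrative values only
[entity_api]
db_type = mysql          ; "postgres" or "mysql"; empty falls back to the Grafana DB connection
db_host = 127.0.0.1:3306
db_name = grafana_entity
db_user = grafana
db_pass = secret
;db_sslmode = disable    ; postgres only
log_queries = false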

View File

@@ -25,6 +25,14 @@ func (i fakeEntityStore) Write(ctx context.Context, r *entity.WriteEntityRequest
return nil, fmt.Errorf("unimplemented")
}
func (i fakeEntityStore) Create(ctx context.Context, r *entity.CreateEntityRequest) (*entity.CreateEntityResponse, error) {
return nil, fmt.Errorf("unimplemented")
}
func (i fakeEntityStore) Update(ctx context.Context, r *entity.UpdateEntityRequest) (*entity.UpdateEntityResponse, error) {
return nil, fmt.Errorf("unimplemented")
}
func (i fakeEntityStore) Read(ctx context.Context, r *entity.ReadEntityRequest) (*entity.Entity, error) {
return nil, fmt.Errorf("unimplemented")
}
@@ -41,10 +49,14 @@ func (i fakeEntityStore) History(ctx context.Context, r *entity.EntityHistoryReq
return nil, fmt.Errorf("unimplemented")
}
func (i fakeEntityStore) Search(ctx context.Context, r *entity.EntitySearchRequest) (*entity.EntitySearchResponse, error) {
func (i fakeEntityStore) List(ctx context.Context, r *entity.EntityListRequest) (*entity.EntityListResponse, error) {
return nil, fmt.Errorf("unimplemented")
}
func (i fakeEntityStore) Watch(*entity.EntityWatchRequest, entity.EntityStore_WatchServer) error {
return fmt.Errorf("unimplemented")
}
func (i fakeEntityStore) FindReferences(ctx context.Context, r *entity.ReferenceRequest) (*entity.EntityListResponse, error) {
return nil, fmt.Errorf("unimplemented")
}

File diff suppressed because it is too large

View File

@@ -7,41 +7,75 @@ import "pkg/infra/grn/grn.proto";
// The canonical entity/document data -- this represents the raw bytes and storage level metadata
message Entity {
// Entity identifier -- tenant_id, kind, uid
grn.GRN GRN = 1;
// The version will change when the entity is saved. It is not necessarily sortable
// Globally unique ID set by the system. This can not be set explicitly
string guid = 1;
// The resourceVersion -- it will change whenever anything on the object is saved
string version = 2;
// Time in epoch milliseconds that the entity was created
int64 created_at = 3;
// Entity identifier -- tenant_id, kind, uid
grn.GRN GRN = 3;
// Time in epoch milliseconds that the entity was updated
int64 updated_at = 4;
// group version of the entity
string group_version = 23;
// Who created the entity
string created_by = 5;
// k8s key value
string key = 22;
// Who updated the entity
string updated_by = 6;
// The folder UID
string folder = 4;
// The folder UID (not stored in the body)
string folder = 7;
// Raw meta from k8s
bytes meta = 5;
// MD5 digest of the body
string ETag = 8;
// Raw bytes of the storage entity. The kind will determine what is a valid payload
bytes body = 6;
// k8s style status (ignored for now)
bytes status = 7;
// the friendly name of the entity
string name = 8;
// Content Length
int64 size = 9;
// Raw bytes of the storage entity. The kind will determine what is a valid payload
bytes body = 10;
// MD5 digest of the body
string ETag = 10;
// Entity summary as JSON
bytes summary_json = 11;
// Time in epoch milliseconds that the entity was created
int64 created_at = 11;
// Who created the entity
string created_by = 12;
// Time in epoch milliseconds that the entity was updated
int64 updated_at = 13;
// Who updated the entity
string updated_by = 14;
// External location info
EntityOriginInfo origin = 12;
EntityOriginInfo origin = 15;
// human-readable description of the entity
string description = 16;
// URL safe version of the name. It will be unique within the folder
string slug = 17;
// Commit message (optional)
string message = 18;
// Key value pairs. Tags are represented as keys with empty values
map<string,string> labels = 19;
// Optional field values. The schema will define and document possible values for a given kind
map<string, string> fields = 20;
// When errors exist
repeated EntityErrorInfo errors = 21;
}
// This stores additional metadata for entities that were synced from external systems
@@ -69,29 +103,6 @@ message EntityErrorInfo {
bytes details_json = 3;
}
// This is a subset of Entity that does not include body or sync info
message EntityVersionInfo {
// The version will change when the entity is saved. It is not necessarily sortable
string version = 1;
// Time in epoch milliseconds that the entity was updated
int64 updated_at = 2;
// Who updated the entity
string updated_by = 3;
// Content Length
int64 size = 4;
// MD5 digest of the body
string ETag = 5;
// optional "save" or "commit" message
//
// NOTE: currently managed by the dashboard_version table, and will be returned from a "history" command
string comment = 6;
}
//-----------------------------------------------
// Get request/response
//-----------------------------------------------
@@ -100,14 +111,22 @@ message ReadEntityRequest {
// Entity identifier
grn.GRN GRN = 1;
// Fetch an explicit version
string key = 7;
// Fetch an explicit version (default is latest)
string version = 2;
// Include the full meta bytes
bool with_meta = 3;
// Include the full body bytes
bool with_body = 3;
bool with_body = 4;
// Include the status bytes (ignored for now)
bool with_status = 5;
// Include derived summary metadata
bool with_summary = 4;
bool with_summary = 6;
}
//------------------------------------------------------
@@ -127,85 +146,38 @@ message BatchReadEntityResponse {
//-----------------------------------------------
message WriteEntityRequest {
// Entity identifier
grn.GRN GRN = 1;
// Where to save the entity (empty will leave it unchanged)
string folder = 2;
// The raw entity body
bytes body = 3;
// Message that can be seen when exploring entity history
string comment = 4;
// Entity details
Entity entity = 1;
// Used for optimistic locking. If missing, the previous version will be replaced regardless
string previous_version = 5;
string previous_version = 2;
}
// This operation is useful when syncing a resource from external sources
// that have more accurate metadata information (git, or an archive).
// This process can bypass the forced checks that
// This process can bypass the forced checks that
message AdminWriteEntityRequest {
// Entity identifier
grn.GRN GRN = 1;
// Where to save the entity (empty will leave it unchanged)
string folder = 2;
// The raw entity body
bytes body = 3;
// Message that can be seen when exploring entity history
string comment = 4;
// Time in epoch milliseconds that the entity was created
// Optional, if 0 it will use the current time
int64 created_at = 5;
// Time in epoch milliseconds that the entity was updated
// Optional, if empty it will use the current user
int64 updated_at = 6;
// Who created the entity
// Optional, if 0 it will use the current time
string created_by = 7;
// Who updated the entity
// Optional, if empty it will use the current user
string updated_by = 8;
// An explicit version identifier
// Optional, if set, this will overwrite/define an explicit version
string version = 9;
// Entity details
Entity entity = 1;
// Used for optimistic locking. If missing, the previous version will be replaced regardless
// This may not be used along with an explicit version in the request
string previous_version = 10;
string previous_version = 2;
// Request that all previous versions are removed from the history
// This will make sense for systems that manage history explicitly externally
bool clear_history = 11;
// Optionally define where the entity came from
EntityOriginInfo origin = 12;
bool clear_history = 3;
}
message WriteEntityResponse {
// Error info -- if exists, the save did not happen
EntityErrorInfo error = 1;
// Entity identifier
grn.GRN GRN = 2;
// Entity details with the body removed
EntityVersionInfo entity = 3;
// Entity summary as JSON
bytes summary_json = 4;
// Entity details
Entity entity = 2;
// Status code
Status status = 5;
Status status = 3;
// Status enumeration
enum Status {
@@ -216,6 +188,62 @@ message WriteEntityResponse {
}
}
//-----------------------------------------------
// Create request/response
//-----------------------------------------------
message CreateEntityRequest {
// Entity details
Entity entity = 1;
}
message CreateEntityResponse {
// Error info -- if exists, the save did not happen
EntityErrorInfo error = 1;
// Entity details
Entity entity = 2;
// Status code
Status status = 3;
// Status enumeration
enum Status {
ERROR = 0;
CREATED = 1;
}
}
//-----------------------------------------------
// Update request/response
//-----------------------------------------------
message UpdateEntityRequest {
// Entity details
Entity entity = 1;
// Used for optimistic locking. If missing, the previous version will be replaced regardless
string previous_version = 2;
}
message UpdateEntityResponse {
// Error info -- if exists, the save did not happen
EntityErrorInfo error = 1;
// Entity details
Entity entity = 2;
// Status code
Status status = 3;
// Status enumeration
enum Status {
ERROR = 0;
UPDATED = 1;
UNCHANGED = 2;
}
}
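
Create plus Update with previous_version give a simple optimistic-locking flow. A minimal sketch against the generated Go client (store and ctx are assumed from the wrapper sketch earlier; the key and body values are placeholders):

// Create an entity, then update it while asserting the version we last saw.
created, err := store.Create(ctx, &entity.CreateEntityRequest{
    Entity: &entity.Entity{
        Key:    "<entity key>", // placeholder
        Folder: "general",
        Body:   []byte(`{"title":"demo"}`),
    },
})
if err != nil {
    return err
}

// With PreviousVersion set, the server can reject the write if someone else
// saved a newer version first; omitting it replaces the entity unconditionally.
updated, err := store.Update(ctx, &entity.UpdateEntityRequest{
    Entity: &entity.Entity{
        Key:  created.Entity.Key,
        Body: []byte(`{"title":"demo v2"}`),
    },
    PreviousVersion: created.Entity.Version,
})
if err != nil {
    return err
}
_ = updated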
//-----------------------------------------------
// Delete request/response
//-----------------------------------------------
@@ -224,12 +252,28 @@ message DeleteEntityRequest {
// Entity identifier
grn.GRN GRN = 1;
// Used for optimistic locking. If missing, the previous version will be replaced regardless
string previous_version = 3;
string key = 3;
// Used for optimistic locking. If missing, the current version will be deleted regardless
string previous_version = 2;
}
message DeleteEntityResponse {
bool OK = 1;
// Error info -- if exists, the delete did not happen
EntityErrorInfo error = 1;
// Entity details
Entity entity = 2;
// Status code
Status status = 3;
// Status enumeration
enum Status {
ERROR = 0;
DELETED = 1;
NOTFOUND = 2;
}
}
//-----------------------------------------------
@@ -241,7 +285,7 @@ message EntityHistoryRequest {
grn.GRN GRN = 1;
// Maximum number of items to return
int64 limit = 3;
int64 limit = 3;
// Starting from the requested page
string next_page_token = 5;
@@ -252,7 +296,7 @@ message EntityHistoryResponse {
grn.GRN GRN = 1;
// Entity metadata without the raw bytes
repeated EntityVersionInfo versions = 2;
repeated Entity versions = 2;
// More results exist... pass this in the next request
string next_page_token = 3;
@@ -263,12 +307,12 @@ message EntityHistoryResponse {
// List request/response
//-----------------------------------------------
message EntitySearchRequest {
message EntityListRequest {
// Starting from the requested page (other query parameters must match!)
string next_page_token = 1;
// Maximum number of items to return
int64 limit = 2;
int64 limit = 2;
// Free text query string -- mileage may vary :)
string query = 3;
@@ -276,6 +320,9 @@ message EntitySearchRequest {
// limit to a specific kind (empty is all)
repeated string kind = 4;
// limit to a specific key
repeated string key = 11;
// Limit results to items in a specific folder
string folder = 5;
@@ -295,54 +342,22 @@ message EntitySearchRequest {
bool with_fields = 10;
}
// Search result metadata for each entity
message EntitySearchResult {
// Entity identifier
grn.GRN GRN = 1;
message ReferenceRequest {
// Starting from the requested page (other query parameters must match!)
string next_page_token = 1;
// The current version of this entity
string version = 2;
// Maximum number of items to return
int64 limit = 2;
// Content Length
int64 size = 3;
// Free text query string -- mileage may vary :)
string kind = 3;
// Time in epoch milliseconds that the entity was updated
int64 updated_at = 4;
// Who updated the entity
string updated_by = 5;
// Optionally include the full entity body
bytes body = 6;
//----------------------------------------
// Derived from body in the summary
//----------------------------------------
// Always included
string name = 7;
// Always included
string description = 8;
// The structured labels
map<string,string> labels = 9;
// Folder UID
string folder = 10;
// Slugified name
string slug = 11;
// Optionally include extracted JSON
bytes fields_json = 12;
// EntityErrorInfo in json
bytes error_json = 13;
// Free text query string -- mileage may vary :)
string uid = 4;
}
message EntitySearchResponse {
repeated EntitySearchResult results = 1;
message EntityListResponse {
repeated Entity results = 1;
// More results exist... pass this in the next request
string next_page_token = 2;
@@ -353,9 +368,9 @@ message EntitySearchResponse {
//-----------------------------------------------
message EntityWatchRequest {
// Timestamp of last changes. Empty will default to
int64 since = 1;
// Timestamp of last changes. Empty will default to
int64 since = 1;
// Watch specific entities
repeated grn.GRN GRN = 2;
@@ -380,7 +395,7 @@ message EntityWatchRequest {
message EntityWatchResponse {
// Timestamp the event was sent
int64 timestamp = 1;
int64 timestamp = 1;
// List of entities with the same action
repeated Entity entity = 2;
@@ -398,47 +413,47 @@ message EntityWatchResponse {
message EntitySummary {
string UID = 1;
string kind = 2;
string kind = 2;
string name = 3;
string description = 4;
string name = 3;
string description = 4;
// Key value pairs. Tags are represented as keys with empty values
map<string,string> labels = 5;
// Key value pairs. Tags are represented as keys with empty values
map<string,string> labels = 5;
// Parent folder UID
string folder = 6;
// Parent folder UID
string folder = 6;
// URL safe version of the name. It will be unique within the folder
string slug = 7;
// URL safe version of the name. It will be unique within the folder
string slug = 7;
// When errors exist
EntityErrorInfo error = 8;
// When errors exist
EntityErrorInfo error = 8;
// Optional field values. The schema will define and document possible values for a given kind
map<string, string> fields = 9;
// Optional field values. The schema will define and document possible values for a given kind
map<string, string> fields = 9;
// eg: panels within dashboard
repeated EntitySummary nested = 10;
// eg: panels within dashboard
repeated EntitySummary nested = 10;
// Optional references to external things
repeated EntityExternalReference references = 11;
// Optional references to external things
repeated EntityExternalReference references = 11;
}
message EntityExternalReference {
// Category of dependency
// eg: datasource, plugin, runtime
string family = 1;
// Category of dependency
// eg: datasource, plugin, runtime
string family = 1;
// datasource > prometheus|influx|...
// plugin > panel | datasource
// runtime > transformer
string type = 2;
// datasource > prometheus|influx|...
// plugin > panel | datasource
// runtime > transformer
string type = 2;
// datasource > UID
// plugin > plugin identifier
// runtime > name lookup
string identifier = 3;
// datasource > UID
// plugin > plugin identifier
// runtime > name lookup
string identifier = 3;
}
@@ -451,11 +466,13 @@ service EntityStore {
rpc Read(ReadEntityRequest) returns (Entity);
rpc BatchRead(BatchReadEntityRequest) returns (BatchReadEntityResponse);
rpc Write(WriteEntityRequest) returns (WriteEntityResponse);
rpc Create(CreateEntityRequest) returns (CreateEntityResponse);
rpc Update(UpdateEntityRequest) returns (UpdateEntityResponse);
rpc Delete(DeleteEntityRequest) returns (DeleteEntityResponse);
rpc History(EntityHistoryRequest) returns (EntityHistoryResponse);
rpc Search(EntitySearchRequest) returns (EntitySearchResponse);
rpc List(EntityListRequest) returns (EntityListResponse);
rpc Watch(EntityWatchRequest) returns (stream EntityWatchResponse);
// TEMPORARY... while we split this into a new service (see below)
rpc AdminWrite(AdminWriteEntityRequest) returns (WriteEntityResponse);
}
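
The List call that replaces Search pages through results with next_page_token. A short sketch of the paging loop a caller might use (store and ctx as in the earlier sketches; the kind filter and fmt output are placeholders):

req := &entity.EntityListRequest{
    Kind:  []string{"playlist"}, // placeholder kind filter
    Limit: 100,
}
for {
    rsp, err := store.List(ctx, req)
    if err != nil {
        return err
    }
    for _, e := range rsp.Results {
        fmt.Println(e.Guid, e.Key)
    }
    // an empty token means there are no more pages
    if rsp.NextPageToken == "" {
        break
    }
    req.NextPageToken = rsp.NextPageToken
}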

View File

@@ -1,7 +1,7 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.3.0
// - protoc v4.24.4
// - protoc v4.23.4
// source: entity.proto
package entity
@@ -22,9 +22,11 @@ const (
EntityStore_Read_FullMethodName = "/entity.EntityStore/Read"
EntityStore_BatchRead_FullMethodName = "/entity.EntityStore/BatchRead"
EntityStore_Write_FullMethodName = "/entity.EntityStore/Write"
EntityStore_Create_FullMethodName = "/entity.EntityStore/Create"
EntityStore_Update_FullMethodName = "/entity.EntityStore/Update"
EntityStore_Delete_FullMethodName = "/entity.EntityStore/Delete"
EntityStore_History_FullMethodName = "/entity.EntityStore/History"
EntityStore_Search_FullMethodName = "/entity.EntityStore/Search"
EntityStore_List_FullMethodName = "/entity.EntityStore/List"
EntityStore_Watch_FullMethodName = "/entity.EntityStore/Watch"
EntityStore_AdminWrite_FullMethodName = "/entity.EntityStore/AdminWrite"
)
@@ -36,9 +38,11 @@ type EntityStoreClient interface {
Read(ctx context.Context, in *ReadEntityRequest, opts ...grpc.CallOption) (*Entity, error)
BatchRead(ctx context.Context, in *BatchReadEntityRequest, opts ...grpc.CallOption) (*BatchReadEntityResponse, error)
Write(ctx context.Context, in *WriteEntityRequest, opts ...grpc.CallOption) (*WriteEntityResponse, error)
Create(ctx context.Context, in *CreateEntityRequest, opts ...grpc.CallOption) (*CreateEntityResponse, error)
Update(ctx context.Context, in *UpdateEntityRequest, opts ...grpc.CallOption) (*UpdateEntityResponse, error)
Delete(ctx context.Context, in *DeleteEntityRequest, opts ...grpc.CallOption) (*DeleteEntityResponse, error)
History(ctx context.Context, in *EntityHistoryRequest, opts ...grpc.CallOption) (*EntityHistoryResponse, error)
Search(ctx context.Context, in *EntitySearchRequest, opts ...grpc.CallOption) (*EntitySearchResponse, error)
List(ctx context.Context, in *EntityListRequest, opts ...grpc.CallOption) (*EntityListResponse, error)
Watch(ctx context.Context, in *EntityWatchRequest, opts ...grpc.CallOption) (EntityStore_WatchClient, error)
// TEMPORARY... while we split this into a new service (see below)
AdminWrite(ctx context.Context, in *AdminWriteEntityRequest, opts ...grpc.CallOption) (*WriteEntityResponse, error)
@@ -79,6 +83,24 @@ func (c *entityStoreClient) Write(ctx context.Context, in *WriteEntityRequest, o
return out, nil
}
func (c *entityStoreClient) Create(ctx context.Context, in *CreateEntityRequest, opts ...grpc.CallOption) (*CreateEntityResponse, error) {
out := new(CreateEntityResponse)
err := c.cc.Invoke(ctx, EntityStore_Create_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *entityStoreClient) Update(ctx context.Context, in *UpdateEntityRequest, opts ...grpc.CallOption) (*UpdateEntityResponse, error) {
out := new(UpdateEntityResponse)
err := c.cc.Invoke(ctx, EntityStore_Update_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *entityStoreClient) Delete(ctx context.Context, in *DeleteEntityRequest, opts ...grpc.CallOption) (*DeleteEntityResponse, error) {
out := new(DeleteEntityResponse)
err := c.cc.Invoke(ctx, EntityStore_Delete_FullMethodName, in, out, opts...)
@@ -97,9 +119,9 @@ func (c *entityStoreClient) History(ctx context.Context, in *EntityHistoryReques
return out, nil
}
func (c *entityStoreClient) Search(ctx context.Context, in *EntitySearchRequest, opts ...grpc.CallOption) (*EntitySearchResponse, error) {
out := new(EntitySearchResponse)
err := c.cc.Invoke(ctx, EntityStore_Search_FullMethodName, in, out, opts...)
func (c *entityStoreClient) List(ctx context.Context, in *EntityListRequest, opts ...grpc.CallOption) (*EntityListResponse, error) {
out := new(EntityListResponse)
err := c.cc.Invoke(ctx, EntityStore_List_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
@@ -154,9 +176,11 @@ type EntityStoreServer interface {
Read(context.Context, *ReadEntityRequest) (*Entity, error)
BatchRead(context.Context, *BatchReadEntityRequest) (*BatchReadEntityResponse, error)
Write(context.Context, *WriteEntityRequest) (*WriteEntityResponse, error)
Create(context.Context, *CreateEntityRequest) (*CreateEntityResponse, error)
Update(context.Context, *UpdateEntityRequest) (*UpdateEntityResponse, error)
Delete(context.Context, *DeleteEntityRequest) (*DeleteEntityResponse, error)
History(context.Context, *EntityHistoryRequest) (*EntityHistoryResponse, error)
Search(context.Context, *EntitySearchRequest) (*EntitySearchResponse, error)
List(context.Context, *EntityListRequest) (*EntityListResponse, error)
Watch(*EntityWatchRequest, EntityStore_WatchServer) error
// TEMPORARY... while we split this into a new service (see below)
AdminWrite(context.Context, *AdminWriteEntityRequest) (*WriteEntityResponse, error)
@@ -175,14 +199,20 @@ func (UnimplementedEntityStoreServer) BatchRead(context.Context, *BatchReadEntit
func (UnimplementedEntityStoreServer) Write(context.Context, *WriteEntityRequest) (*WriteEntityResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method Write not implemented")
}
func (UnimplementedEntityStoreServer) Create(context.Context, *CreateEntityRequest) (*CreateEntityResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method Create not implemented")
}
func (UnimplementedEntityStoreServer) Update(context.Context, *UpdateEntityRequest) (*UpdateEntityResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method Update not implemented")
}
func (UnimplementedEntityStoreServer) Delete(context.Context, *DeleteEntityRequest) (*DeleteEntityResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method Delete not implemented")
}
func (UnimplementedEntityStoreServer) History(context.Context, *EntityHistoryRequest) (*EntityHistoryResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method History not implemented")
}
func (UnimplementedEntityStoreServer) Search(context.Context, *EntitySearchRequest) (*EntitySearchResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method Search not implemented")
func (UnimplementedEntityStoreServer) List(context.Context, *EntityListRequest) (*EntityListResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method List not implemented")
}
func (UnimplementedEntityStoreServer) Watch(*EntityWatchRequest, EntityStore_WatchServer) error {
return status.Errorf(codes.Unimplemented, "method Watch not implemented")
@@ -256,6 +286,42 @@ func _EntityStore_Write_Handler(srv interface{}, ctx context.Context, dec func(i
return interceptor(ctx, in, info, handler)
}
func _EntityStore_Create_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(CreateEntityRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(EntityStoreServer).Create(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: EntityStore_Create_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(EntityStoreServer).Create(ctx, req.(*CreateEntityRequest))
}
return interceptor(ctx, in, info, handler)
}
func _EntityStore_Update_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(UpdateEntityRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(EntityStoreServer).Update(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: EntityStore_Update_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(EntityStoreServer).Update(ctx, req.(*UpdateEntityRequest))
}
return interceptor(ctx, in, info, handler)
}
func _EntityStore_Delete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(DeleteEntityRequest)
if err := dec(in); err != nil {
@@ -292,20 +358,20 @@ func _EntityStore_History_Handler(srv interface{}, ctx context.Context, dec func
return interceptor(ctx, in, info, handler)
}
func _EntityStore_Search_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(EntitySearchRequest)
func _EntityStore_List_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(EntityListRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(EntityStoreServer).Search(ctx, in)
return srv.(EntityStoreServer).List(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: EntityStore_Search_FullMethodName,
FullMethod: EntityStore_List_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(EntityStoreServer).Search(ctx, req.(*EntitySearchRequest))
return srv.(EntityStoreServer).List(ctx, req.(*EntityListRequest))
}
return interceptor(ctx, in, info, handler)
}
@@ -368,6 +434,14 @@ var EntityStore_ServiceDesc = grpc.ServiceDesc{
MethodName: "Write",
Handler: _EntityStore_Write_Handler,
},
{
MethodName: "Create",
Handler: _EntityStore_Create_Handler,
},
{
MethodName: "Update",
Handler: _EntityStore_Update_Handler,
},
{
MethodName: "Delete",
Handler: _EntityStore_Delete_Handler,
@@ -377,8 +451,8 @@ var EntityStore_ServiceDesc = grpc.ServiceDesc{
Handler: _EntityStore_History_Handler,
},
{
MethodName: "Search",
Handler: _EntityStore_Search_Handler,
MethodName: "List",
Handler: _EntityStore_List_Handler,
},
{
MethodName: "AdminWrite",

View File

@@ -1,303 +0,0 @@
package entity
import (
"encoding/base64"
"encoding/json"
"fmt"
"unsafe"
jsoniter "github.com/json-iterator/go"
"github.com/grafana/grafana/pkg/infra/grn"
)
func init() { //nolint:gochecknoinits
jsoniter.RegisterTypeEncoder("entity.EntitySearchResult", &searchResultCodec{})
jsoniter.RegisterTypeEncoder("entity.WriteEntityResponse", &writeResponseCodec{})
jsoniter.RegisterTypeEncoder("entity.Entity", &rawEntityCodec{})
jsoniter.RegisterTypeDecoder("entity.Entity", &rawEntityCodec{})
}
func writeRawJson(stream *jsoniter.Stream, val []byte) {
if json.Valid(val) {
_, _ = stream.Write(val)
} else {
stream.WriteString(string(val))
}
}
// Unlike the standard JSON marshal, this will write bytes as JSON when it can
type rawEntityCodec struct{}
func (obj *Entity) MarshalJSON() ([]byte, error) {
var json = jsoniter.ConfigCompatibleWithStandardLibrary
return json.Marshal(obj)
}
// UnmarshalJSON will read JSON into a Entity
func (obj *Entity) UnmarshalJSON(b []byte) error {
if obj == nil {
return fmt.Errorf("unexpected nil for raw objcet")
}
iter := jsoniter.ParseBytes(jsoniter.ConfigDefault, b)
readEntity(iter, obj)
return iter.Error
}
func (codec *rawEntityCodec) IsEmpty(ptr unsafe.Pointer) bool {
f := (*Entity)(ptr)
return f.GRN == nil && f.Body == nil
}
func (codec *rawEntityCodec) Encode(ptr unsafe.Pointer, stream *jsoniter.Stream) {
obj := (*Entity)(ptr)
stream.WriteObjectStart()
stream.WriteObjectField("GRN")
stream.WriteVal(obj.GRN)
if obj.Version != "" {
stream.WriteMore()
stream.WriteObjectField("version")
stream.WriteString(obj.Version)
}
if obj.CreatedAt > 0 {
stream.WriteMore()
stream.WriteObjectField("createdAt")
stream.WriteInt64(obj.CreatedAt)
}
if obj.UpdatedAt > 0 {
stream.WriteMore()
stream.WriteObjectField("updatedAt")
stream.WriteInt64(obj.UpdatedAt)
}
if obj.CreatedBy != "" {
stream.WriteMore()
stream.WriteObjectField("createdBy")
stream.WriteString(obj.CreatedBy)
}
if obj.UpdatedBy != "" {
stream.WriteMore()
stream.WriteObjectField("updatedBy")
stream.WriteString(obj.UpdatedBy)
}
if obj.Folder != "" {
stream.WriteMore()
stream.WriteObjectField("folder")
stream.WriteString(obj.Folder)
}
if obj.Body != nil {
stream.WriteMore()
if json.Valid(obj.Body) {
stream.WriteObjectField("body")
stream.WriteRaw(string(obj.Body)) // works for strings
} else {
sEnc := base64.StdEncoding.EncodeToString(obj.Body)
stream.WriteObjectField("body_base64")
stream.WriteString(sEnc) // works for strings
}
}
if len(obj.SummaryJson) > 0 {
stream.WriteMore()
stream.WriteObjectField("summary")
writeRawJson(stream, obj.SummaryJson)
}
if obj.ETag != "" {
stream.WriteMore()
stream.WriteObjectField("etag")
stream.WriteString(obj.ETag)
}
if obj.Size > 0 {
stream.WriteMore()
stream.WriteObjectField("size")
stream.WriteInt64(obj.Size)
}
if obj.Origin != nil {
stream.WriteMore()
stream.WriteObjectField("origin")
stream.WriteVal(obj.Origin)
}
stream.WriteObjectEnd()
}
func (codec *rawEntityCodec) Decode(ptr unsafe.Pointer, iter *jsoniter.Iterator) {
*(*Entity)(ptr) = Entity{}
raw := (*Entity)(ptr)
readEntity(iter, raw)
}
func readEntity(iter *jsoniter.Iterator, raw *Entity) {
for l1Field := iter.ReadObject(); l1Field != ""; l1Field = iter.ReadObject() {
switch l1Field {
case "GRN":
raw.GRN = &grn.GRN{}
iter.ReadVal(raw.GRN)
case "updatedAt":
raw.UpdatedAt = iter.ReadInt64()
case "updatedBy":
raw.UpdatedBy = iter.ReadString()
case "createdAt":
raw.CreatedAt = iter.ReadInt64()
case "createdBy":
raw.CreatedBy = iter.ReadString()
case "size":
raw.Size = iter.ReadInt64()
case "etag":
raw.ETag = iter.ReadString()
case "version":
raw.Version = iter.ReadString()
case "folder":
raw.Folder = iter.ReadString()
case "origin":
raw.Origin = &EntityOriginInfo{}
iter.ReadVal(raw.Origin)
case "summary":
var val interface{}
iter.ReadVal(&val) // ??? is there a smarter way to just keep the underlying bytes without read+marshal
body, err := json.Marshal(val)
if err != nil {
iter.ReportError("raw entity", "error reading summary body")
return
}
raw.SummaryJson = body
case "body":
var val interface{}
iter.ReadVal(&val) // ??? is there a smarter way to just keep the underlying bytes without read+marshal
body, err := json.Marshal(val)
if err != nil {
iter.ReportError("raw entity", "error creating json from body")
return
}
raw.Body = body
case "body_base64":
val := iter.ReadString()
body, err := base64.StdEncoding.DecodeString(val)
if err != nil {
iter.ReportError("raw entity", "error decoding base64 body")
return
}
raw.Body = body
default:
iter.ReportError("raw object", "unexpected field: "+l1Field)
return
}
}
}
// Unlike the standard JSON marshal, this will write bytes as JSON when it can
type searchResultCodec struct{}
func (obj *EntitySearchResult) MarshalJSON() ([]byte, error) {
var json = jsoniter.ConfigCompatibleWithStandardLibrary
return json.Marshal(obj)
}
func (codec *searchResultCodec) IsEmpty(ptr unsafe.Pointer) bool {
f := (*EntitySearchResult)(ptr)
return f.GRN == nil && f.Body == nil
}
func (codec *searchResultCodec) Encode(ptr unsafe.Pointer, stream *jsoniter.Stream) {
obj := (*EntitySearchResult)(ptr)
stream.WriteObjectStart()
stream.WriteObjectField("GRN")
stream.WriteVal(obj.GRN)
if obj.Name != "" {
stream.WriteMore()
stream.WriteObjectField("name")
stream.WriteString(obj.Name)
}
if obj.Description != "" {
stream.WriteMore()
stream.WriteObjectField("description")
stream.WriteString(obj.Description)
}
if obj.Size > 0 {
stream.WriteMore()
stream.WriteObjectField("size")
stream.WriteInt64(obj.Size)
}
if obj.UpdatedAt > 0 {
stream.WriteMore()
stream.WriteObjectField("updatedAt")
stream.WriteInt64(obj.UpdatedAt)
}
if obj.UpdatedBy != "" {
stream.WriteMore()
stream.WriteObjectField("updatedBy")
stream.WriteVal(obj.UpdatedBy)
}
if obj.Body != nil {
stream.WriteMore()
if json.Valid(obj.Body) {
stream.WriteObjectField("body")
_, _ = stream.Write(obj.Body) // works for strings
} else {
stream.WriteObjectField("body_base64")
stream.WriteVal(obj.Body) // works for strings
}
}
if obj.Labels != nil {
stream.WriteMore()
stream.WriteObjectField("labels")
stream.WriteVal(obj.Labels)
}
if obj.ErrorJson != nil {
stream.WriteMore()
stream.WriteObjectField("error")
writeRawJson(stream, obj.ErrorJson)
}
if obj.FieldsJson != nil {
stream.WriteMore()
stream.WriteObjectField("fields")
writeRawJson(stream, obj.FieldsJson)
}
stream.WriteObjectEnd()
}
// Unlike the standard JSON marshal, this will write bytes as JSON when it can
type writeResponseCodec struct{}
func (obj *WriteEntityResponse) MarshalJSON() ([]byte, error) {
var json = jsoniter.ConfigCompatibleWithStandardLibrary
return json.Marshal(obj)
}
func (codec *writeResponseCodec) IsEmpty(ptr unsafe.Pointer) bool {
f := (*WriteEntityResponse)(ptr)
return f == nil
}
func (codec *writeResponseCodec) Encode(ptr unsafe.Pointer, stream *jsoniter.Stream) {
obj := (*WriteEntityResponse)(ptr)
stream.WriteObjectStart()
stream.WriteObjectField("status")
stream.WriteString(obj.Status.String())
if obj.Error != nil {
stream.WriteMore()
stream.WriteObjectField("error")
stream.WriteVal(obj.Error)
}
if obj.GRN != nil {
stream.WriteMore()
stream.WriteObjectField("GRN")
stream.WriteVal(obj.GRN)
}
if obj.Entity != nil {
stream.WriteMore()
stream.WriteObjectField("entity")
stream.WriteVal(obj.Entity)
}
if len(obj.SummaryJson) > 0 {
stream.WriteMore()
stream.WriteObjectField("summary")
writeRawJson(stream, obj.SummaryJson)
}
stream.WriteObjectEnd()
}

View File

@@ -1,50 +0,0 @@
package entity
import (
"encoding/json"
"testing"
"github.com/stretchr/testify/require"
"github.com/grafana/grafana/pkg/infra/grn"
)
func TestRawEncoders(t *testing.T) {
body, err := json.Marshal(map[string]any{
"hello": "world",
"field": 1.23,
})
require.NoError(t, err)
raw := &Entity{
GRN: &grn.GRN{
ResourceIdentifier: "a",
ResourceKind: "b",
},
Version: "c",
ETag: "d",
Body: body,
}
b, err := json.MarshalIndent(raw, "", " ")
require.NoError(t, err)
str := string(b)
require.JSONEq(t, `{
"GRN": {
"ResourceKind": "b",
"ResourceIdentifier": "a"
},
"version": "c",
"body": {
"field": 1.23,
"hello": "world"
},
"etag": "d"
}`, str)
copy := &Entity{}
err = json.Unmarshal(b, copy)
require.NoError(t, err)
}

View File

@@ -6,64 +6,116 @@ import (
"github.com/grafana/grafana/pkg/services/sqlstore/migrator"
)
func getLatinPathColumn(name string) *migrator.Column {
return &migrator.Column{
Name: name,
Type: migrator.DB_NVarchar,
Length: 1024,
Nullable: false,
IsLatin: true, // only used in MySQL
}
}
func initEntityTables(mg *migrator.Migrator) string {
marker := "Initialize entity tables (v005)" // changing this key wipe+rewrite everything
mg.AddMigration(marker, &migrator.RawSQLMigration{})
func initEntityTables(mg *migrator.Migrator) {
grnLength := 256 // len(tenant)~8 + len(kind)!16 + len(kind)~128 = 256
tables := []migrator.Table{}
tables = append(tables, migrator.Table{
Name: "entity",
Columns: []*migrator.Column{
// Object ID (OID) will be unique across all objects/instances
// uuid5( tenant_id, kind + uid )
{Name: "grn", Type: migrator.DB_NVarchar, Length: grnLength, Nullable: false, IsPrimaryKey: true},
// primary identifier
{Name: "guid", Type: migrator.DB_NVarchar, Length: 36, Nullable: false, IsPrimaryKey: true},
{Name: "version", Type: migrator.DB_NVarchar, Length: 128, Nullable: false},
// The entity identifier
{Name: "tenant_id", Type: migrator.DB_BigInt, Nullable: false},
{Name: "kind", Type: migrator.DB_NVarchar, Length: 255, Nullable: false},
{Name: "uid", Type: migrator.DB_NVarchar, Length: 40, Nullable: false},
{Name: "folder", Type: migrator.DB_NVarchar, Length: 40, Nullable: false},
{Name: "slug", Type: migrator.DB_NVarchar, Length: 189, Nullable: false}, // from title
{Name: "key", Type: migrator.DB_Text, Nullable: false},
{Name: "group", Type: migrator.DB_NVarchar, Length: 190, Nullable: false},
{Name: "group_version", Type: migrator.DB_NVarchar, Length: 190, Nullable: false},
{Name: "kind", Type: migrator.DB_NVarchar, Length: 190, Nullable: false},
{Name: "uid", Type: migrator.DB_NVarchar, Length: 190, Nullable: false},
{Name: "folder", Type: migrator.DB_NVarchar, Length: 190, Nullable: false}, // uid of folder
// The raw entity body (any byte array)
{Name: "body", Type: migrator.DB_LongBlob, Nullable: true}, // null when nested or remote
{Name: "meta", Type: migrator.DB_Text, Nullable: true}, // raw meta object from k8s (with standard stuff removed)
{Name: "body", Type: migrator.DB_LongText, Nullable: true}, // null when nested or remote
{Name: "status", Type: migrator.DB_Text, Nullable: true}, // raw status object
{Name: "size", Type: migrator.DB_BigInt, Nullable: false},
{Name: "etag", Type: migrator.DB_NVarchar, Length: 32, Nullable: false, IsLatin: true}, // md5(body)
{Name: "version", Type: migrator.DB_NVarchar, Length: 128, Nullable: false},
// Who changed what when -- We should avoid JOINs with other tables in the database
{Name: "updated_at", Type: migrator.DB_BigInt, Nullable: false},
// Who changed what when
{Name: "created_at", Type: migrator.DB_BigInt, Nullable: false},
{Name: "updated_by", Type: migrator.DB_NVarchar, Length: 190, Nullable: false},
{Name: "created_by", Type: migrator.DB_NVarchar, Length: 190, Nullable: false},
{Name: "updated_at", Type: migrator.DB_BigInt, Nullable: false},
{Name: "updated_by", Type: migrator.DB_NVarchar, Length: 190, Nullable: false},
// Mark objects with origin metadata
{Name: "origin", Type: migrator.DB_NVarchar, Length: 40, Nullable: false},
getLatinPathColumn("origin_key"), // index with length 1024
{Name: "origin_key", Type: migrator.DB_Text, Nullable: false},
{Name: "origin_ts", Type: migrator.DB_BigInt, Nullable: false},
// Summary data (always extracted from the `body` column)
{Name: "name", Type: migrator.DB_NVarchar, Length: 255, Nullable: false},
{Name: "description", Type: migrator.DB_NVarchar, Length: 255, Nullable: true},
{Name: "labels", Type: migrator.DB_Text, Nullable: true}, // JSON object
{Name: "fields", Type: migrator.DB_Text, Nullable: true}, // JSON object
{Name: "errors", Type: migrator.DB_Text, Nullable: true}, // JSON object
// Metadata
{Name: "name", Type: migrator.DB_NVarchar, Length: 190, Nullable: false},
{Name: "slug", Type: migrator.DB_NVarchar, Length: 190, Nullable: false}, // from title
{Name: "description", Type: migrator.DB_Text, Nullable: true},
// Commit message
{Name: "message", Type: migrator.DB_Text, Nullable: false}, // defaults to empty string
{Name: "labels", Type: migrator.DB_Text, Nullable: true}, // JSON object
{Name: "fields", Type: migrator.DB_Text, Nullable: true}, // JSON object
{Name: "errors", Type: migrator.DB_Text, Nullable: true}, // JSON object
},
Indices: []*migrator.Index{
{Cols: []string{"kind"}},
{Cols: []string{"folder"}},
{Cols: []string{"uid"}},
{Cols: []string{"tenant_id", "kind", "uid"}, Type: migrator.UniqueIndex},
// {Cols: []string{"tenant_id", "folder", "slug"}, Type: migrator.UniqueIndex},
{Cols: []string{"folder"}, Type: migrator.IndexType},
},
})
tables = append(tables, migrator.Table{
Name: "entity_history",
Columns: []*migrator.Column{
// only difference from entity table is that we store multiple versions of the same entity
// so we have a unique index on guid+version instead of guid as primary key
{Name: "guid", Type: migrator.DB_NVarchar, Length: 36, Nullable: false},
{Name: "version", Type: migrator.DB_NVarchar, Length: 128, Nullable: false},
// The entity identifier
{Name: "tenant_id", Type: migrator.DB_BigInt, Nullable: false},
{Name: "key", Type: migrator.DB_Text, Nullable: false},
{Name: "group", Type: migrator.DB_NVarchar, Length: 190, Nullable: false},
{Name: "group_version", Type: migrator.DB_NVarchar, Length: 190, Nullable: false},
{Name: "kind", Type: migrator.DB_NVarchar, Length: 190, Nullable: false},
{Name: "uid", Type: migrator.DB_NVarchar, Length: 190, Nullable: false},
{Name: "folder", Type: migrator.DB_NVarchar, Length: 190, Nullable: false}, // uid of folder
{Name: "access", Type: migrator.DB_Text, Nullable: true}, // JSON object
// The raw entity body (any byte array)
{Name: "meta", Type: migrator.DB_Text, Nullable: true}, // raw meta object from k8s (with standard stuff removed)
{Name: "body", Type: migrator.DB_LongText, Nullable: true}, // null when nested or remote
{Name: "status", Type: migrator.DB_Text, Nullable: true}, // raw status object
{Name: "size", Type: migrator.DB_BigInt, Nullable: false},
{Name: "etag", Type: migrator.DB_NVarchar, Length: 32, Nullable: false, IsLatin: true}, // md5(body)
// Who changed what when
{Name: "created_at", Type: migrator.DB_BigInt, Nullable: false},
{Name: "created_by", Type: migrator.DB_NVarchar, Length: 190, Nullable: false},
{Name: "updated_at", Type: migrator.DB_BigInt, Nullable: false},
{Name: "updated_by", Type: migrator.DB_NVarchar, Length: 190, Nullable: false},
// Mark objects with origin metadata
{Name: "origin", Type: migrator.DB_NVarchar, Length: 40, Nullable: false},
{Name: "origin_key", Type: migrator.DB_Text, Nullable: false},
{Name: "origin_ts", Type: migrator.DB_BigInt, Nullable: false},
// Metadata
{Name: "name", Type: migrator.DB_NVarchar, Length: 190, Nullable: false},
{Name: "slug", Type: migrator.DB_NVarchar, Length: 190, Nullable: false}, // from title
{Name: "description", Type: migrator.DB_Text, Nullable: true},
// Commit message
{Name: "message", Type: migrator.DB_Text, Nullable: false}, // defaults to empty string
{Name: "labels", Type: migrator.DB_Text, Nullable: true}, // JSON object
{Name: "fields", Type: migrator.DB_Text, Nullable: true}, // JSON object
{Name: "errors", Type: migrator.DB_Text, Nullable: true}, // JSON object
},
Indices: []*migrator.Index{
{Cols: []string{"guid", "version"}, Type: migrator.UniqueIndex},
{Cols: []string{"tenant_id", "kind", "uid", "version"}, Type: migrator.UniqueIndex},
},
})
@@ -71,33 +123,26 @@ func initEntityTables(mg *migrator.Migrator) {
tables = append(tables, migrator.Table{
Name: "entity_folder",
Columns: []*migrator.Column{
{Name: "grn", Type: migrator.DB_NVarchar, Length: grnLength, Nullable: false, IsPrimaryKey: true},
{Name: "tenant_id", Type: migrator.DB_BigInt, Nullable: false},
{Name: "uid", Type: migrator.DB_NVarchar, Length: 40, Nullable: false},
getLatinPathColumn("slug_path"), ///slug/slug/slug/
{Name: "tree", Type: migrator.DB_Text, Nullable: false}, // JSON []{uid, title}
{Name: "depth", Type: migrator.DB_Int, Nullable: false}, // starts at 1
{Name: "left", Type: migrator.DB_Int, Nullable: false}, // MPTT
{Name: "right", Type: migrator.DB_Int, Nullable: false}, // MPTT
{Name: "detached", Type: migrator.DB_Bool, Nullable: false}, // a parent folder was not found
},
Indices: []*migrator.Index{
{Cols: []string{"tenant_id", "uid"}, Type: migrator.UniqueIndex},
// {Cols: []string{"tenant_id", "slug_path"}, Type: migrator.UniqueIndex},
{Name: "guid", Type: migrator.DB_NVarchar, Length: 36, Nullable: false, IsPrimaryKey: true},
{Name: "slug_path", Type: migrator.DB_Text, Nullable: false}, // /slug/slug/slug/
{Name: "tree", Type: migrator.DB_Text, Nullable: false}, // JSON []{uid, title}
{Name: "depth", Type: migrator.DB_Int, Nullable: false}, // starts at 1
{Name: "lft", Type: migrator.DB_Int, Nullable: false}, // MPTT
{Name: "rgt", Type: migrator.DB_Int, Nullable: false}, // MPTT
{Name: "detached", Type: migrator.DB_Bool, Nullable: false}, // a parent folder was not found
},
})
tables = append(tables, migrator.Table{
Name: "entity_labels",
Columns: []*migrator.Column{
{Name: "grn", Type: migrator.DB_NVarchar, Length: grnLength, Nullable: false},
{Name: "label", Type: migrator.DB_NVarchar, Length: 191, Nullable: false},
{Name: "value", Type: migrator.DB_NVarchar, Length: 1024, Nullable: false},
{Name: "parent_grn", Type: migrator.DB_NVarchar, Length: grnLength, Nullable: true},
{Name: "guid", Type: migrator.DB_NVarchar, Length: 36, Nullable: false},
{Name: "label", Type: migrator.DB_NVarchar, Length: 190, Nullable: false},
{Name: "value", Type: migrator.DB_Text, Nullable: false},
},
Indices: []*migrator.Index{
{Cols: []string{"grn", "label"}, Type: migrator.UniqueIndex},
{Cols: []string{"parent_grn"}, Type: migrator.IndexType},
{Cols: []string{"guid", "label"}, Type: migrator.UniqueIndex},
},
})
@@ -105,78 +150,24 @@ func initEntityTables(mg *migrator.Migrator) {
Name: "entity_ref",
Columns: []*migrator.Column{
// Source:
{Name: "grn", Type: migrator.DB_NVarchar, Length: grnLength, Nullable: false},
{Name: "parent_grn", Type: migrator.DB_NVarchar, Length: grnLength, Nullable: true},
{Name: "guid", Type: migrator.DB_NVarchar, Length: 36, Nullable: false},
// Address (defined in the body, not resolved, may be invalid and change)
{Name: "family", Type: migrator.DB_NVarchar, Length: 255, Nullable: false},
{Name: "type", Type: migrator.DB_NVarchar, Length: 255, Nullable: true},
{Name: "id", Type: migrator.DB_NVarchar, Length: 1024, Nullable: true},
{Name: "family", Type: migrator.DB_NVarchar, Length: 190, Nullable: false},
{Name: "type", Type: migrator.DB_NVarchar, Length: 190, Nullable: true},
{Name: "id", Type: migrator.DB_NVarchar, Length: 190, Nullable: true},
// Runtime calcs (will depend on the system state)
{Name: "resolved_ok", Type: migrator.DB_Bool, Nullable: false},
{Name: "resolved_to", Type: migrator.DB_NVarchar, Length: 40, Nullable: false},
{Name: "resolved_warning", Type: migrator.DB_NVarchar, Length: 255, Nullable: false},
{Name: "resolved_to", Type: migrator.DB_NVarchar, Length: 36, Nullable: false},
{Name: "resolved_warning", Type: migrator.DB_Text, Nullable: false},
{Name: "resolved_time", Type: migrator.DB_DateTime, Nullable: false}, // resolution cache timestamp
},
Indices: []*migrator.Index{
{Cols: []string{"grn"}, Type: migrator.IndexType},
{Cols: []string{"guid"}, Type: migrator.IndexType},
{Cols: []string{"family"}, Type: migrator.IndexType},
{Cols: []string{"type"}, Type: migrator.IndexType},
{Cols: []string{"resolved_to"}, Type: migrator.IndexType},
{Cols: []string{"parent_grn"}, Type: migrator.IndexType},
},
})
tables = append(tables, migrator.Table{
Name: "entity_history",
Columns: []*migrator.Column{
{Name: "grn", Type: migrator.DB_NVarchar, Length: grnLength, Nullable: false},
{Name: "version", Type: migrator.DB_NVarchar, Length: 128, Nullable: false},
// Raw bytes
{Name: "body", Type: migrator.DB_LongBlob, Nullable: false},
{Name: "size", Type: migrator.DB_BigInt, Nullable: false},
{Name: "etag", Type: migrator.DB_NVarchar, Length: 32, Nullable: false, IsLatin: true}, // md5(body)
// Who changed what when
{Name: "updated_at", Type: migrator.DB_BigInt, Nullable: false},
{Name: "updated_by", Type: migrator.DB_NVarchar, Length: 190, Nullable: false},
// Commit message
{Name: "message", Type: migrator.DB_Text, Nullable: false}, // defaults to empty string
},
Indices: []*migrator.Index{
{Cols: []string{"grn", "version"}, Type: migrator.UniqueIndex},
{Cols: []string{"updated_by"}, Type: migrator.IndexType},
},
})
tables = append(tables, migrator.Table{
Name: "entity_nested",
Columns: []*migrator.Column{
{Name: "grn", Type: migrator.DB_NVarchar, Length: grnLength, Nullable: false, IsPrimaryKey: true},
{Name: "parent_grn", Type: migrator.DB_NVarchar, Length: grnLength, Nullable: false},
// The entity identifier
{Name: "tenant_id", Type: migrator.DB_BigInt, Nullable: false},
{Name: "kind", Type: migrator.DB_NVarchar, Length: 255, Nullable: false},
{Name: "uid", Type: migrator.DB_NVarchar, Length: 40, Nullable: false},
{Name: "folder", Type: migrator.DB_NVarchar, Length: 40, Nullable: false},
// Summary data (always extracted from the `body` column)
{Name: "name", Type: migrator.DB_NVarchar, Length: 255, Nullable: false},
{Name: "description", Type: migrator.DB_NVarchar, Length: 255, Nullable: true},
{Name: "labels", Type: migrator.DB_Text, Nullable: true}, // JSON object
{Name: "fields", Type: migrator.DB_Text, Nullable: true}, // JSON object
{Name: "errors", Type: migrator.DB_Text, Nullable: true}, // JSON object
},
Indices: []*migrator.Index{
{Cols: []string{"parent_grn"}},
{Cols: []string{"kind"}},
{Cols: []string{"folder"}},
{Cols: []string{"uid"}},
{Cols: []string{"tenant_id", "kind", "uid"}, Type: migrator.UniqueIndex},
},
})
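// The summary columns above (name, description, labels, fields, errors) are
// denormalized from the parent entity body so nested items can be listed and
// filtered without parsing the blob; the JSON columns hold plain objects,
// e.g. labels might carry {"team":"obs"} (an illustrative value, not real data).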
@@ -189,8 +180,5 @@ func initEntityTables(mg *migrator.Migrator) {
}
}
mg.AddMigration("set path collation on entity table", migrator.NewRawSQLMigration("").
// MySQL `utf8mb4_unicode_ci` collation is set in `mysql_dialect.go`
// SQLite uses a `BINARY` collation by default
Postgres("ALTER TABLE entity_folder ALTER COLUMN slug_path TYPE VARCHAR(1024) COLLATE \"C\";")) // Collate C - sorting done based on character code byte values
return marker
}


@@ -4,69 +4,73 @@ import (
"context"
"fmt"
"github.com/grafana/grafana/pkg/infra/db"
"github.com/grafana/grafana/pkg/services/featuremgmt"
"github.com/grafana/grafana/pkg/services/sqlstore"
"github.com/grafana/grafana/pkg/services/sqlstore/migrator"
"github.com/grafana/grafana/pkg/services/sqlstore/session"
"github.com/grafana/grafana/pkg/setting"
"github.com/grafana/grafana/pkg/services/store/entity/sqlstash"
)
func MigrateEntityStore(xdb db.DB, features featuremgmt.FeatureToggles) error {
func MigrateEntityStore(db sqlstash.EntityDB, features featuremgmt.FeatureToggles) error {
// Skip if feature flag is not enabled
if !features.IsEnabledGlobally(featuremgmt.FlagEntityStore) {
if !features.IsEnabledGlobally(featuremgmt.FlagUnifiedStorage) {
return nil
}
// Migrations depend on upstream xorm implementations
sql, ok := xdb.(*sqlstore.SQLStore)
if !ok {
return nil
engine, err := db.GetEngine()
if err != nil {
return err
}
// !!! This should not run in production!
// The object store SQL schema is still in active development and this
// will only be called when the feature toggle is enabled
// This check should not be necessary, but is added as an extra safeguard
if setting.Env == setting.Prod {
return nil
}
marker := "Initialize entity tables (v0)" // changing this key wipe+rewrite everything
mg := migrator.NewScopedMigrator(sql.GetEngine(), sql.Cfg, "entity")
mg := migrator.NewScopedMigrator(engine, db.GetCfg(), "entity")
mg.AddCreateMigration()
mg.AddMigration(marker, &migrator.RawSQLMigration{})
initEntityTables(mg)
marker := initEntityTables(mg)
// While this feature is under development, we can completely wipe and recreate the tables.
// The initial plan is to keep the source of truth in existing SQL tables, and mirror it
// to a kubernetes model. Once the kubernetes model needs to be preserved,
// this code should be removed
log, err := mg.GetMigrationLog()
exists, err := engine.IsTableExist("entity_migration_log")
if err != nil {
return err
}
_, found := log[marker]
if !found && len(log) > 0 {
// Remove the migration log (and potential other orphan tables)
tables := []string{"entity_migration_log"}
ctx := context.Background()
err = sql.GetSqlxSession().WithTransaction(ctx, func(tx *session.SessionTx) error {
for _, t := range tables {
_, err := tx.Exec(ctx, fmt.Sprintf("DROP TABLE IF EXISTS %s", t))
if err != nil {
return err
}
}
return nil
})
if exists {
log, err := mg.GetMigrationLog()
if err != nil {
return err
}
_, found := log[marker]
if !found && len(log) > 0 {
// Remove the migration log (and potential other orphan tables)
tables := []string{"entity_migration_log"}
ctx := context.Background()
sess, err := db.GetSession()
if err != nil {
return err
}
err = sess.WithTransaction(ctx, func(tx *session.SessionTx) error {
for _, t := range tables {
_, err := tx.Exec(ctx, fmt.Sprintf("DROP TABLE IF EXISTS %s", t))
if err != nil {
return err
}
}
return nil
})
if err != nil {
return err
}
// remove old entries from in-memory log
for id := range log {
mg.RemoveMigrationLogs(id)
}
}
}
return mg.Start(
features.IsEnabledGlobally(featuremgmt.FlagMigrationLocking),
sql.GetMigrationLockAttemptTimeout())
0)
}
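// A rough sketch of a call site for the new signature; the variable names
// (entityDB, features) and the error wrapping are illustrative, only
// MigrateEntityStore itself comes from this file:
//
//   if err := migrations.MigrateEntityStore(entityDB, features); err != nil {
//       return fmt.Errorf("entity store migration failed: %w", err)
//   }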


@@ -0,0 +1,52 @@
package server
import (
"fmt"
"net"
"strconv"
// "github.com/grafana/grafana/pkg/services/featuremgmt"
"github.com/grafana/grafana/pkg/setting"
)
type config struct {
enabled bool
devMode bool
ip net.IP
port int
host string
apiURL string
logLevel int
}
func newConfig(cfg *setting.Cfg) *config {
defaultLogLevel := 0
// TODO
ip := net.ParseIP(cfg.HTTPAddr)
apiURL := cfg.AppURL
port, err := strconv.Atoi(cfg.HTTPPort)
if err != nil {
port = 3001
}
if cfg.Env == setting.Dev {
defaultLogLevel = 10
port = 3001
ip = net.ParseIP("127.0.0.1")
apiURL = fmt.Sprintf("https://%s:%d", ip, port)
}
host := fmt.Sprintf("%s:%d", ip, port)
return &config{
enabled: true, // cfg.IsFeatureToggleEnabled(featuremgmt.FlagGrafanaStorageServer),
devMode: cfg.Env == setting.Dev,
ip: ip,
port: port,
host: host,
logLevel: cfg.SectionWithEnvOverrides("storage-server").Key("log_level").MustInt(defaultLogLevel),
apiURL: apiURL,
}
}
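// The one storage-server specific setting read above corresponds to an ini
// section along these lines (the value is an example, not a shipped default):
//
//   [storage-server]
//   log_level = 10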


@@ -0,0 +1,196 @@
package server
import (
"context"
"fmt"
"strconv"
"github.com/go-jose/go-jose/v3/jwt"
"github.com/grafana/dskit/services"
"github.com/prometheus/client_golang/prometheus"
"google.golang.org/grpc/metadata"
"github.com/grafana/grafana/pkg/infra/appcontext"
"github.com/grafana/grafana/pkg/infra/tracing"
"github.com/grafana/grafana/pkg/modules"
"github.com/grafana/grafana/pkg/registry"
"github.com/grafana/grafana/pkg/services/featuremgmt"
"github.com/grafana/grafana/pkg/services/grpcserver"
"github.com/grafana/grafana/pkg/services/grpcserver/interceptors"
"github.com/grafana/grafana/pkg/services/store/entity"
entityDB "github.com/grafana/grafana/pkg/services/store/entity/db"
"github.com/grafana/grafana/pkg/services/store/entity/sqlstash"
"github.com/grafana/grafana/pkg/services/user"
"github.com/grafana/grafana/pkg/setting"
)
var (
_ Service = (*service)(nil)
_ registry.BackgroundService = (*service)(nil)
_ registry.CanBeDisabled = (*service)(nil)
)
func init() {
// do nothing
}
type Service interface {
services.NamedService
registry.BackgroundService
registry.CanBeDisabled
}
type service struct {
*services.BasicService
config *config
cfg *setting.Cfg
features featuremgmt.FeatureToggles
stopCh chan struct{}
stoppedCh chan error
handler grpcserver.Provider
tracing *tracing.TracingService
authenticator interceptors.Authenticator
}
type Authenticator struct{}
func (f *Authenticator) Authenticate(ctx context.Context) (context.Context, error) {
md, ok := metadata.FromIncomingContext(ctx)
if !ok {
return nil, fmt.Errorf("no metadata found")
}
// TODO: use id token instead of these fields
login := md.Get("grafana-login")[0]
if login == "" {
return nil, fmt.Errorf("no login found in context")
}
userID, err := strconv.ParseInt(md.Get("grafana-userid")[0], 10, 64)
if err != nil {
return nil, fmt.Errorf("invalid user id: %w", err)
}
orgID, err := strconv.ParseInt(md.Get("grafana-orgid")[0], 10, 64)
if err != nil {
return nil, fmt.Errorf("invalid org id: %w", err)
}
// TODO: validate id token
idToken := md.Get("grafana-idtoken")[0]
if idToken == "" {
return nil, fmt.Errorf("no id token found in context")
}
jwtToken, err := jwt.ParseSigned(idToken)
if err != nil {
return nil, fmt.Errorf("invalid id token: %w", err)
}
claims := jwt.Claims{}
err = jwtToken.UnsafeClaimsWithoutVerification(&claims)
if err != nil {
return nil, fmt.Errorf("invalid id token: %w", err)
}
// fmt.Printf("JWT CLAIMS: %+v\n", claims)
return appcontext.WithUser(ctx, &user.SignedInUser{
Login: login,
UserID: userID,
OrgID: orgID,
}), nil
}
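// A rough sketch of the client-side counterpart this authenticator expects:
// the caller attaches the same metadata keys to its outgoing gRPC context.
// The helper name and wiring are illustrative, not code from this change:
//
//   func withGrafanaUser(ctx context.Context, usr *user.SignedInUser, idToken string) context.Context {
//       return metadata.AppendToOutgoingContext(ctx,
//           "grafana-login", usr.Login,
//           "grafana-userid", strconv.FormatInt(usr.UserID, 10),
//           "grafana-orgid", strconv.FormatInt(usr.OrgID, 10),
//           "grafana-idtoken", idToken,
//       )
//   }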
var _ interceptors.Authenticator = (*Authenticator)(nil)
func ProvideService(
cfg *setting.Cfg,
features featuremgmt.FeatureToggles,
) (*service, error) {
tracing, err := tracing.ProvideService(cfg)
if err != nil {
return nil, err
}
authn := &Authenticator{}
s := &service{
config: newConfig(cfg),
cfg: cfg,
features: features,
stopCh: make(chan struct{}),
authenticator: authn,
tracing: tracing,
}
// This will be used when running as a dskit service
s.BasicService = services.NewBasicService(s.start, s.running, nil).WithName(modules.StorageServer)
return s, nil
}
func (s *service) IsDisabled() bool {
return !s.config.enabled
}
// Run is an adapter for the BackgroundService interface.
func (s *service) Run(ctx context.Context) error {
if err := s.start(ctx); err != nil {
return err
}
return s.running(ctx)
}
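// start brings the storage server up in a fixed order: open the entity
// database, build the SQL-backed entity server on top of it, stand up the
// gRPC server with the authenticator and tracing attached, register the
// entity store service on it, and finally start serving requests.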
func (s *service) start(ctx context.Context) error {
// TODO: use wire
// TODO: support using grafana db connection?
eDB, err := entityDB.ProvideEntityDB(nil, s.cfg, s.features)
if err != nil {
return err
}
err = eDB.Init()
if err != nil {
return err
}
store, err := sqlstash.ProvideSQLEntityServer(eDB)
if err != nil {
return err
}
s.handler, err = grpcserver.ProvideService(s.cfg, s.features, s.authenticator, s.tracing, prometheus.DefaultRegisterer)
if err != nil {
return err
}
entity.RegisterEntityStoreServer(s.handler.GetServer(), store)
err = s.handler.Run(ctx)
if err != nil {
return err
}
return nil
}
func (s *service) running(ctx context.Context) error {
// skip waiting for the server in prod mode
if !s.config.devMode {
<-ctx.Done()
return nil
}
select {
case err := <-s.stoppedCh:
if err != nil {
return err
}
case <-ctx.Done():
close(s.stopCh)
}
return nil
}


@@ -4,18 +4,19 @@ import (
"context"
"encoding/json"
"github.com/grafana/grafana/pkg/infra/grn"
"github.com/grafana/grafana/pkg/services/sqlstore/session"
"github.com/grafana/grafana/pkg/services/store/entity"
)
type folderInfo struct {
UID string `json:"uid"`
Name string `json:"name"` // original display name
Slug string `json:"slug"` // full slug
Guid string `json:"guid"`
UID string `json:"uid"`
Name string `json:"name"` // original display name
SlugPath string `json:"slug"` // full slug path
// original slug
originalSlug string
Slug string `json:"-"`
depth int32
left int32
@@ -33,51 +34,48 @@ type folderInfo struct {
// This will replace all entries in `entity_folder`
// This is pretty heavyweight, but it does give us a sorted folder list
// NOTE: this could be done async with a mutex/lock? reconciler pattern
func updateFolderTree(ctx context.Context, tx *session.SessionTx, tenant int64) error {
_, err := tx.Exec(ctx, "DELETE FROM entity_folder WHERE tenant_id=?", tenant)
func updateFolderTree(ctx context.Context, tx *session.SessionTx, tenantId int64) error {
_, err := tx.Exec(ctx, "DELETE FROM entity_folder WHERE tenant_id=?", tenantId)
if err != nil {
return err
}
query := "SELECT guid,uid,folder,name,slug" +
" FROM entity" +
" WHERE kind=? AND tenant_id=?" +
" ORDER BY slug asc"
args := []interface{}{entity.StandardKindFolder, tenantId}
all := []*folderInfo{}
rows, err := tx.Query(ctx, "SELECT uid,folder,name,slug FROM entity WHERE kind=? AND tenant_id=? ORDER BY slug asc;",
entity.StandardKindFolder, tenant)
rows, err := tx.Query(ctx, query, args...)
if err != nil {
return err
}
defer func() { _ = rows.Close() }()
for rows.Next() {
folder := folderInfo{
children: []*folderInfo{},
}
err = rows.Scan(&folder.UID, &folder.parentUID, &folder.Name, &folder.originalSlug)
err = rows.Scan(&folder.Guid, &folder.UID, &folder.parentUID, &folder.Name, &folder.Slug)
if err != nil {
break
return err
}
all = append(all, &folder)
}
errClose := rows.Close()
// TODO: Use some kind of multi-error.
// Until then, we want to prioritize errors coming from the .Scan
// over those coming from .Close.
if err != nil {
return err
}
if errClose != nil {
return errClose
}
root, lost, err := buildFolderTree(all)
if err != nil {
return err
}
err = insertFolderInfo(ctx, tx, tenant, root, false)
err = insertFolderInfo(ctx, tx, tenantId, root, false)
if err != nil {
return err
}
for _, folder := range lost {
err = insertFolderInfo(ctx, tx, tenant, folder, true)
err = insertFolderInfo(ctx, tx, tenantId, folder, true)
if err != nil {
return err
}
@@ -123,9 +121,9 @@ func setMPTTOrder(folder *folderInfo, stack []*folderInfo, idx int32) (int32, er
folder.stack = stack
if folder.depth > 0 {
folder.Slug = "/"
folder.SlugPath = "/"
for _, f := range stack {
folder.Slug += f.originalSlug + "/"
folder.SlugPath += f.Slug + "/"
}
}
@@ -139,17 +137,16 @@ func setMPTTOrder(folder *folderInfo, stack []*folderInfo, idx int32) (int32, er
return folder.right, nil
}
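// The lft/rgt values persisted below follow the nested-set ("MPTT") model
// computed in setMPTTOrder: each folder's interval contains the intervals of
// all of its descendants. For the tree exercised by the tests further down
// (Root > A > AA, plus B) the numbering works out to:
//
//   Root: 1..8   A: 2..5   AA: 3..4   B: 6..7
//
// so the whole subtree under A can be selected with a single range check
// (lft BETWEEN 2 AND 5) instead of a recursive walk.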
func insertFolderInfo(ctx context.Context, tx *session.SessionTx, tenant int64, folder *folderInfo, isDetached bool) error {
func insertFolderInfo(ctx context.Context, tx *session.SessionTx, tenantId int64, folder *folderInfo, isDetached bool) error {
js, _ := json.Marshal(folder.stack)
grn2 := grn.GRN{TenantID: tenant, ResourceKind: entity.StandardKindFolder, ResourceIdentifier: folder.UID}
_, err := tx.Exec(ctx,
`INSERT INTO entity_folder `+
"(grn, tenant_id, uid, slug_path, tree, depth, left, right, detached) "+
"(guid, tenant_id, uid, slug_path, tree, depth, lft, rgt, detached) "+
`VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)`,
grn2.ToGRNString(),
tenant,
folder.Guid,
tenantId,
folder.UID,
folder.Slug,
folder.SlugPath,
string(js),
folder.depth,
folder.left,
@@ -161,7 +158,7 @@ func insertFolderInfo(ctx context.Context, tx *session.SessionTx, tenant int64,
}
for _, sub := range folder.children {
err := insertFolderInfo(ctx, tx, tenant, sub, isDetached)
err := insertFolderInfo(ctx, tx, tenantId, sub, isDetached)
if err != nil {
return err
}


@@ -12,9 +12,9 @@ import (
func TestFolderSupport(t *testing.T) {
root, lost, err := buildFolderTree([]*folderInfo{
{UID: "A", parentUID: "", Name: "A", originalSlug: "a"},
{UID: "AA", parentUID: "A", Name: "AA", originalSlug: "aa"},
{UID: "B", parentUID: "", Name: "B", originalSlug: "b"},
{Guid: "GA", UID: "A", parentUID: "", Name: "A", Slug: "a"},
{Guid: "GAA", UID: "AA", parentUID: "A", Name: "AA", Slug: "aa"},
{Guid: "GB", UID: "B", parentUID: "", Name: "B", Slug: "b"},
})
require.NoError(t, err)
require.NotNil(t, root)
@@ -51,7 +51,7 @@ func appendFolder(folder *folderInfo, frame *data.Frame) {
frame.AppendRow(
folder.UID,
folder.Name,
folder.Slug,
folder.SlugPath,
folder.depth,
folder.left,
folder.right,

File diff suppressed because it is too large


@@ -1,116 +0,0 @@
package sqlstash
import (
"encoding/json"
"github.com/grafana/grafana/pkg/infra/grn"
"github.com/grafana/grafana/pkg/services/store/entity"
)
type summarySupport struct {
model *entity.EntitySummary
name string
description *string // null or empty
slug *string // null or empty
labels *string
fields *string
errors *string // should not allow saving with this!
marshaled []byte
// metadata for nested objects
parent_grn *grn.GRN
folder string
isNested bool // set when this is for a nested item
}
func newSummarySupport(summary *entity.EntitySummary) (*summarySupport, error) {
var err error
var js []byte
s := &summarySupport{
model: summary,
}
if summary != nil {
s.marshaled, err = json.Marshal(summary)
if err != nil {
return s, err
}
s.name = summary.Name
if summary.Description != "" {
s.description = &summary.Description
}
if summary.Slug != "" {
s.slug = &summary.Slug
}
if len(summary.Labels) > 0 {
js, err = json.Marshal(summary.Labels)
if err != nil {
return s, err
}
str := string(js)
s.labels = &str
}
if len(summary.Fields) > 0 {
js, err = json.Marshal(summary.Fields)
if err != nil {
return s, err
}
str := string(js)
s.fields = &str
}
if summary.Error != nil {
js, err = json.Marshal(summary.Error)
if err != nil {
return s, err
}
str := string(js)
s.errors = &str
}
}
return s, err
}
func (s summarySupport) toEntitySummary() (*entity.EntitySummary, error) {
var err error
summary := &entity.EntitySummary{
Name: s.name,
}
if s.description != nil {
summary.Description = *s.description
}
if s.slug != nil {
summary.Slug = *s.slug
}
if s.labels != nil {
b := []byte(*s.labels)
err = json.Unmarshal(b, &summary.Labels)
if err != nil {
return summary, err
}
}
if s.fields != nil {
b := []byte(*s.fields)
err = json.Unmarshal(b, &summary.Fields)
if err != nil {
return summary, err
}
}
if s.errors != nil {
b := []byte(*s.errors)
err = json.Unmarshal(b, &summary.Error)
if err != nil {
return summary, err
}
}
return summary, err
}
func (s *summarySupport) getParentGRN() *string {
if s.isNested {
t := s.parent_grn.ToGRNString()
return &t
}
return nil
}


@@ -3,16 +3,16 @@
// Frame[0]
// Name:
// Dimensions: 7 Fields by 4 Rows
// +----------------+----------------+----------------+---------------+---------------+---------------+--------------------------------------------------------------------------------+
// | Name: UID | Name: name | Name: slug | Name: depth | Name: left | Name: right | Name: tree |
// | Labels: | Labels: | Labels: | Labels: | Labels: | Labels: | Labels: |
// | Type: []string | Type: []string | Type: []string | Type: []int32 | Type: []int32 | Type: []int32 | Type: []json.RawMessage |
// +----------------+----------------+----------------+---------------+---------------+---------------+--------------------------------------------------------------------------------+
// | | Root | | 0 | 1 | 8 | [] |
// | A | A | /a/ | 1 | 2 | 5 | [{"uid":"A","name":"A","slug":"/a/"}] |
// | AA | AA | /a/aa/ | 2 | 3 | 4 | [{"uid":"A","name":"A","slug":"/a/"},{"uid":"AA","name":"AA","slug":"/a/aa/"}] |
// | B | B | /b/ | 1 | 6 | 7 | [{"uid":"B","name":"B","slug":"/b/"}] |
// +----------------+----------------+----------------+---------------+---------------+---------------+--------------------------------------------------------------------------------+
// +----------------+----------------+----------------+---------------+---------------+---------------+---------------------------------------------------------------------------------------------------------+
// | Name: UID | Name: name | Name: slug | Name: depth | Name: left | Name: right | Name: tree |
// | Labels: | Labels: | Labels: | Labels: | Labels: | Labels: | Labels: |
// | Type: []string | Type: []string | Type: []string | Type: []int32 | Type: []int32 | Type: []int32 | Type: []json.RawMessage |
// +----------------+----------------+----------------+---------------+---------------+---------------+---------------------------------------------------------------------------------------------------------+
// | | Root | | 0 | 1 | 8 | [] |
// | A | A | /a/ | 1 | 2 | 5 | [{"guid":"GA","uid":"A","name":"A","slug":"/a/"}] |
// | AA | AA | /a/aa/ | 2 | 3 | 4 | [{"guid":"GA","uid":"A","name":"A","slug":"/a/"},{"guid":"GAA","uid":"AA","name":"AA","slug":"/a/aa/"}] |
// | B | B | /b/ | 1 | 6 | 7 | [{"guid":"GB","uid":"B","name":"B","slug":"/b/"}] |
// +----------------+----------------+----------------+---------------+---------------+---------------+---------------------------------------------------------------------------------------------------------+
//
//
// 🌟 This was machine generated. Do not edit. 🌟
@@ -115,6 +115,7 @@
[],
[
{
"guid": "GA",
"uid": "A",
"name": "A",
"slug": "/a/"
@@ -122,11 +123,13 @@
],
[
{
"guid": "GA",
"uid": "A",
"name": "A",
"slug": "/a/"
},
{
"guid": "GAA",
"uid": "AA",
"name": "AA",
"slug": "/a/aa/"
@@ -134,6 +137,7 @@
],
[
{
"guid": "GB",
"uid": "B",
"name": "B",
"slug": "/b/"


@@ -5,7 +5,11 @@ import (
"encoding/hex"
)
func createContentsHash(contents []byte) string {
hash := md5.Sum(contents)
func createContentsHash(body []byte, meta []byte, status []byte) string {
h := md5.New()
_, _ = h.Write(meta)
_, _ = h.Write(body)
_, _ = h.Write(status)
hash := h.Sum(nil)
return hex.EncodeToString(hash[:])
}
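// Rough usage sketch (variable names are illustrative): the resulting hex
// digest is presumably what lands in the etag columns defined in the
// migrations above, and it now covers the meta and status payloads alongside
// the body:
//
//   etag := createContentsHash(body, metaJSON, statusJSON)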


@@ -64,7 +64,7 @@ func createTestContext(t *testing.T) testContext {
dir, path := testinfra.CreateGrafDir(t, testinfra.GrafanaOpts{
EnableFeatureToggles: []string{
featuremgmt.FlagGrpcServer,
featuremgmt.FlagEntityStore,
featuremgmt.FlagUnifiedStorage,
},
AppModeProduction: false, // required for migrations to run
GRPCServerAddress: "127.0.0.1:0", // :0 for choosing the port automatically


@@ -96,7 +96,7 @@ func requireEntityMatch(t *testing.T, obj *entity.Entity, m rawEntityMatcher) {
require.True(t, len(mismatches) == 0, mismatches)
}
func requireVersionMatch(t *testing.T, obj *entity.EntityVersionInfo, m objectVersionMatcher) {
func requireVersionMatch(t *testing.T, obj *entity.Entity, m objectVersionMatcher) {
t.Helper()
mismatches := ""
@@ -154,9 +154,11 @@ func TestIntegrationEntityServer(t *testing.T) {
t.Run("should be able to read persisted objects", func(t *testing.T) {
before := time.Now()
writeReq := &entity.WriteEntityRequest{
GRN: testGrn,
Body: body,
Comment: "first entity!",
Entity: &entity.Entity{
GRN: testGrn,
Body: body,
Message: "first entity!",
},
}
writeResp, err := testCtx.client.Write(ctx, writeReq)
require.NoError(t, err)
@@ -165,7 +167,7 @@ func TestIntegrationEntityServer(t *testing.T) {
updatedRange: []time.Time{before, time.Now()},
updatedBy: fakeUser,
version: &firstVersion,
comment: &writeReq.Comment,
comment: &writeReq.Entity.Message,
}
requireVersionMatch(t, writeResp.Entity, versionMatcher)
@@ -175,7 +177,6 @@ func TestIntegrationEntityServer(t *testing.T) {
WithBody: true,
})
require.NoError(t, err)
require.Nil(t, readResp.SummaryJson)
require.NotNil(t, readResp)
foundGRN := readResp.GRN
@@ -200,7 +201,7 @@ func TestIntegrationEntityServer(t *testing.T) {
PreviousVersion: writeResp.Entity.Version,
})
require.NoError(t, err)
require.True(t, deleteResp.OK)
require.Equal(t, deleteResp.Status, entity.DeleteEntityResponse_DELETED)
readRespAfterDelete, err := testCtx.client.Read(ctx, &entity.ReadEntityRequest{
GRN: testGrn,
@@ -219,9 +220,11 @@ func TestIntegrationEntityServer(t *testing.T) {
}
writeReq1 := &entity.WriteEntityRequest{
GRN: testGrn,
Body: body,
Comment: "first entity!",
Entity: &entity.Entity{
GRN: testGrn,
Body: body,
Message: "first entity!",
},
}
writeResp1, err := testCtx.client.Write(ctx, writeReq1)
require.NoError(t, err)
@@ -230,9 +233,11 @@ func TestIntegrationEntityServer(t *testing.T) {
body2 := []byte("{\"name\":\"John2\"}")
writeReq2 := &entity.WriteEntityRequest{
GRN: testGrn,
Body: body2,
Comment: "update1",
Entity: &entity.Entity{
GRN: testGrn,
Body: body2,
Message: "update1",
},
}
writeResp2, err := testCtx.client.Write(ctx, writeReq2)
require.NoError(t, err)
@@ -248,9 +253,11 @@ func TestIntegrationEntityServer(t *testing.T) {
body3 := []byte("{\"name\":\"John3\"}")
writeReq3 := &entity.WriteEntityRequest{
GRN: testGrn,
Body: body3,
Comment: "update3",
Entity: &entity.Entity{
GRN: testGrn,
Body: body3,
Message: "update3",
},
}
writeResp3, err := testCtx.client.Write(ctx, writeReq3)
require.NoError(t, err)
@@ -271,7 +278,6 @@ func TestIntegrationEntityServer(t *testing.T) {
WithBody: true,
})
require.NoError(t, err)
require.Nil(t, readRespLatest.SummaryJson)
requireEntityMatch(t, readRespLatest, latestMatcher)
readRespFirstVer, err := testCtx.client.Read(ctx, &entity.ReadEntityRequest{
@@ -281,7 +287,6 @@ func TestIntegrationEntityServer(t *testing.T) {
})
require.NoError(t, err)
require.Nil(t, readRespFirstVer.SummaryJson)
require.NotNil(t, readRespFirstVer)
requireEntityMatch(t, readRespFirstVer, rawEntityMatcher{
grn: testGrn,
@@ -297,7 +302,7 @@ func TestIntegrationEntityServer(t *testing.T) {
GRN: testGrn,
})
require.NoError(t, err)
require.Equal(t, []*entity.EntityVersionInfo{
require.Equal(t, []*entity.Entity{
writeResp3.Entity,
writeResp2.Entity,
writeResp1.Entity,
@@ -308,58 +313,66 @@ func TestIntegrationEntityServer(t *testing.T) {
PreviousVersion: writeResp3.Entity.Version,
})
require.NoError(t, err)
require.True(t, deleteResp.OK)
require.Equal(t, deleteResp.Status, entity.DeleteEntityResponse_DELETED)
})
t.Run("should be able to search for objects", func(t *testing.T) {
t.Run("should be able to list objects", func(t *testing.T) {
uid2 := "uid2"
uid3 := "uid3"
uid4 := "uid4"
kind2 := entity.StandardKindPlaylist
w1, err := testCtx.client.Write(ctx, &entity.WriteEntityRequest{
GRN: testGrn,
Body: body,
Entity: &entity.Entity{
GRN: testGrn,
Body: body,
},
})
require.NoError(t, err)
w2, err := testCtx.client.Write(ctx, &entity.WriteEntityRequest{
GRN: &grn.GRN{
ResourceIdentifier: uid2,
ResourceKind: kind,
Entity: &entity.Entity{
GRN: &grn.GRN{
ResourceIdentifier: uid2,
ResourceKind: kind,
},
Body: body,
},
Body: body,
})
require.NoError(t, err)
w3, err := testCtx.client.Write(ctx, &entity.WriteEntityRequest{
GRN: &grn.GRN{
ResourceIdentifier: uid3,
ResourceKind: kind2,
Entity: &entity.Entity{
GRN: &grn.GRN{
ResourceIdentifier: uid3,
ResourceKind: kind2,
},
Body: body,
},
Body: body,
})
require.NoError(t, err)
w4, err := testCtx.client.Write(ctx, &entity.WriteEntityRequest{
GRN: &grn.GRN{
ResourceIdentifier: uid4,
ResourceKind: kind2,
Entity: &entity.Entity{
GRN: &grn.GRN{
ResourceIdentifier: uid4,
ResourceKind: kind2,
},
Body: body,
},
Body: body,
})
require.NoError(t, err)
search, err := testCtx.client.Search(ctx, &entity.EntitySearchRequest{
resp, err := testCtx.client.List(ctx, &entity.EntityListRequest{
Kind: []string{kind, kind2},
WithBody: false,
})
require.NoError(t, err)
require.NotNil(t, search)
uids := make([]string, 0, len(search.Results))
kinds := make([]string, 0, len(search.Results))
version := make([]string, 0, len(search.Results))
for _, res := range search.Results {
require.NotNil(t, resp)
uids := make([]string, 0, len(resp.Results))
kinds := make([]string, 0, len(resp.Results))
version := make([]string, 0, len(resp.Results))
for _, res := range resp.Results {
uids = append(uids, res.GRN.ResourceIdentifier)
kinds = append(kinds, res.GRN.ResourceKind)
version = append(version, res.Version)
@@ -374,14 +387,14 @@ func TestIntegrationEntityServer(t *testing.T) {
}, version)
// Again with only one kind
searchKind1, err := testCtx.client.Search(ctx, &entity.EntitySearchRequest{
respKind1, err := testCtx.client.List(ctx, &entity.EntityListRequest{
Kind: []string{kind},
})
require.NoError(t, err)
uids = make([]string, 0, len(searchKind1.Results))
kinds = make([]string, 0, len(searchKind1.Results))
version = make([]string, 0, len(searchKind1.Results))
for _, res := range searchKind1.Results {
uids = make([]string, 0, len(respKind1.Results))
kinds = make([]string, 0, len(respKind1.Results))
version = make([]string, 0, len(respKind1.Results))
for _, res := range respKind1.Results {
uids = append(uids, res.GRN.ResourceIdentifier)
kinds = append(kinds, res.GRN.ResourceKind)
version = append(version, res.Version)
@@ -397,24 +410,28 @@ func TestIntegrationEntityServer(t *testing.T) {
t.Run("should be able to filter objects based on their labels", func(t *testing.T) {
kind := entity.StandardKindDashboard
_, err := testCtx.client.Write(ctx, &entity.WriteEntityRequest{
GRN: &grn.GRN{
ResourceKind: kind,
ResourceIdentifier: "blue-green",
Entity: &entity.Entity{
GRN: &grn.GRN{
ResourceKind: kind,
ResourceIdentifier: "blue-green",
},
Body: []byte(dashboardWithTagsBlueGreen),
},
Body: []byte(dashboardWithTagsBlueGreen),
})
require.NoError(t, err)
_, err = testCtx.client.Write(ctx, &entity.WriteEntityRequest{
GRN: &grn.GRN{
ResourceKind: kind,
ResourceIdentifier: "red-green",
Entity: &entity.Entity{
GRN: &grn.GRN{
ResourceKind: kind,
ResourceIdentifier: "red-green",
},
Body: []byte(dashboardWithTagsRedGreen),
},
Body: []byte(dashboardWithTagsRedGreen),
})
require.NoError(t, err)
search, err := testCtx.client.Search(ctx, &entity.EntitySearchRequest{
resp, err := testCtx.client.List(ctx, &entity.EntityListRequest{
Kind: []string{kind},
WithBody: false,
WithLabels: true,
@@ -423,11 +440,11 @@ func TestIntegrationEntityServer(t *testing.T) {
},
})
require.NoError(t, err)
require.NotNil(t, search)
require.Len(t, search.Results, 1)
require.Equal(t, search.Results[0].GRN.ResourceIdentifier, "red-green")
require.NotNil(t, resp)
require.Len(t, resp.Results, 1)
require.Equal(t, resp.Results[0].GRN.ResourceIdentifier, "red-green")
search, err = testCtx.client.Search(ctx, &entity.EntitySearchRequest{
resp, err = testCtx.client.List(ctx, &entity.EntityListRequest{
Kind: []string{kind},
WithBody: false,
WithLabels: true,
@@ -437,11 +454,11 @@ func TestIntegrationEntityServer(t *testing.T) {
},
})
require.NoError(t, err)
require.NotNil(t, search)
require.Len(t, search.Results, 1)
require.Equal(t, search.Results[0].GRN.ResourceIdentifier, "red-green")
require.NotNil(t, resp)
require.Len(t, resp.Results, 1)
require.Equal(t, resp.Results[0].GRN.ResourceIdentifier, "red-green")
search, err = testCtx.client.Search(ctx, &entity.EntitySearchRequest{
resp, err = testCtx.client.List(ctx, &entity.EntityListRequest{
Kind: []string{kind},
WithBody: false,
WithLabels: true,
@@ -450,10 +467,10 @@ func TestIntegrationEntityServer(t *testing.T) {
},
})
require.NoError(t, err)
require.NotNil(t, search)
require.Len(t, search.Results, 0)
require.NotNil(t, resp)
require.Len(t, resp.Results, 0)
search, err = testCtx.client.Search(ctx, &entity.EntitySearchRequest{
resp, err = testCtx.client.List(ctx, &entity.EntityListRequest{
Kind: []string{kind},
WithBody: false,
WithLabels: true,
@@ -462,10 +479,10 @@ func TestIntegrationEntityServer(t *testing.T) {
},
})
require.NoError(t, err)
require.NotNil(t, search)
require.Len(t, search.Results, 2)
require.NotNil(t, resp)
require.Len(t, resp.Results, 2)
search, err = testCtx.client.Search(ctx, &entity.EntitySearchRequest{
resp, err = testCtx.client.List(ctx, &entity.EntityListRequest{
Kind: []string{kind},
WithBody: false,
WithLabels: true,
@@ -474,7 +491,7 @@ func TestIntegrationEntityServer(t *testing.T) {
},
})
require.NoError(t, err)
require.NotNil(t, search)
require.Len(t, search.Results, 0)
require.NotNil(t, resp)
require.Len(t, resp.Results, 0)
})
}


@@ -3,10 +3,7 @@ package entity
// The admin request is a superset of write request features
func ToAdminWriteEntityRequest(req *WriteEntityRequest) *AdminWriteEntityRequest {
return &AdminWriteEntityRequest{
GRN: req.GRN,
Body: req.Body,
Folder: req.Folder,
Comment: req.Comment,
Entity: req.Entity,
PreviousVersion: req.PreviousVersion,
}
}


@@ -19,7 +19,6 @@ import (
"github.com/grafana/grafana/pkg/services/org"
"github.com/grafana/grafana/pkg/services/quota"
"github.com/grafana/grafana/pkg/services/sqlstore"
"github.com/grafana/grafana/pkg/services/store/entity/migrations"
"github.com/grafana/grafana/pkg/services/user"
"github.com/grafana/grafana/pkg/setting"
)
@@ -104,10 +103,6 @@ func ProvideService(
grafanaStorageLogger.Warn("Error loading storage config", "error", err)
}
if err := migrations.MigrateEntityStore(sql, features); err != nil {
return nil, err
}
// always exists
globalRoots := []storageRuntime{
newDiskStorage(RootStorageMeta{