K8s: Refactor client to be generic (#99231)
parent d346819d62
commit b309c5daed
pkg/services/apiserver/client/client.go (new file, 237 lines)
@@ -0,0 +1,237 @@
package client

import (
    "context"
    "fmt"
    "time"

    v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
    "k8s.io/apimachinery/pkg/runtime/schema"
    "k8s.io/client-go/dynamic"

    "github.com/grafana/grafana/pkg/apimachinery/identity"
    "github.com/grafana/grafana/pkg/infra/log"
    "github.com/grafana/grafana/pkg/services/apiserver"
    "github.com/grafana/grafana/pkg/services/apiserver/endpoints/request"
    "github.com/grafana/grafana/pkg/storage/unified/resource"
    k8sUser "k8s.io/apiserver/pkg/authentication/user"
    k8sRequest "k8s.io/apiserver/pkg/endpoints/request"
)

type K8sHandler interface {
    GetNamespace(orgID int64) string
    Get(ctx context.Context, name string, orgID int64, subresource ...string) (*unstructured.Unstructured, error)
    Create(ctx context.Context, obj *unstructured.Unstructured, orgID int64) (*unstructured.Unstructured, error)
    Update(ctx context.Context, obj *unstructured.Unstructured, orgID int64) (*unstructured.Unstructured, error)
    Delete(ctx context.Context, name string, orgID int64, options v1.DeleteOptions) error
    DeleteCollection(ctx context.Context, orgID int64) error
    List(ctx context.Context, orgID int64, options v1.ListOptions) (*unstructured.UnstructuredList, error)
    Search(ctx context.Context, orgID int64, in *resource.ResourceSearchRequest) (*resource.ResourceSearchResponse, error)
    GetStats(ctx context.Context, orgID int64) (*resource.ResourceStatsResponse, error)
}

var _ K8sHandler = (*k8sHandler)(nil)

type k8sHandler struct {
    namespacer         request.NamespaceMapper
    gvr                schema.GroupVersionResource
    restConfigProvider apiserver.RestConfigProvider
    searcher           resource.ResourceIndexClient
}

func NewK8sHandler(namespacer request.NamespaceMapper, gvr schema.GroupVersionResource, restConfigProvider apiserver.RestConfigProvider, searcher resource.ResourceIndexClient) K8sHandler {
    return &k8sHandler{
        namespacer:         namespacer,
        gvr:                gvr,
        restConfigProvider: restConfigProvider,
        searcher:           searcher,
    }
}

func (h *k8sHandler) GetNamespace(orgID int64) string {
    return h.namespacer(orgID)
}

func (h *k8sHandler) Get(ctx context.Context, name string, orgID int64, subresource ...string) (*unstructured.Unstructured, error) {
    // create a new context - prevents issues when the request stems from the k8s api itself
    // otherwise the context goes through the handlers twice and causes issues
    newCtx, cancel, err := h.getK8sContext(ctx)
    if err != nil {
        return nil, err
    } else if cancel != nil {
        defer cancel()
    }

    client, ok := h.getClient(newCtx, orgID)
    if !ok {
        return nil, nil
    }

    return client.Get(newCtx, name, v1.GetOptions{}, subresource...)
}

func (h *k8sHandler) Create(ctx context.Context, obj *unstructured.Unstructured, orgID int64) (*unstructured.Unstructured, error) {
    // create a new context - prevents issues when the request stems from the k8s api itself
    // otherwise the context goes through the handlers twice and causes issues
    newCtx, cancel, err := h.getK8sContext(ctx)
    if err != nil {
        return nil, err
    } else if cancel != nil {
        defer cancel()
    }

    client, ok := h.getClient(newCtx, orgID)
    if !ok {
        return nil, nil
    }

    return client.Create(newCtx, obj, v1.CreateOptions{})
}

func (h *k8sHandler) Update(ctx context.Context, obj *unstructured.Unstructured, orgID int64) (*unstructured.Unstructured, error) {
    // create a new context - prevents issues when the request stems from the k8s api itself
    // otherwise the context goes through the handlers twice and causes issues
    newCtx, cancel, err := h.getK8sContext(ctx)
    if err != nil {
        return nil, err
    } else if cancel != nil {
        defer cancel()
    }

    client, ok := h.getClient(newCtx, orgID)
    if !ok {
        return nil, nil
    }

    return client.Update(newCtx, obj, v1.UpdateOptions{})
}

func (h *k8sHandler) Delete(ctx context.Context, name string, orgID int64, options v1.DeleteOptions) error {
    // create a new context - prevents issues when the request stems from the k8s api itself
    // otherwise the context goes through the handlers twice and causes issues
    newCtx, cancel, err := h.getK8sContext(ctx)
    if err != nil {
        return err
    } else if cancel != nil {
        defer cancel()
    }

    client, ok := h.getClient(newCtx, orgID)
    if !ok {
        return nil
    }

    return client.Delete(newCtx, name, options)
}

func (h *k8sHandler) DeleteCollection(ctx context.Context, orgID int64) error {
    // create a new context - prevents issues when the request stems from the k8s api itself
    // otherwise the context goes through the handlers twice and causes issues
    newCtx, cancel, err := h.getK8sContext(ctx)
    if err != nil {
        return err
    } else if cancel != nil {
        defer cancel()
    }

    client, ok := h.getClient(newCtx, orgID)
    if !ok {
        return fmt.Errorf("could not get k8s client")
    }

    return client.DeleteCollection(newCtx, v1.DeleteOptions{}, v1.ListOptions{})
}

func (h *k8sHandler) List(ctx context.Context, orgID int64, options v1.ListOptions) (*unstructured.UnstructuredList, error) {
    // create a new context - prevents issues when the request stems from the k8s api itself
    // otherwise the context goes through the handlers twice and causes issues
    newCtx, cancel, err := h.getK8sContext(ctx)
    if err != nil {
        return nil, err
    } else if cancel != nil {
        defer cancel()
    }

    client, ok := h.getClient(newCtx, orgID)
    if !ok {
        return nil, fmt.Errorf("could not get k8s client")
    }

    return client.List(newCtx, options)
}

func (h *k8sHandler) Search(ctx context.Context, orgID int64, in *resource.ResourceSearchRequest) (*resource.ResourceSearchResponse, error) {
    // goes directly through grpc, so doesn't need the new context
    if in.Options == nil {
        in.Options = &resource.ListOptions{}
    }

    in.Options.Key = &resource.ResourceKey{
        Namespace: h.GetNamespace(orgID),
        Group:     h.gvr.Group,
        Resource:  h.gvr.Resource,
    }

    return h.searcher.Search(ctx, in)
}

func (h *k8sHandler) GetStats(ctx context.Context, orgID int64) (*resource.ResourceStatsResponse, error) {
    // goes directly through grpc, so doesn't need the new context
    return h.searcher.GetStats(ctx, &resource.ResourceStatsRequest{
        Namespace: h.GetNamespace(orgID),
        Kinds: []string{
            h.gvr.Group + "/" + h.gvr.Resource,
        },
    })
}

func (h *k8sHandler) getClient(ctx context.Context, orgID int64) (dynamic.ResourceInterface, bool) {
    cfg := h.restConfigProvider.GetRestConfig(ctx)
    if cfg == nil {
        return nil, false
    }

    dyn, err := dynamic.NewForConfig(cfg)
    if err != nil {
        return nil, false
    }

    return dyn.Resource(h.gvr).Namespace(h.GetNamespace(orgID)), true
}

func (h *k8sHandler) getK8sContext(ctx context.Context) (context.Context, context.CancelFunc, error) {
    requester, requesterErr := identity.GetRequester(ctx)
    if requesterErr != nil {
        return nil, nil, requesterErr
    }

    user, exists := k8sRequest.UserFrom(ctx)
    if !exists {
        // add in k8s user if not there yet
        var ok bool
        user, ok = requester.(k8sUser.Info)
        if !ok {
            return nil, nil, fmt.Errorf("could not convert user to k8s user")
        }
    }

    newCtx := k8sRequest.WithUser(context.Background(), user)
    newCtx = log.WithContextualAttributes(newCtx, log.FromContext(ctx))
    // TODO: after GLSA token workflow is removed, make this return early
    // and move the else below to be unconditional
    if requesterErr == nil {
        newCtxWithRequester := identity.WithRequester(newCtx, requester)
        newCtx = newCtxWithRequester
    }

    // inherit the deadline from the original context, if it exists
    deadline, ok := ctx.Deadline()
    if ok {
        var newCancel context.CancelFunc
        newCtx, newCancel = context.WithTimeout(newCtx, time.Until(deadline))
        return newCtx, newCancel, nil
    }

    return newCtx, nil, nil
}
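For orientation, a minimal usage sketch follows (not part of this commit; the example service, GroupVersionResource, and method below are illustrative assumptions) of how another service could wire the generic handler for its own resource:

// Hypothetical example only: wiring the generic K8sHandler for an assumed
// "examples" resource. Names not found in this commit are placeholders.
package exampleservice

import (
    "context"

    "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
    "k8s.io/apimachinery/pkg/runtime/schema"

    "github.com/grafana/grafana/pkg/services/apiserver"
    "github.com/grafana/grafana/pkg/services/apiserver/client"
    "github.com/grafana/grafana/pkg/services/apiserver/endpoints/request"
    "github.com/grafana/grafana/pkg/storage/unified/resource"
)

type exampleService struct {
    k8sclient client.K8sHandler
}

// newExampleService mirrors the constructor pattern used by the dashboard
// service below: pick a GroupVersionResource and reuse the shared namespacer,
// rest config provider, and unified search client.
func newExampleService(namespacer request.NamespaceMapper, provider apiserver.RestConfigProvider, searcher resource.ResourceIndexClient) *exampleService {
    gvr := schema.GroupVersionResource{Group: "example.grafana.app", Version: "v0alpha1", Resource: "examples"} // placeholder GVR
    return &exampleService{
        k8sclient: client.NewK8sHandler(namespacer, gvr, provider, searcher),
    }
}

// fetch delegates namespace mapping, user propagation, and deadline handling
// to the handler; note that Get returns (nil, nil) when no client could be built.
func (s *exampleService) fetch(ctx context.Context, name string, orgID int64) (*unstructured.Unstructured, error) {
    return s.k8sclient.Get(ctx, name, orgID)
}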
pkg/services/apiserver/client/client_mock.go (new file, 81 lines)
@@ -0,0 +1,81 @@
package client

import (
    "context"

    "github.com/stretchr/testify/mock"

    "github.com/grafana/grafana/pkg/storage/unified/resource"
    v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

var _ K8sHandler = (*MockK8sHandler)(nil)

type MockK8sHandler struct {
    mock.Mock
}

func (m *MockK8sHandler) GetNamespace(orgID int64) string {
    args := m.Called(orgID)
    return args.String(0)
}

func (m *MockK8sHandler) Get(ctx context.Context, name string, orgID int64, subresource ...string) (*unstructured.Unstructured, error) {
    args := m.Called(ctx, name, orgID, subresource)
    if args.Get(0) == nil {
        return nil, args.Error(1)
    }

    return args.Get(0).(*unstructured.Unstructured), args.Error(1)
}

func (m *MockK8sHandler) Create(ctx context.Context, obj *unstructured.Unstructured, orgID int64) (*unstructured.Unstructured, error) {
    args := m.Called(ctx, obj, orgID)
    if args.Get(0) == nil {
        return nil, args.Error(1)
    }
    return args.Get(0).(*unstructured.Unstructured), args.Error(1)
}

func (m *MockK8sHandler) Update(ctx context.Context, obj *unstructured.Unstructured, orgID int64) (*unstructured.Unstructured, error) {
    args := m.Called(ctx, obj, orgID)
    if args.Get(0) == nil {
        return nil, args.Error(1)
    }
    return args.Get(0).(*unstructured.Unstructured), args.Error(1)
}

func (m *MockK8sHandler) Delete(ctx context.Context, name string, orgID int64, options v1.DeleteOptions) error {
    args := m.Called(ctx, name, orgID, options)
    return args.Error(0)
}

func (m *MockK8sHandler) DeleteCollection(ctx context.Context, orgID int64) error {
    args := m.Called(ctx, orgID)
    return args.Error(0)
}

func (m *MockK8sHandler) List(ctx context.Context, orgID int64, options v1.ListOptions) (*unstructured.UnstructuredList, error) {
    args := m.Called(ctx, orgID, options)
    if args.Get(0) == nil {
        return nil, args.Error(1)
    }
    return args.Get(0).(*unstructured.UnstructuredList), args.Error(1)
}

func (m *MockK8sHandler) Search(ctx context.Context, orgID int64, in *resource.ResourceSearchRequest) (*resource.ResourceSearchResponse, error) {
    args := m.Called(ctx, orgID, in)
    if args.Get(0) == nil {
        return nil, args.Error(1)
    }
    return args.Get(0).(*resource.ResourceSearchResponse), args.Error(1)
}

func (m *MockK8sHandler) GetStats(ctx context.Context, orgID int64) (*resource.ResourceStatsResponse, error) {
    args := m.Called(ctx, orgID)
    if args.Get(0) == nil {
        return nil, args.Error(1)
    }
    return args.Get(0).(*resource.ResourceStatsResponse), args.Error(1)
}
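A minimal test sketch follows (not part of this commit; the test name and expectations are illustrative, and it assumes the usual testing, testify, context, and unstructured imports) showing how MockK8sHandler can stand in for the handler:

// Hypothetical test only: demonstrates stubbing MockK8sHandler with testify.
func TestMockK8sHandlerSketch(t *testing.T) {
    mockCli := new(MockK8sHandler)
    mockCli.On("GetNamespace", int64(1)).Return("default")
    mockCli.On("Get", mock.Anything, "uid", int64(1), mock.Anything).
        Return(&unstructured.Unstructured{Object: map[string]any{
            "metadata": map[string]any{"name": "uid"},
        }}, nil)

    // Exercise the mocked interface the same way production code would.
    obj, err := mockCli.Get(context.Background(), "uid", 1)
    require.NoError(t, err)
    require.Equal(t, "uid", obj.GetName())
    require.Equal(t, "default", mockCli.GetNamespace(1))
    mockCli.AssertExpectations(t)
}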
@@ -20,11 +20,7 @@ import (
    apierrors "k8s.io/apimachinery/pkg/api/errors"
    v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
    "k8s.io/apimachinery/pkg/runtime/schema"
    "k8s.io/apimachinery/pkg/selection"
    k8sUser "k8s.io/apiserver/pkg/authentication/user"
    k8sRequest "k8s.io/apiserver/pkg/endpoints/request"
    "k8s.io/client-go/dynamic"

    "github.com/grafana/authlib/claims"

@@ -38,6 +34,7 @@ import (
    "github.com/grafana/grafana/pkg/infra/slugify"
    "github.com/grafana/grafana/pkg/services/accesscontrol"
    "github.com/grafana/grafana/pkg/services/apiserver"
    "github.com/grafana/grafana/pkg/services/apiserver/client"
    "github.com/grafana/grafana/pkg/services/apiserver/endpoints/request"
    "github.com/grafana/grafana/pkg/services/dashboards"
    "github.com/grafana/grafana/pkg/services/dashboards/dashboardaccess"
@@ -95,26 +92,10 @@ type DashboardServiceImpl struct {
    folderPermissions    accesscontrol.FolderPermissionsService
    dashboardPermissions accesscontrol.DashboardPermissionsService
    ac                   accesscontrol.AccessControl
    k8sclient            dashboardK8sHandler
    k8sclient            client.K8sHandler
    metrics              *dashboardsMetrics
}

// interface to allow for testing
type dashboardK8sHandler interface {
    getClient(ctx context.Context, orgID int64) (dynamic.ResourceInterface, bool)
    getNamespace(orgID int64) string
    getSearcher() resource.ResourceIndexClient
}

var _ dashboardK8sHandler = (*dashk8sHandler)(nil)

type dashk8sHandler struct {
    namespacer         request.NamespaceMapper
    gvr                schema.GroupVersionResource
    restConfigProvider apiserver.RestConfigProvider
    searcher           resource.ResourceIndexClient
}

// This is the uber service that implements a three smaller services
func ProvideDashboardServiceImpl(
    cfg *setting.Cfg, dashboardStore dashboards.Store, folderStore folder.FolderStore,
@@ -124,12 +105,7 @@ func ProvideDashboardServiceImpl(
    restConfigProvider apiserver.RestConfigProvider, userService user.Service, unified resource.ResourceClient,
    quotaService quota.Service, orgService org.Service,
) (*DashboardServiceImpl, error) {
    k8sHandler := &dashk8sHandler{
        gvr:                v0alpha1.DashboardResourceInfo.GroupVersionResource(),
        namespacer:         request.GetNamespaceMapper(cfg),
        restConfigProvider: restConfigProvider,
        searcher:           unified,
    }
    k8sHandler := client.NewK8sHandler(request.GetNamespaceMapper(cfg), v0alpha1.DashboardResourceInfo.GroupVersionResource(), restConfigProvider, unified)

    dashSvc := &DashboardServiceImpl{
        cfg: cfg,
@@ -207,12 +183,7 @@ func (dr *DashboardServiceImpl) Count(ctx context.Context, scopeParams *quota.Sc

func (dr *DashboardServiceImpl) CountDashboardsInOrg(ctx context.Context, orgID int64) (int64, error) {
    if dr.features.IsEnabledGlobally(featuremgmt.FlagKubernetesCliDashboards) {
        resp, err := dr.k8sclient.getSearcher().GetStats(ctx, &resource.ResourceStatsRequest{
            Namespace: dr.k8sclient.getNamespace(orgID),
            Kinds: []string{
                v0alpha1.GROUP + "/" + v0alpha1.DashboardResourceInfo.GetName(),
            },
        })
        resp, err := dr.k8sclient.GetStats(ctx, orgID)
        if err != nil {
            return 0, err
        }
@@ -1355,14 +1326,7 @@ func makeQueryResult(query *dashboards.FindPersistedDashboardsQuery, res []dashb

func (dr *DashboardServiceImpl) GetDashboardTags(ctx context.Context, query *dashboards.GetDashboardTagsQuery) ([]*dashboards.DashboardTagCloudItem, error) {
    if dr.features.IsEnabled(ctx, featuremgmt.FlagKubernetesCliDashboards) {
        res, err := dr.k8sclient.getSearcher().Search(ctx, &resource.ResourceSearchRequest{
            Options: &resource.ListOptions{
                Key: &resource.ResourceKey{
                    Namespace: dr.k8sclient.getNamespace(query.OrgID),
                    Group:     "dashboard.grafana.app",
                    Resource:  "dashboards",
                },
            },
        res, err := dr.k8sclient.Search(ctx, query.OrgID, &resource.ResourceSearchRequest{
            Facet: map[string]*resource.ResourceSearchRequest_Facet{
                "tags": {
                    Field: "tags",
@@ -1434,79 +1398,7 @@ func (dr *DashboardServiceImpl) CleanUpDeletedDashboards(ctx context.Context) (i
// Dashboard k8s functions
// -----------------------------------------------------------------------------------------

func (dk8s *dashk8sHandler) getClient(ctx context.Context, orgID int64) (dynamic.ResourceInterface, bool) {
    cfg := dk8s.restConfigProvider.GetRestConfig(ctx)
    if cfg == nil {
        return nil, false
    }

    dyn, err := dynamic.NewForConfig(cfg)
    if err != nil {
        return nil, false
    }

    return dyn.Resource(dk8s.gvr).Namespace(dk8s.getNamespace(orgID)), true
}

func (dk8s *dashk8sHandler) getNamespace(orgID int64) string {
    return dk8s.namespacer(orgID)
}

func (dk8s *dashk8sHandler) getSearcher() resource.ResourceIndexClient {
    return dk8s.searcher
}

func (dr *DashboardServiceImpl) getK8sContext(ctx context.Context) (context.Context, context.CancelFunc, error) {
    requester, requesterErr := identity.GetRequester(ctx)
    if requesterErr != nil {
        return nil, nil, requesterErr
    }

    user, exists := k8sRequest.UserFrom(ctx)
    if !exists {
        // add in k8s user if not there yet
        var ok bool
        user, ok = requester.(k8sUser.Info)
        if !ok {
            return nil, nil, fmt.Errorf("could not convert user to k8s user")
        }
    }

    newCtx := k8sRequest.WithUser(context.Background(), user)
    newCtx = log.WithContextualAttributes(newCtx, log.FromContext(ctx))
    // TODO: after GLSA token workflow is removed, make this return early
    // and move the else below to be unconditional
    if requesterErr == nil {
        newCtxWithRequester := identity.WithRequester(newCtx, requester)
        newCtx = newCtxWithRequester
    }

    // inherit the deadline from the original context, if it exists
    deadline, ok := ctx.Deadline()
    if ok {
        var newCancel context.CancelFunc
        newCtx, newCancel = context.WithTimeout(newCtx, time.Until(deadline))
        return newCtx, newCancel, nil
    }

    return newCtx, nil, nil
}

func (dr *DashboardServiceImpl) getDashboardThroughK8s(ctx context.Context, query *dashboards.GetDashboardQuery) (*dashboards.Dashboard, error) {
    // create a new context - prevents issues when the request stems from the k8s api itself
    // otherwise the context goes through the handlers twice and causes issues
    newCtx, cancel, err := dr.getK8sContext(ctx)
    if err != nil {
        return nil, err
    } else if cancel != nil {
        defer cancel()
    }

    client, ok := dr.k8sclient.getClient(newCtx, query.OrgID)
    if !ok {
        return nil, nil
    }

    // if including deleted dashboards for restore, use the /latest subresource
    subresource := ""
    if query.IncludeDeleted && dr.features.IsEnabledGlobally(featuremgmt.FlagKubernetesRestore) {
@@ -1525,7 +1417,7 @@ func (dr *DashboardServiceImpl) getDashboardThroughK8s(ctx context.Context, quer
        query.UID = result.UID
    }

    out, err := client.Get(newCtx, query.UID, v1.GetOptions{}, subresource)
    out, err := dr.k8sclient.Get(ctx, query.UID, query.OrgID, subresource)
    if err != nil && !apierrors.IsNotFound(err) {
        return nil, err
    } else if err != nil || out == nil {
@@ -1541,21 +1433,7 @@ func (dr *DashboardServiceImpl) saveProvisionedDashboardThroughK8s(ctx context.C
        cmd.OrgID = 1
    }

    // create a new context - prevents issues when the request stems from the k8s api itself
    // otherwise the context goes through the handlers twice and causes issues
    newCtx, cancel, err := dr.getK8sContext(ctx)
    if err != nil {
        return nil, err
    } else if cancel != nil {
        defer cancel()
    }

    client, ok := dr.k8sclient.getClient(newCtx, cmd.OrgID)
    if !ok {
        return nil, nil
    }

    obj, err := LegacySaveCommandToUnstructured(cmd, dr.k8sclient.getNamespace(cmd.OrgID))
    obj, err := LegacySaveCommandToUnstructured(cmd, dr.k8sclient.GetNamespace(cmd.OrgID))
    if err != nil {
        return nil, err
    }
@@ -1577,61 +1455,40 @@ func (dr *DashboardServiceImpl) saveProvisionedDashboardThroughK8s(ctx context.C
    }
    obj.SetAnnotations(annotations)

    var out *unstructured.Unstructured
    current, err := client.Get(newCtx, obj.GetName(), v1.GetOptions{})
    if err != nil && !apierrors.IsNotFound(err) {
        return nil, err
    } else if current == nil || (err != nil && apierrors.IsNotFound(err)) {
        out, err = client.Create(newCtx, &obj, v1.CreateOptions{})
        if err != nil {
            return nil, err
        }
    } else {
        out, err = client.Update(newCtx, &obj, v1.UpdateOptions{})
        if err != nil {
            return nil, err
        }
    }

    finalDash, err := dr.UnstructuredToLegacyDashboard(ctx, out, cmd.OrgID)
    out, err := dr.createOrUpdateDash(ctx, obj, cmd.OrgID)
    if err != nil {
        return nil, err
    }

    return finalDash, nil
    return out, nil
}

func (dr *DashboardServiceImpl) saveDashboardThroughK8s(ctx context.Context, cmd *dashboards.SaveDashboardCommand, orgID int64) (*dashboards.Dashboard, error) {
    // create a new context - prevents issues when the request stems from the k8s api itself
    // otherwise the context goes through the handlers twice and causes issues
    newCtx, cancel, err := dr.getK8sContext(ctx)
    if err != nil {
        return nil, err
    } else if cancel != nil {
        defer cancel()
    }

    client, ok := dr.k8sclient.getClient(newCtx, orgID)
    if !ok {
        return nil, nil
    }

    obj, err := LegacySaveCommandToUnstructured(cmd, dr.k8sclient.getNamespace(orgID))
    obj, err := LegacySaveCommandToUnstructured(cmd, dr.k8sclient.GetNamespace(orgID))
    if err != nil {
        return nil, err
    }

    setPluginID(obj, cmd.PluginID)

    out, err := dr.createOrUpdateDash(ctx, obj, orgID)
    if err != nil {
        return nil, err
    }

    return out, nil
}

func (dr *DashboardServiceImpl) createOrUpdateDash(ctx context.Context, obj unstructured.Unstructured, orgID int64) (*dashboards.Dashboard, error) {
    var out *unstructured.Unstructured
    current, err := client.Get(newCtx, obj.GetName(), v1.GetOptions{})
    current, err := dr.k8sclient.Get(ctx, obj.GetName(), orgID)
    if current == nil || err != nil {
        out, err = client.Create(newCtx, &obj, v1.CreateOptions{})
        out, err = dr.k8sclient.Create(ctx, &obj, orgID)
        if err != nil {
            return nil, err
        }
    } else {
        out, err = client.Update(newCtx, &obj, v1.UpdateOptions{})
        out, err = dr.k8sclient.Update(ctx, &obj, orgID)
        if err != nil {
            return nil, err
        }
@@ -1646,43 +1503,10 @@ func (dr *DashboardServiceImpl) saveDashboardThroughK8s(ctx context.Context, cmd
}

func (dr *DashboardServiceImpl) deleteAllDashboardThroughK8s(ctx context.Context, orgID int64) error {
    // create a new context - prevents issues when the request stems from the k8s api itself
    // otherwise the context goes through the handlers twice and causes issues
    newCtx, cancel, err := dr.getK8sContext(ctx)
    if err != nil {
        return err
    } else if cancel != nil {
        defer cancel()
    }

    client, ok := dr.k8sclient.getClient(newCtx, orgID)
    if !ok {
        return fmt.Errorf("could not get k8s client")
    }

    err = client.DeleteCollection(newCtx, v1.DeleteOptions{}, v1.ListOptions{})
    if err != nil {
        return err
    }

    return nil
    return dr.k8sclient.DeleteCollection(ctx, orgID)
}

func (dr *DashboardServiceImpl) deleteDashboardThroughK8s(ctx context.Context, cmd *dashboards.DeleteDashboardCommand, validateProvisionedDashboard bool) error {
    // create a new context - prevents issues when the request stems from the k8s api itself
    // otherwise the context goes through the handlers twice and causes issues
    newCtx, cancel, err := dr.getK8sContext(ctx)
    if err != nil {
        return err
    } else if cancel != nil {
        defer cancel()
    }

    client, ok := dr.k8sclient.getClient(newCtx, cmd.OrgID)
    if !ok {
        return fmt.Errorf("could not get k8s client")
    }

    // get uid if not passed in
    if cmd.UID == "" {
        result, err := dr.GetDashboardUIDByID(ctx, &dashboards.GetDashboardRefByIDQuery{
@@ -1702,32 +1526,13 @@ func (dr *DashboardServiceImpl) deleteDashboardThroughK8s(ctx context.Context, c
        gracePeriod = &noGracePeriod
    }

    err = client.Delete(newCtx, cmd.UID, v1.DeleteOptions{
    return dr.k8sclient.Delete(ctx, cmd.UID, cmd.OrgID, v1.DeleteOptions{
        GracePeriodSeconds: gracePeriod,
    })
    if err != nil {
        return err
    }

    return nil
}

func (dr *DashboardServiceImpl) listDashboardsThroughK8s(ctx context.Context, orgID int64) ([]*dashboards.Dashboard, error) {
    // create a new context - prevents issues when the request stems from the k8s api itself
    // otherwise the context goes through the handlers twice and causes issues
    newCtx, cancel, err := dr.getK8sContext(ctx)
    if err != nil {
        return nil, err
    } else if cancel != nil {
        defer cancel()
    }

    client, ok := dr.k8sclient.getClient(newCtx, orgID)
    if !ok {
        return nil, nil
    }

    out, err := client.List(newCtx, v1.ListOptions{})
    out, err := dr.k8sclient.List(ctx, orgID, v1.ListOptions{})
    if err != nil {
        return nil, err
    } else if out == nil {
@@ -1747,15 +1552,8 @@ func (dr *DashboardServiceImpl) listDashboardsThroughK8s(ctx context.Context, or
}

func (dr *DashboardServiceImpl) searchDashboardsThroughK8sRaw(ctx context.Context, query *dashboards.FindPersistedDashboardsQuery) (*v0alpha1.SearchResults, error) {
    dashboardskey := &resource.ResourceKey{
        Namespace: dr.k8sclient.getNamespace(query.OrgId),
        Group:     "dashboard.grafana.app",
        Resource:  "dashboards",
    }

    request := &resource.ResourceSearchRequest{
        Options: &resource.ListOptions{
            Key:    dashboardskey,
            Fields: []*resource.Requirement{},
            Labels: []*resource.Requirement{},
        },
@@ -1841,7 +1639,7 @@ func (dr *DashboardServiceImpl) searchDashboardsThroughK8sRaw(ctx context.Contex
        request.Limit = query.Limit
    }

    res, err := dr.k8sclient.getSearcher().Search(ctx, request)
    res, err := dr.k8sclient.Search(ctx, query.OrgId, request)
    if err != nil {
        return nil, err
    }
@@ -1874,18 +1672,6 @@ func (dr *DashboardServiceImpl) searchProvisionedDashboardsThroughK8s(ctx contex
        return nil, err
    }

    newCtx, cancel, err := dr.getK8sContext(ctx)
    if err != nil {
        return nil, err
    } else if cancel != nil {
        defer cancel()
    }

    client, ok := dr.k8sclient.getClient(newCtx, query.OrgId)
    if !ok {
        return nil, nil
    }

    // loop through all hits concurrently to get the repo information (if set due to file provisioning)
    dashs := make([]*dashboardProvisioningWithUID, 0)
    var mu sync.Mutex
@@ -1893,7 +1679,7 @@ func (dr *DashboardServiceImpl) searchProvisionedDashboardsThroughK8s(ctx contex
    for _, h := range searchResults.Hits {
        func(hit v0alpha1.DashboardHit) {
            g.Go(func() error {
                out, err := client.Get(ctx, hit.Name, v1.GetOptions{}, "")
                out, err := dr.k8sclient.Get(ctx, hit.Name, query.OrgId)
                if err != nil {
                    return err
                } else if out == nil {
@@ -2,7 +2,6 @@ package service

import (
    "context"
    "fmt"
    "reflect"
    "testing"
    "time"
@@ -11,13 +10,12 @@ import (
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/mock"
    "github.com/stretchr/testify/require"
    "google.golang.org/grpc"
    "k8s.io/client-go/dynamic"

    "github.com/grafana/grafana/pkg/apimachinery/identity"
    "github.com/grafana/grafana/pkg/apimachinery/utils"
    "github.com/grafana/grafana/pkg/components/simplejson"
    "github.com/grafana/grafana/pkg/infra/log"
    "github.com/grafana/grafana/pkg/services/apiserver/client"
    "github.com/grafana/grafana/pkg/services/dashboards"
    "github.com/grafana/grafana/pkg/services/featuremgmt"
    "github.com/grafana/grafana/pkg/services/folder"
@@ -240,101 +238,16 @@ func TestDashboardService(t *testing.T) {
    })
}

type mockDashK8sCli struct {
    mock.Mock
    searcher *mockResourceIndexClient
}

func (m *mockDashK8sCli) getClient(ctx context.Context, orgID int64) (dynamic.ResourceInterface, bool) {
    args := m.Called(ctx, orgID)
    return args.Get(0).(dynamic.ResourceInterface), args.Bool(1)
}

func (m *mockDashK8sCli) getNamespace(orgID int64) string {
    if orgID == 1 {
        return "default"
    }
    return fmt.Sprintf("orgs-%d", orgID)
}

func (m *mockDashK8sCli) getSearcher() resource.ResourceIndexClient {
    return m.searcher
}

type mockResourceIndexClient struct {
    mock.Mock
    resource.ResourceIndexClient
}

func (m *mockResourceIndexClient) Search(ctx context.Context, req *resource.ResourceSearchRequest, opts ...grpc.CallOption) (*resource.ResourceSearchResponse, error) {
    args := m.Called(req)
    return args.Get(0).(*resource.ResourceSearchResponse), args.Error(1)
}

func (m *mockResourceIndexClient) GetStats(ctx context.Context, in *resource.ResourceStatsRequest, opts ...grpc.CallOption) (*resource.ResourceStatsResponse, error) {
    args := m.Called(in)
    return args.Get(0).(*resource.ResourceStatsResponse), args.Error(1)
}

type mockResourceInterface struct {
    mock.Mock
    dynamic.ResourceInterface
}

func (m *mockResourceInterface) Get(ctx context.Context, name string, options metav1.GetOptions, subresources ...string) (*unstructured.Unstructured, error) {
    args := m.Called(ctx, name, options, subresources)
    if args.Get(0) == nil {
        return nil, args.Error(1)
    }
    return args.Get(0).(*unstructured.Unstructured), args.Error(1)
}

func (m *mockResourceInterface) List(ctx context.Context, opts metav1.ListOptions) (*unstructured.UnstructuredList, error) {
    args := m.Called(ctx, opts)
    if args.Get(0) == nil {
        return nil, args.Error(1)
    }
    return args.Get(0).(*unstructured.UnstructuredList), args.Error(1)
}

func (m *mockResourceInterface) Create(ctx context.Context, obj *unstructured.Unstructured, options metav1.CreateOptions, subresources ...string) (*unstructured.Unstructured, error) {
    args := m.Called(ctx, obj, options, subresources)
    if args.Get(0) == nil {
        return nil, args.Error(1)
    }
    return args.Get(0).(*unstructured.Unstructured), args.Error(1)
}

func (m *mockResourceInterface) Update(ctx context.Context, obj *unstructured.Unstructured, options metav1.UpdateOptions, subresources ...string) (*unstructured.Unstructured, error) {
    args := m.Called(ctx, obj, options, subresources)
    if args.Get(0) == nil {
        return nil, args.Error(1)
    }
    return args.Get(0).(*unstructured.Unstructured), args.Error(1)
}

func (m *mockResourceInterface) Delete(ctx context.Context, name string, options metav1.DeleteOptions, subresources ...string) error {
    args := m.Called(ctx, name, options, subresources)
    return args.Error(0)
}

func (m *mockResourceInterface) DeleteCollection(ctx context.Context, options metav1.DeleteOptions, listOptions metav1.ListOptions) error {
    args := m.Called(ctx, options, listOptions)
    return args.Error(0)
}

func setupK8sDashboardTests(service *DashboardServiceImpl) (context.Context, *mockDashK8sCli, *mockResourceInterface) {
    k8sClientMock := new(mockDashK8sCli)
    k8sResourceMock := new(mockResourceInterface)
    k8sClientMock.searcher = new(mockResourceIndexClient)
    service.k8sclient = k8sClientMock
func setupK8sDashboardTests(service *DashboardServiceImpl) (context.Context, *client.MockK8sHandler) {
    mockCli := new(client.MockK8sHandler)
    service.k8sclient = mockCli
    service.features = featuremgmt.WithFeatures(featuremgmt.FlagKubernetesCliDashboards)

    ctx := context.Background()
    userCtx := &user.SignedInUser{UserID: 1, OrgID: 1}
    ctx = identity.WithRequester(ctx, userCtx)

    return ctx, k8sClientMock, k8sResourceMock
    return ctx, mockCli
}

func TestGetDashboard(t *testing.T) {
@@ -359,7 +272,7 @@ func TestGetDashboard(t *testing.T) {
    })

    t.Run("Should use Kubernetes client if feature flags are enabled", func(t *testing.T) {
        ctx, k8sClientMock, k8sResourceMock := setupK8sDashboardTests(service)
        ctx, k8sCliMock := setupK8sDashboardTests(service)
        dashboardUnstructured := unstructured.Unstructured{Object: map[string]any{
            "metadata": map[string]any{
                "name": "uid",
@@ -379,13 +292,12 @@ func TestGetDashboard(t *testing.T) {
            Version: 1,
            Data:    simplejson.NewFromAny(map[string]any{"test": "test", "title": "testing slugify", "uid": "uid", "version": int64(1)}),
        }
        k8sClientMock.On("getClient", mock.Anything, int64(1)).Return(k8sResourceMock, true).Once()
        k8sResourceMock.On("Get", mock.Anything, query.UID, mock.Anything, mock.Anything).Return(&dashboardUnstructured, nil).Once()
        k8sCliMock.On("Get", mock.Anything, query.UID, mock.Anything, mock.Anything).Return(&dashboardUnstructured, nil).Once()

        dashboard, err := service.GetDashboard(ctx, query)
        require.NoError(t, err)
        require.NotNil(t, dashboard)
        k8sClientMock.AssertExpectations(t)
        k8sCliMock.AssertExpectations(t)
        // make sure the conversion is working
        require.True(t, reflect.DeepEqual(dashboard, &dashboardExpected))
    })
@@ -396,7 +308,7 @@ func TestGetDashboard(t *testing.T) {
            UID:   "",
            OrgID: 1,
        }
        ctx, k8sClientMock, k8sResourceMock := setupK8sDashboardTests(service)
        ctx, k8sCliMock := setupK8sDashboardTests(service)
        dashboardUnstructured := unstructured.Unstructured{Object: map[string]any{
            "metadata": map[string]any{
                "name": "uid",
@@ -416,9 +328,8 @@ func TestGetDashboard(t *testing.T) {
            Version: 1,
            Data:    simplejson.NewFromAny(map[string]any{"test": "test", "title": "testing slugify", "uid": "uid", "version": int64(1)}),
        }
        k8sClientMock.On("getClient", mock.Anything, int64(1)).Return(k8sResourceMock, true).Once()
        k8sResourceMock.On("Get", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(&dashboardUnstructured, nil).Once()
        k8sClientMock.searcher.On("Search", mock.Anything).Return(&resource.ResourceSearchResponse{
        k8sCliMock.On("Get", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(&dashboardUnstructured, nil).Once()
        k8sCliMock.On("Search", mock.Anything, mock.Anything, mock.Anything).Return(&resource.ResourceSearchResponse{
            Results: &resource.ResourceTable{
                Columns: []*resource.ResourceTableColumnDefinition{
                    {
@@ -449,32 +360,30 @@ func TestGetDashboard(t *testing.T) {
        dashboard, err := service.GetDashboard(ctx, query)
        require.NoError(t, err)
        require.NotNil(t, dashboard)
        k8sClientMock.AssertExpectations(t)
        k8sClientMock.searcher.AssertExpectations(t)
        k8sCliMock.AssertExpectations(t)
        k8sCliMock.AssertExpectations(t)
        // make sure the conversion is working
        require.True(t, reflect.DeepEqual(dashboard, &dashboardExpected))
    })

    t.Run("Should return error when Kubernetes client fails", func(t *testing.T) {
        ctx, k8sClientMock, k8sResourceMock := setupK8sDashboardTests(service)
        k8sClientMock.On("getClient", mock.Anything, int64(1)).Return(k8sResourceMock, true).Once()
        k8sResourceMock.On("Get", mock.Anything, query.UID, mock.Anything, mock.Anything).Return(nil, assert.AnError).Once()
        ctx, k8sCliMock := setupK8sDashboardTests(service)
        k8sCliMock.On("Get", mock.Anything, query.UID, mock.Anything, mock.Anything).Return(nil, assert.AnError).Once()

        dashboard, err := service.GetDashboard(ctx, query)
        require.Error(t, err)
        require.Nil(t, dashboard)
        k8sClientMock.AssertExpectations(t)
        k8sCliMock.AssertExpectations(t)
    })

    t.Run("Should return dashboard not found if Kubernetes client returns nil", func(t *testing.T) {
        ctx, k8sClientMock, k8sResourceMock := setupK8sDashboardTests(service)
        k8sClientMock.On("getClient", mock.Anything, int64(1)).Return(k8sResourceMock, true).Once()
        k8sResourceMock.On("Get", mock.Anything, query.UID, mock.Anything, mock.Anything).Return(nil, nil).Once()
        ctx, k8sCliMock := setupK8sDashboardTests(service)
        k8sCliMock.On("Get", mock.Anything, query.UID, mock.Anything, mock.Anything).Return(nil, nil).Once()
        dashboard, err := service.GetDashboard(ctx, query)
        require.Error(t, err)
        require.Equal(t, dashboards.ErrDashboardNotFound, err)
        require.Nil(t, dashboard)
        k8sClientMock.AssertExpectations(t)
        k8sCliMock.AssertExpectations(t)
    })
}

@@ -496,7 +405,7 @@ func TestGetAllDashboards(t *testing.T) {
    })

    t.Run("Should use Kubernetes client if feature flags are enabled", func(t *testing.T) {
        ctx, k8sClientMock, k8sResourceMock := setupK8sDashboardTests(service)
        ctx, k8sCliMock := setupK8sDashboardTests(service)

        dashboardUnstructured := unstructured.Unstructured{Object: map[string]any{
            "metadata": map[string]any{
@@ -518,13 +427,12 @@ func TestGetAllDashboards(t *testing.T) {
            Data: simplejson.NewFromAny(map[string]any{"test": "test", "title": "testing slugify", "uid": "uid", "version": int64(1)}),
        }

        k8sClientMock.On("getClient", mock.Anything, int64(1)).Return(k8sResourceMock, true).Once()
        k8sResourceMock.On("List", mock.Anything, mock.Anything).Return(&unstructured.UnstructuredList{Items: []unstructured.Unstructured{dashboardUnstructured}}, nil).Once()
        k8sCliMock.On("List", mock.Anything, mock.Anything, mock.Anything).Return(&unstructured.UnstructuredList{Items: []unstructured.Unstructured{dashboardUnstructured}}, nil).Once()

        dashes, err := service.GetAllDashboards(ctx)
        require.NoError(t, err)
        require.NotNil(t, dashes)
        k8sClientMock.AssertExpectations(t)
        k8sCliMock.AssertExpectations(t)
        // make sure the conversion is working
        require.True(t, reflect.DeepEqual(dashes, []*dashboards.Dashboard{&dashboardExpected}))
    })
@@ -548,7 +456,7 @@ func TestGetAllDashboardsByOrgId(t *testing.T) {
    })

    t.Run("Should use Kubernetes client if feature flags are enabled", func(t *testing.T) {
        ctx, k8sClientMock, k8sResourceMock := setupK8sDashboardTests(service)
        ctx, k8sCliMock := setupK8sDashboardTests(service)

        dashboardUnstructured := unstructured.Unstructured{Object: map[string]any{
            "metadata": map[string]any{
@@ -570,13 +478,12 @@ func TestGetAllDashboardsByOrgId(t *testing.T) {
            Data: simplejson.NewFromAny(map[string]any{"test": "test", "title": "testing slugify", "uid": "uid", "version": int64(1)}),
        }

        k8sClientMock.On("getClient", mock.Anything, int64(1)).Return(k8sResourceMock, true).Once()
        k8sResourceMock.On("List", mock.Anything, mock.Anything).Return(&unstructured.UnstructuredList{Items: []unstructured.Unstructured{dashboardUnstructured}}, nil).Once()
        k8sCliMock.On("List", mock.Anything, mock.Anything, mock.Anything).Return(&unstructured.UnstructuredList{Items: []unstructured.Unstructured{dashboardUnstructured}}, nil).Once()

        dashes, err := service.GetAllDashboardsByOrgId(ctx, 1)
        require.NoError(t, err)
        require.NotNil(t, dashes)
        k8sClientMock.AssertExpectations(t)
        k8sCliMock.AssertExpectations(t)
        // make sure the conversion is working
        require.True(t, reflect.DeepEqual(dashes, []*dashboards.Dashboard{&dashboardExpected}))
    })
@@ -603,9 +510,8 @@ func TestGetProvisionedDashboardData(t *testing.T) {
    })

    t.Run("Should use Kubernetes client if feature flags are enabled and get from relevant org", func(t *testing.T) {
        ctx, k8sClientMock, k8sResourceMock := setupK8sDashboardTests(service)
        k8sClientMock.On("getClient", mock.Anything, mock.Anything).Return(k8sResourceMock, true)
        k8sResourceMock.On("Get", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(&unstructured.Unstructured{Object: map[string]any{
        ctx, k8sCliMock := setupK8sDashboardTests(service)
        k8sCliMock.On("Get", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(&unstructured.Unstructured{Object: map[string]any{
            "metadata": map[string]any{
                "name": "uid",
                "labels": map[string]any{
@@ -625,10 +531,10 @@ func TestGetProvisionedDashboardData(t *testing.T) {
            },
        }}, nil).Once()
        repo := "test"
        k8sClientMock.searcher.On("Search",
        k8sCliMock.On("Search", mock.Anything, int64(1),
            mock.MatchedBy(func(req *resource.ResourceSearchRequest) bool {
                // ensure the prefix is added to the query
                return req.Options.Key.Namespace == "default" && req.Options.Fields[0].Values[0] == provisionedFileNameWithPrefix(repo)
                return req.Options.Fields[0].Values[0] == provisionedFileNameWithPrefix(repo)
            })).Return(&resource.ResourceSearchResponse{
            Results: &resource.ResourceTable{
                Columns: []*resource.ResourceTableColumnDefinition{},
@@ -636,9 +542,9 @@ func TestGetProvisionedDashboardData(t *testing.T) {
            },
            TotalHits: 0,
        }, nil).Once()
        k8sClientMock.searcher.On("Search", mock.MatchedBy(func(req *resource.ResourceSearchRequest) bool {
        k8sCliMock.On("Search", mock.Anything, int64(2), mock.MatchedBy(func(req *resource.ResourceSearchRequest) bool {
            // ensure the prefix is added to the query
            return req.Options.Key.Namespace == "orgs-2" && req.Options.Fields[0].Values[0] == provisionedFileNameWithPrefix(repo)
            return req.Options.Fields[0].Values[0] == provisionedFileNameWithPrefix(repo)
        })).Return(&resource.ResourceSearchResponse{
            Results: &resource.ResourceTable{
                Columns: []*resource.ResourceTableColumnDefinition{
@@ -677,7 +583,7 @@ func TestGetProvisionedDashboardData(t *testing.T) {
            CheckSum: "hash",
            Updated:  1735689600,
        })
        k8sClientMock.AssertExpectations(t)
        k8sCliMock.AssertExpectations(t)
    })
}

@@ -702,9 +608,8 @@ func TestGetProvisionedDashboardDataByDashboardID(t *testing.T) {
    })

    t.Run("Should use Kubernetes client if feature flags are enabled and get from whatever org it is in", func(t *testing.T) {
        ctx, k8sClientMock, k8sResourceMock := setupK8sDashboardTests(service)
        k8sClientMock.On("getClient", mock.Anything, mock.Anything).Return(k8sResourceMock, true)
        k8sResourceMock.On("Get", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(&unstructured.Unstructured{Object: map[string]any{
        ctx, k8sCliMock := setupK8sDashboardTests(service)
        k8sCliMock.On("Get", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(&unstructured.Unstructured{Object: map[string]any{
            "metadata": map[string]any{
                "name": "uid",
                "labels": map[string]any{
@@ -723,18 +628,14 @@ func TestGetProvisionedDashboardDataByDashboardID(t *testing.T) {
                "title": "testing slugify",
            },
        }}, nil)
        k8sClientMock.searcher.On("Search", mock.MatchedBy(func(req *resource.ResourceSearchRequest) bool {
            return req.Options.Key.Namespace == "default"
        })).Return(&resource.ResourceSearchResponse{
        k8sCliMock.On("Search", mock.Anything, int64(1), mock.Anything).Return(&resource.ResourceSearchResponse{
            Results: &resource.ResourceTable{
                Columns: []*resource.ResourceTableColumnDefinition{},
                Rows:    []*resource.ResourceTableRow{},
            },
            TotalHits: 0,
        }, nil)
        k8sClientMock.searcher.On("Search", mock.MatchedBy(func(req *resource.ResourceSearchRequest) bool {
            return req.Options.Key.Namespace == "orgs-2"
        })).Return(&resource.ResourceSearchResponse{
        k8sCliMock.On("Search", mock.Anything, int64(2), mock.Anything).Return(&resource.ResourceSearchResponse{
            Results: &resource.ResourceTable{
                Columns: []*resource.ResourceTableColumnDefinition{
                    {
@@ -771,7 +672,7 @@ func TestGetProvisionedDashboardDataByDashboardID(t *testing.T) {
            CheckSum: "hash",
            Updated:  1735689600,
        })
        k8sClientMock.AssertExpectations(t)
        k8sCliMock.AssertExpectations(t)
    })
}

@@ -796,9 +697,8 @@ func TestGetProvisionedDashboardDataByDashboardUID(t *testing.T) {
    })

    t.Run("Should use Kubernetes client if feature flags are enabled", func(t *testing.T) {
        ctx, k8sClientMock, k8sResourceMock := setupK8sDashboardTests(service)
        k8sClientMock.On("getClient", mock.Anything, mock.Anything).Return(k8sResourceMock, true)
        k8sResourceMock.On("Get", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(&unstructured.Unstructured{Object: map[string]any{
        ctx, k8sCliMock := setupK8sDashboardTests(service)
        k8sCliMock.On("Get", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(&unstructured.Unstructured{Object: map[string]any{
            "metadata": map[string]any{
                "name": "uid",
                "labels": map[string]any{
@@ -817,7 +717,7 @@ func TestGetProvisionedDashboardDataByDashboardUID(t *testing.T) {
                "title": "testing slugify",
            },
        }}, nil).Once()
        k8sClientMock.searcher.On("Search", mock.Anything).Return(&resource.ResourceSearchResponse{
        k8sCliMock.On("Search", mock.Anything, mock.Anything, mock.Anything).Return(&resource.ResourceSearchResponse{
            Results: &resource.ResourceTable{
                Columns: []*resource.ResourceTableColumnDefinition{
                    {
@@ -854,7 +754,7 @@ func TestGetProvisionedDashboardDataByDashboardUID(t *testing.T) {
            CheckSum: "hash",
            Updated:  1735689600,
        })
        k8sClientMock.AssertExpectations(t)
        k8sCliMock.AssertExpectations(t)
    })
}

@@ -882,12 +782,11 @@ func TestDeleteOrphanedProvisionedDashboards(t *testing.T) {
    })

    t.Run("Should use Kubernetes client if feature flags are enabled, delete across all orgs, but only delete file based provisioned dashboards", func(t *testing.T) {
        _, k8sClientMock, k8sResourceMock := setupK8sDashboardTests(service)
        k8sClientMock.On("getClient", mock.Anything, mock.Anything).Return(k8sResourceMock, true)
        k8sResourceMock.On("Delete", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil)
        _, k8sCliMock := setupK8sDashboardTests(service)
        k8sCliMock.On("Delete", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil)
        fakeStore.On("CleanupAfterDelete", mock.Anything, &dashboards.DeleteDashboardCommand{UID: "uid", OrgID: 1}).Return(nil).Once()
        fakeStore.On("CleanupAfterDelete", mock.Anything, &dashboards.DeleteDashboardCommand{UID: "uid3", OrgID: 2}).Return(nil).Once()
        k8sResourceMock.On("Get", mock.Anything, "uid", mock.Anything, mock.Anything).Return(&unstructured.Unstructured{Object: map[string]any{
        k8sCliMock.On("Get", mock.Anything, "uid", mock.Anything, mock.Anything).Return(&unstructured.Unstructured{Object: map[string]any{
            "metadata": map[string]any{
                "name": "uid",
                "annotations": map[string]any{
@@ -900,7 +799,7 @@ func TestDeleteOrphanedProvisionedDashboards(t *testing.T) {
            "spec": map[string]any{},
        }}, nil).Once()
        // should not delete this one, because it does not start with "file:"
        k8sResourceMock.On("Get", mock.Anything, "uid2", mock.Anything, mock.Anything).Return(&unstructured.Unstructured{Object: map[string]any{
        k8sCliMock.On("Get", mock.Anything, "uid2", mock.Anything, mock.Anything).Return(&unstructured.Unstructured{Object: map[string]any{
            "metadata": map[string]any{
                "name": "uid2",
                "annotations": map[string]any{
@@ -911,7 +810,7 @@ func TestDeleteOrphanedProvisionedDashboards(t *testing.T) {
            "spec": map[string]any{},
        }}, nil).Once()

        k8sResourceMock.On("Get", mock.Anything, "uid3", mock.Anything, mock.Anything).Return(&unstructured.Unstructured{Object: map[string]any{
        k8sCliMock.On("Get", mock.Anything, "uid3", mock.Anything, mock.Anything).Return(&unstructured.Unstructured{Object: map[string]any{
            "metadata": map[string]any{
                "name": "uid3",
                "annotations": map[string]any{
@@ -923,9 +822,8 @@ func TestDeleteOrphanedProvisionedDashboards(t *testing.T) {
            },
            "spec": map[string]any{},
        }}, nil).Once()
        k8sClientMock.searcher.On("Search", mock.MatchedBy(func(req *resource.ResourceSearchRequest) bool {
            return req.Options.Key.Namespace == "default" && req.Options.Fields[0].Key == "repo.name" && req.Options.Fields[0].Values[0] == provisionedFileNameWithPrefix("test") &&
                req.Options.Fields[0].Operator == "notin"
        k8sCliMock.On("Search", mock.Anything, int64(1), mock.MatchedBy(func(req *resource.ResourceSearchRequest) bool {
            return req.Options.Fields[0].Key == "repo.name" && req.Options.Fields[0].Values[0] == provisionedFileNameWithPrefix("test") && req.Options.Fields[0].Operator == "notin"
        })).Return(&resource.ResourceSearchResponse{
            Results: &resource.ResourceTable{
                Columns: []*resource.ResourceTableColumnDefinition{
@@ -954,9 +852,8 @@ func TestDeleteOrphanedProvisionedDashboards(t *testing.T) {
            TotalHits: 1,
        }, nil).Once()

        k8sClientMock.searcher.On("Search", mock.MatchedBy(func(req *resource.ResourceSearchRequest) bool {
            return req.Options.Key.Namespace == "orgs-2" && req.Options.Fields[0].Key == "repo.name" && req.Options.Fields[0].Values[0] == provisionedFileNameWithPrefix("test") &&
                req.Options.Fields[0].Operator == "notin"
        k8sCliMock.On("Search", mock.Anything, int64(2), mock.MatchedBy(func(req *resource.ResourceSearchRequest) bool {
            return req.Options.Fields[0].Key == "repo.name" && req.Options.Fields[0].Values[0] == provisionedFileNameWithPrefix("test") && req.Options.Fields[0].Operator == "notin"
        })).Return(&resource.ResourceSearchResponse{
            Results: &resource.ResourceTable{
                Columns: []*resource.ResourceTableColumnDefinition{
@@ -998,7 +895,7 @@ func TestDeleteOrphanedProvisionedDashboards(t *testing.T) {
            ReaderNames: []string{"test"},
        })
        require.NoError(t, err)
        k8sClientMock.AssertExpectations(t)
        k8sCliMock.AssertExpectations(t)
    })
}

@ -1022,8 +919,7 @@ func TestUnprovisionDashboard(t *testing.T) {
|
||||
})
|
||||
|
||||
t.Run("Should use Kubernetes client if feature flags are enabled - should remove annotations", func(t *testing.T) {
|
||||
ctx, k8sClientMock, k8sResourceMock := setupK8sDashboardTests(service)
|
||||
k8sClientMock.On("getClient", mock.Anything, mock.Anything).Return(k8sResourceMock, true)
|
||||
ctx, k8sCliMock := setupK8sDashboardTests(service)
|
||||
dash := &unstructured.Unstructured{Object: map[string]any{
|
||||
"metadata": map[string]any{
|
||||
"name": "uid",
|
||||
@ -1036,7 +932,7 @@ func TestUnprovisionDashboard(t *testing.T) {
|
||||
},
|
||||
"spec": map[string]any{},
|
||||
}}
|
||||
k8sResourceMock.On("Get", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(dash, nil)
|
||||
k8sCliMock.On("Get", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(dash, nil)
|
||||
dashWithoutAnnotations := &unstructured.Unstructured{Object: map[string]any{
|
||||
"apiVersion": "dashboard.grafana.app/v0alpha1",
|
||||
"kind": "Dashboard",
|
||||
@ -1051,8 +947,9 @@ func TestUnprovisionDashboard(t *testing.T) {
|
||||
},
|
||||
}}
|
||||
// should update it to be without annotations
|
||||
k8sResourceMock.On("Update", mock.Anything, dashWithoutAnnotations, mock.Anything, mock.Anything).Return(dashWithoutAnnotations, nil)
|
||||
k8sClientMock.searcher.On("Search", mock.Anything).Return(&resource.ResourceSearchResponse{
|
||||
k8sCliMock.On("Update", mock.Anything, dashWithoutAnnotations, mock.Anything, mock.Anything).Return(dashWithoutAnnotations, nil)
|
||||
k8sCliMock.On("GetNamespace", mock.Anything).Return("default")
|
||||
k8sCliMock.On("Search", mock.Anything, mock.Anything, mock.Anything).Return(&resource.ResourceSearchResponse{
|
||||
Results: &resource.ResourceTable{
|
||||
Columns: []*resource.ResourceTableColumnDefinition{
|
||||
{
|
||||
@ -1081,7 +978,7 @@ func TestUnprovisionDashboard(t *testing.T) {
|
||||
}, nil)
|
||||
err := service.UnprovisionDashboard(ctx, 1)
|
||||
require.NoError(t, err)
|
||||
k8sClientMock.AssertExpectations(t)
|
||||
k8sCliMock.AssertExpectations(t)
|
||||
})
|
||||
}
|
||||
|
||||
@ -1106,8 +1003,8 @@ func TestGetDashboardsByPluginID(t *testing.T) {
|
||||
})
|
||||
|
||||
t.Run("Should use Kubernetes client if feature flags are enabled", func(t *testing.T) {
|
||||
ctx, k8sClientMock, _ := setupK8sDashboardTests(service)
|
||||
k8sClientMock.searcher.On("Search", mock.MatchedBy(func(req *resource.ResourceSearchRequest) bool {
|
||||
ctx, k8sCliMock := setupK8sDashboardTests(service)
|
||||
k8sCliMock.On("Search", mock.Anything, mock.Anything, mock.MatchedBy(func(req *resource.ResourceSearchRequest) bool {
|
||||
return req.Options.Fields[0].Key == "repo.name" && req.Options.Fields[0].Values[0] == "plugin" &&
|
||||
req.Options.Fields[1].Key == "repo.path" && req.Options.Fields[1].Values[0] == "testing"
|
||||
})).Return(&resource.ResourceSearchResponse{
|
||||
@ -1140,7 +1037,7 @@ func TestGetDashboardsByPluginID(t *testing.T) {
|
||||
dashes, err := service.GetDashboardsByPluginID(ctx, query)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, dashes, 1)
|
||||
k8sClientMock.AssertExpectations(t)
|
||||
k8sCliMock.AssertExpectations(t)
|
||||
})
|
||||
}
|
||||
|
||||
@@ -1192,16 +1089,16 @@ func TestSaveProvisionedDashboard(t *testing.T) {
 	}}
 
 	t.Run("Should use Kubernetes create if feature flags are enabled", func(t *testing.T) {
-		ctx, k8sClientMock, k8sResourceMock := setupK8sDashboardTests(service)
+		ctx, k8sCliMock := setupK8sDashboardTests(service)
 		fakeStore.On("SaveProvisionedDashboard", mock.Anything, mock.Anything, mock.Anything).Return(nil)
-		k8sClientMock.On("getClient", mock.Anything, int64(1)).Return(k8sResourceMock, true)
-		k8sResourceMock.On("Get", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil, nil)
-		k8sResourceMock.On("Create", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(&dashboardUnstructured, nil)
+		k8sCliMock.On("Get", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil, nil)
+		k8sCliMock.On("Create", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(&dashboardUnstructured, nil)
+		k8sCliMock.On("GetNamespace", mock.Anything).Return("default")
 
 		dashboard, err := service.SaveProvisionedDashboard(ctx, query, &dashboards.DashboardProvisioning{})
 		require.NoError(t, err)
 		require.NotNil(t, dashboard)
-		k8sClientMock.AssertExpectations(t)
+		k8sCliMock.AssertExpectations(t)
 		// ensure the provisioning data is still saved to the db
 		fakeStore.AssertExpectations(t)
 	})
@@ -1254,10 +1151,10 @@ func TestSaveDashboard(t *testing.T) {
 	}}
 
 	t.Run("Should use Kubernetes create if feature flags are enabled and dashboard doesn't exist", func(t *testing.T) {
-		ctx, k8sClientMock, k8sResourceMock := setupK8sDashboardTests(service)
-		k8sClientMock.On("getClient", mock.Anything, int64(1)).Return(k8sResourceMock, true)
-		k8sResourceMock.On("Get", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil, nil)
-		k8sResourceMock.On("Create", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(&dashboardUnstructured, nil)
+		ctx, k8sCliMock := setupK8sDashboardTests(service)
+		k8sCliMock.On("Get", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil, nil)
+		k8sCliMock.On("GetNamespace", mock.Anything).Return("default")
+		k8sCliMock.On("Create", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(&dashboardUnstructured, nil)
 
 		dashboard, err := service.SaveDashboard(ctx, query, false)
 		require.NoError(t, err)
@@ -1265,10 +1162,10 @@ func TestSaveDashboard(t *testing.T) {
 	})
 
 	t.Run("Should use Kubernetes update if feature flags are enabled and dashboard exists", func(t *testing.T) {
-		ctx, k8sClientMock, k8sResourceMock := setupK8sDashboardTests(service)
-		k8sClientMock.On("getClient", mock.Anything, int64(1)).Return(k8sResourceMock, true)
-		k8sResourceMock.On("Get", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(&dashboardUnstructured, nil)
-		k8sResourceMock.On("Update", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(&dashboardUnstructured, nil)
+		ctx, k8sCliMock := setupK8sDashboardTests(service)
+		k8sCliMock.On("Get", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(&dashboardUnstructured, nil)
+		k8sCliMock.On("GetNamespace", mock.Anything).Return("default")
+		k8sCliMock.On("Update", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(&dashboardUnstructured, nil)
 
 		dashboard, err := service.SaveDashboard(ctx, query, false)
 		require.NoError(t, err)
@@ -1276,10 +1173,10 @@ func TestSaveDashboard(t *testing.T) {
 	})
 
 	t.Run("Should return an error if uid is invalid", func(t *testing.T) {
-		ctx, k8sClientMock, k8sResourceMock := setupK8sDashboardTests(service)
-		k8sClientMock.On("getClient", mock.Anything, int64(1)).Return(k8sResourceMock, true)
-		k8sResourceMock.On("Get", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil, nil)
-		k8sResourceMock.On("Create", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(&dashboardUnstructured, nil)
+		ctx, k8sCliMock := setupK8sDashboardTests(service)
+		k8sCliMock.On("Get", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil, nil)
+		k8sCliMock.On("GetNamespace", mock.Anything).Return("default")
+		k8sCliMock.On("Create", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(&dashboardUnstructured, nil)
 
 		query.Dashboard.UID = "invalid/uid"
 		_, err := service.SaveDashboard(ctx, query, false)
@@ -1305,22 +1202,20 @@ func TestDeleteDashboard(t *testing.T) {
 	})
 
 	t.Run("Should use Kubernetes client if feature flags are enabled", func(t *testing.T) {
-		ctx, k8sClientMock, k8sResourceMock := setupK8sDashboardTests(service)
-		k8sClientMock.On("getClient", mock.Anything, int64(1)).Return(k8sResourceMock, true).Once()
-		k8sResourceMock.On("Delete", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil).Once()
+		ctx, k8sCliMock := setupK8sDashboardTests(service)
+		k8sCliMock.On("Delete", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil).Once()
 		fakeStore.On("CleanupAfterDelete", mock.Anything, mock.Anything).Return(nil).Once()
 
 		err := service.DeleteDashboard(ctx, 1, "uid", 1)
 		require.NoError(t, err)
-		k8sClientMock.AssertExpectations(t)
+		k8sCliMock.AssertExpectations(t)
 	})
 
 	t.Run("If UID is not passed in, it should retrieve that first", func(t *testing.T) {
-		ctx, k8sClientMock, k8sResourceMock := setupK8sDashboardTests(service)
-		k8sClientMock.On("getClient", mock.Anything, int64(1)).Return(k8sResourceMock, true).Once()
-		k8sResourceMock.On("Delete", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil).Once()
+		ctx, k8sCliMock := setupK8sDashboardTests(service)
+		k8sCliMock.On("Delete", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil).Once()
 		fakeStore.On("CleanupAfterDelete", mock.Anything, mock.Anything).Return(nil).Once()
-		k8sClientMock.searcher.On("Search", mock.Anything).Return(&resource.ResourceSearchResponse{
+		k8sCliMock.On("Search", mock.Anything, mock.Anything, mock.Anything).Return(&resource.ResourceSearchResponse{
 			Results: &resource.ResourceTable{
 				Columns: []*resource.ResourceTableColumnDefinition{
 					{
@@ -1349,8 +1244,8 @@ func TestDeleteDashboard(t *testing.T) {
 		}, nil)
 		err := service.DeleteDashboard(ctx, 1, "", 1)
 		require.NoError(t, err)
-		k8sClientMock.AssertExpectations(t)
-		k8sClientMock.searcher.AssertExpectations(t)
+		k8sCliMock.AssertExpectations(t)
+		k8sCliMock.AssertExpectations(t)
 	})
 }
 
@@ -1371,13 +1266,12 @@ func TestDeleteAllDashboards(t *testing.T) {
 	})
 
 	t.Run("Should use Kubernetes client if feature flags are enabled", func(t *testing.T) {
-		ctx, k8sClientMock, k8sResourceMock := setupK8sDashboardTests(service)
-		k8sClientMock.On("getClient", mock.Anything, int64(1)).Return(k8sResourceMock, true).Once()
-		k8sResourceMock.On("DeleteCollection", mock.Anything, mock.Anything, mock.Anything).Return(nil).Once()
+		ctx, k8sCliMock := setupK8sDashboardTests(service)
+		k8sCliMock.On("DeleteCollection", mock.Anything, mock.Anything, mock.Anything).Return(nil).Once()
 
 		err := service.DeleteAllDashboards(ctx, 1)
 		require.NoError(t, err)
-		k8sClientMock.AssertExpectations(t)
+		k8sCliMock.AssertExpectations(t)
 	})
 }
 
@@ -1439,9 +1333,8 @@ func TestSearchDashboards(t *testing.T) {
 	})
 
 	t.Run("Should use Kubernetes client if feature flags are enabled", func(t *testing.T) {
-		ctx, k8sClientMock, k8sResourceMock := setupK8sDashboardTests(service)
-		k8sClientMock.On("getClient", mock.Anything, int64(1)).Return(k8sResourceMock, true).Once()
-		k8sClientMock.searcher.On("Search", mock.Anything).Return(&resource.ResourceSearchResponse{
+		ctx, k8sCliMock := setupK8sDashboardTests(service)
+		k8sCliMock.On("Search", mock.Anything, mock.Anything, mock.Anything).Return(&resource.ResourceSearchResponse{
 			Results: &resource.ResourceTable{
 				Columns: []*resource.ResourceTableColumnDefinition{
 					{
@@ -1488,7 +1381,7 @@ func TestSearchDashboards(t *testing.T) {
 		result, err := service.SearchDashboards(ctx, &query)
 		require.NoError(t, err)
 		require.Equal(t, expectedResult, result)
-		k8sClientMock.searcher.AssertExpectations(t)
+		k8sCliMock.AssertExpectations(t)
 	})
 }
 
@@ -1541,9 +1434,8 @@ func TestGetDashboards(t *testing.T) {
 	})
 
 	t.Run("Should use Kubernetes client if feature flags are enabled", func(t *testing.T) {
-		ctx, k8sClientMock, k8sResourceMock := setupK8sDashboardTests(service)
-		k8sClientMock.On("getClient", mock.Anything, int64(1)).Return(k8sResourceMock, true).Once()
-		k8sClientMock.searcher.On("Search", mock.Anything).Return(&resource.ResourceSearchResponse{
+		ctx, k8sCliMock := setupK8sDashboardTests(service)
+		k8sCliMock.On("Search", mock.Anything, mock.Anything, mock.Anything).Return(&resource.ResourceSearchResponse{
 			Results: &resource.ResourceTable{
 				Columns: []*resource.ResourceTableColumnDefinition{
 					{
@@ -1585,13 +1477,13 @@ func TestGetDashboards(t *testing.T) {
 		result, err := service.GetDashboards(ctx, queryByIDs)
 		require.NoError(t, err)
 		require.Equal(t, expectedResult, result)
-		k8sClientMock.searcher.AssertExpectations(t)
+		k8sCliMock.AssertExpectations(t)
 
 		// by uids
 		result, err = service.GetDashboards(ctx, queryByUIDs)
 		require.NoError(t, err)
 		require.Equal(t, expectedResult, result)
-		k8sClientMock.searcher.AssertExpectations(t)
+		k8sCliMock.AssertExpectations(t)
 	})
 }
 
@@ -1621,9 +1513,8 @@ func TestGetDashboardUIDByID(t *testing.T) {
 	})
 
 	t.Run("Should use Kubernetes client if feature flags are enabled", func(t *testing.T) {
-		ctx, k8sClientMock, k8sResourceMock := setupK8sDashboardTests(service)
-		k8sClientMock.On("getClient", mock.Anything, int64(1)).Return(k8sResourceMock, true).Once()
-		k8sClientMock.searcher.On("Search", mock.Anything).Return(&resource.ResourceSearchResponse{
+		ctx, k8sCliMock := setupK8sDashboardTests(service)
+		k8sCliMock.On("Search", mock.Anything, mock.Anything, mock.Anything).Return(&resource.ResourceSearchResponse{
 			Results: &resource.ResourceTable{
 				Columns: []*resource.ResourceTableColumnDefinition{
 					{
@@ -1653,7 +1544,7 @@ func TestGetDashboardUIDByID(t *testing.T) {
 		result, err := service.GetDashboardUIDByID(ctx, query)
 		require.NoError(t, err)
 		require.Equal(t, expectedResult, result)
-		k8sClientMock.searcher.AssertExpectations(t)
+		k8sCliMock.AssertExpectations(t)
 	})
 }
 
@@ -1741,9 +1632,8 @@ func TestGetDashboardTags(t *testing.T) {
 	})
 
 	t.Run("Should use Kubernetes client if feature flags are enabled", func(t *testing.T) {
-		ctx, k8sClientMock, k8sResourceMock := setupK8sDashboardTests(service)
-		k8sClientMock.On("getClient", mock.Anything, int64(1)).Return(k8sResourceMock, true).Once()
-		k8sClientMock.searcher.On("Search", mock.Anything).Return(&resource.ResourceSearchResponse{
+		ctx, k8sCliMock := setupK8sDashboardTests(service)
+		k8sCliMock.On("Search", mock.Anything, mock.Anything, mock.Anything).Return(&resource.ResourceSearchResponse{
 			Facet: map[string]*resource.ResourceSearchResponse_Facet{
 				"tags": {
 					Terms: []*resource.ResourceSearchResponse_TermFacet{
@@ -1811,11 +1701,11 @@ func TestQuotaCount(t *testing.T) {
 	})
 
 	t.Run("Should use Kubernetes client if feature flags are enabled", func(t *testing.T) {
-		ctx, k8sClientMock, _ := setupK8sDashboardTests(service)
+		ctx, k8sCliMock := setupK8sDashboardTests(service)
 		orgSvc := orgtest.FakeOrgService{ExpectedOrgs: orgs}
 		service.orgService = &orgSvc
-		k8sClientMock.searcher.On("GetStats", mock.Anything).Return(&countOrg2, nil).Once()
-		k8sClientMock.searcher.On("GetStats", mock.Anything).Return(&countOrg1, nil).Once()
+		k8sCliMock.On("GetStats", mock.Anything, mock.Anything).Return(&countOrg2, nil).Once()
+		k8sCliMock.On("GetStats", mock.Anything, mock.Anything).Return(&countOrg1, nil).Once()
 
 		result, err := service.Count(ctx, query)
 		require.NoError(t, err)
@@ -1858,8 +1748,8 @@ func TestCountDashboardsInOrg(t *testing.T) {
 	})
 
 	t.Run("Should use Kubernetes client if feature flags are enabled", func(t *testing.T) {
-		ctx, k8sClientMock, _ := setupK8sDashboardTests(service)
-		k8sClientMock.searcher.On("GetStats", mock.Anything).Return(&count, nil).Once()
+		ctx, k8sCliMock := setupK8sDashboardTests(service)
+		k8sCliMock.On("GetStats", mock.Anything, mock.Anything).Return(&count, nil).Once()
 		result, err := service.CountDashboardsInOrg(ctx, 1)
 		require.NoError(t, err)
 		require.Equal(t, result, int64(3))