Merge remote-tracking branch 'origin/main' into resource-store

This commit is contained in:
Ryan McKinley 2024-07-02 14:45:45 -07:00
commit 33917141f0
53 changed files with 1553 additions and 566 deletions

View File

@ -2487,9 +2487,7 @@ exports[`better eslint`] = {
[0, 0, 0, "No untranslated strings. Wrap text with <Trans />", "10"],
[0, 0, 0, "No untranslated strings. Wrap text with <Trans />", "11"],
[0, 0, 0, "No untranslated strings. Wrap text with <Trans />", "12"],
[0, 0, 0, "No untranslated strings. Wrap text with <Trans />", "13"],
[0, 0, 0, "No untranslated strings. Wrap text with <Trans />", "14"],
[0, 0, 0, "No untranslated strings. Wrap text with <Trans />", "15"]
[0, 0, 0, "No untranslated strings. Wrap text with <Trans />", "13"]
],
"public/app/features/alerting/unified/home/Insights.tsx:5381": [
[0, 0, 0, "No untranslated strings. Wrap text with <Trans />", "0"]

1
.github/CODEOWNERS vendored
View File

@ -548,6 +548,7 @@ playwright.config.ts @grafana/plugins-platform-frontend
/scripts/verify-repo-update/ @grafana/grafana-release-guild
/scripts/generate-icon-bundle.js @grafana/plugins-platform-frontend @grafana/grafana-frontend-platform
/scripts/generate-rtk-apis.ts @grafana/grafana-frontend-platform
/scripts/generate-alerting-rtk-apis.ts @grafana/alerting-frontend
/scripts/levitate-parse-json-report.js @grafana/plugins-platform-frontend
/scripts/codemods/explicit-barrel-imports.cjs @grafana/frontend-ops

View File

@ -19,64 +19,56 @@ labels:
menuTitle: Grafana OnCall
title: Configure Grafana OnCall for Alerting
weight: 300
refs:
oncall-integration:
- pattern: /docs/grafana/
destination: /docs/oncall/latest/integrations/grafana-alerting/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana-cloud/alerting-and-irm/oncall/integrations/grafana-alerting/
create-notification-policy:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/alerting/configure-notifications/create-notification-policy/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana-cloud/alerting-and-irm/alerting/configure-notifications/create-notification-policy/
escalation-chain:
- pattern: /docs/grafana/
destination: /docs/oncall/latest/configure/escalation-chains-and-routes/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana-cloud/alerting-and-irm/oncall/configure/escalation-chains-and-routes/
---
## Configure Grafana OnCall for Alerting
# Configure Grafana OnCall for Alerting
Use the Grafana Alerting - Grafana OnCall integration to effortlessly connect alerts generated by Grafana Alerting with Grafana OnCall, where you can then route them according to defined escalation chains and schedules.
You can set up the integration using the Grafana Alerting application or the Grafana OnCall application. For more information on setting it up from the Grafana OnCall application, see [Grafana OnCall documentation](ref:oncall-integration).
{{< admonition type="note" >}}
You can also configure the integration from Grafana OnCall. For more information, refer to [Grafana OnCall documentation](http://grafana.com/docs/oncall/latest/integrations/grafana-alerting/).
{{< /admonition >}}
### Before you begin
## Before you begin
- Ensure you have Installed and enabled the Grafana OnCall plugin
- Ensure your version of Grafana is up-to-date and supports the new features
Grafana OnCall is available in Grafana Cloud by default.
### Procedure
If you are using Grafana OSS, [install and enable the Grafana OnCall plugin](http://grafana.com/docs/oncall/latest/set-up/open-source/#install-grafana-oncall-oss). Also, ensure your version of Grafana is up-to-date and supports the new features.
## Procedure
To set up the Grafana OnCall integration using the Grafana Alerting application, complete the following steps.
1. Navigate to **Alerts&IRM** -> **Alerting** -> **Contact points**.
1. Navigate to **Alerts & IRM** -> **Alerting** -> **Contact points**.
1. Click **+ Add contact point**.
1. Enter a contact point name.
1. From the Integration list, select Grafana OnCall.
1. From the **Integration** list, select **Grafana OnCall**.
{{< admonition type="note" >}}
The Grafana OnCall integration is only available for Grafana Alertmanager.
{{< /admonition >}}
{{< admonition type="note" >}}
The Grafana OnCall integration is only available for Grafana Alertmanager.
{{< /admonition >}}
1. Choose whether to add a new OnCall integration or add an existing one.
- If you add a new one, enter an Integration name.
- If you add an existing one, choose from the list of available integrations
- If you add an existing one, choose from the list of available integrations.
1. Click **Save contact point**.
1. On the contact points list view page, you should see a link to Grafana OnCall.
1. On the Contact points list view page, you can see the contact point with the Grafana OnCall icon.
If the integration is not being used anywhere in the notification policies tree, it has **Unused** as its status in the **Health** column. It won't receive any notifications, because there are no notifications using that integration.
If the integration is not yet being used anywhere in the notification policies tree, it will have **Unused** as its status in the **Health** column. It won't receive any notifications, because there are no notifications using that integration.
1. To see the integration details and test the integration, click the link to Grafana OnCall on the contact points list view page.
1. Click **Send demo alert** > **Send alert**.
1. Go to Grafana OnCall **Alert Groups**. You should see the demo alert.
1. Connect your contact point to a notification policy.
## Next steps
For more information on connecting your contact point to a notification policy, see
[Create notification policy](ref:create-notification-policy).
The OnCall contact point is ready to receive alert notifications.
1. To view your integration in the Grafana OnCall application and set up routes and escalation chains, click the Link next to the integration on the Contact points list view page in the **Type** column.
To add the contact point to your alert rule, complete the following next steps:
This redirects you to the Grafana OnCall integration page in the Grafana OnCall application. From there, you can add [routes and escalation chains](ref:escalation-chain).
1. In Grafana, navigate to **Alerting** > **Alert rules**.
1. Edit or create a new alert rule.
1. Scroll down to the **Configure labels and notifications** section.
1. Under **Notifications**, click **Select contact point**.
1. From the drop-down menu, select the contact point you created.
1. Click **Save rule and exit**.

View File

@ -24,11 +24,16 @@ refs:
destination: /docs/grafana/<GRAFANA_VERSION>/dashboards/build-dashboards/create-dashboard-url-variables/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana-cloud/visualizations/dashboards/build-dashboards/create-dashboard-url-variables/
dashboard:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/dashboards/build-dashboards/create-dashboard/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana-cloud/visualizations/dashboards/build-dashboards/create-dashboard/
---
# Dashboard list
Dashboard lists allow you to display dynamic links to other dashboards. The list can be configured to use starred dashboards, recently viewed dashboards, a search query, and dashboard tags.
Dashboard lists allow you to display dynamic links to other dashboards. You can configure the list to use starred dashboards, recently viewed dashboards, a search query, and dashboard tags.
{{< figure src="/static/img/docs/v45/dashboard-list-panels.png" max-width="850px" alt="A dashboard list visualization" >}}
@ -38,7 +43,7 @@ You can use a dashboard list visualization to display a list of important dashbo
## Configure a dashboard list visualization
Once you've created a [dashboard](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/dashboards/build-dashboards/create-dashboard/), the following video shows you how to configure a dashboard list visualization:
Once you've created a [dashboard](ref:dashboard), the following video shows you how to configure a dashboard list visualization:
{{< youtube id="MserjWGWsh8" >}}
@ -58,7 +63,7 @@ Select this option to propagate the time range of the current dashboard to the d
### Include current template variable values
Select this option to include template variables currently used as query parameters in a link. When you click the link, any matching templates in the linked dashboard are set to the values from the link. Learn more in [Dashboard URL variables](ref:dashboard-url-variables).
Select this option to include template variables that are being used as query parameters in a link. When you click the link, any matching templates in the linked dashboard are set to the values from the link. Learn more in [Dashboard URL variables](ref:dashboard-url-variables).
### Starred
@ -70,11 +75,15 @@ Display recently viewed dashboards in alphabetical order.
### Search
Display dashboards by search query or tags. You must enter at least one value in **Query** or **Tags**. For the **Query** and **Tags** fields, variable interpolation is supported. For example, `$my_var` or `${my_var}`.
Display dashboards by search query or tags. You must enter at least one value in **Query** or **Tags**. For the **Query** and **Tags** fields, variable interpolation is supported. For example, `$my_var` or `${my_var}`. Learn more in [Search option](#search-options).
### Show headings
The selected list section (**Starred**, **Recently viewed**, **Search**) is shown as a heading.
The selected list section is shown as a heading:
- **Starred**
- **Recently viewed**
- **Search**
### Max items
@ -82,11 +91,11 @@ Sets the maximum number of items to list per section. For example, if you leave
## Search options
These options only apply if the **Search** option is selected.
These options only apply if you select the **Search** option.
### Query
Enter the query by which you want to search. Queries are case-insensitive and partial values are accepted.
Use this field to search by dashboard name. Query terms are case-insensitive and partial values are accepted. For example, if you have dashboards called "Indoor Temps" and "Outdoor temp", entering the word "temp" would return both results.
### Folder
@ -94,6 +103,8 @@ Select the dashboard folders that you want to display.
### Tags
Enter tags by which you want to search. Note that existing tags don't appear as you type, and they _are_ case sensitive.
Enter tags by which you want to search. Note that tags don't appear as you type, and they're case sensitive. Tag search uses an `OR` condition, so if a dashboard has one of the defined tags, it's included in the list.
> **Note:** When multiple tags and strings appear, the dashboard list displays those matching _all_ conditions.
{{< admonition type="note" >}}
When multiple tags and strings appear, the dashboard list displays those matching _all_ conditions.
{{< /admonition >}}

View File

@ -192,6 +192,7 @@ Experimental features might be changed or removed without prior notice.
| `databaseReadReplica` | Use a read replica for some database queries. |
| `alertingApiServer` | Register Alerting APIs with the K8s API server |
| `dashboardRestoreUI` | Enables the frontend to be able to restore a recently deleted dashboard |
| `cloudwatchMetricInsightsCrossAccount` | Enables cross account observability for Cloudwatch Metric Insights |
## Development feature toggles

View File

@ -1740,10 +1740,10 @@ Status: Accepted
{{% responsive-table %}}
| Name | Type | Go type | Required | Default | Description | Example |
| ----------- | ------------------------- | ------- | :------: | ------- | ----------- | ------- |
| EndMinute | int64 (formatted integer) | `int64` | | | | |
| StartMinute | int64 (formatted integer) | `int64` | | | | |
| Name | Type | Go type | Required | Default | Description | Example |
| ---------- | ------ | -------- | :------: | ------- | ----------- | ----------------------- |
| end_time | string | `string` | | | | `"end_time": "24:00"` |
| start_time | string | `string` | | | | `"start_time": "18:00"` |
{{% /responsive-table %}}

View File

@ -200,4 +200,5 @@ export interface FeatureToggles {
dashboardRestoreUI?: boolean;
cloudWatchRoundUpEndTime?: boolean;
bodyScrolling?: boolean;
cloudwatchMetricInsightsCrossAccount?: boolean;
}

View File

@ -108,6 +108,10 @@ func (hs *HTTPServer) registerRoutes() {
r.Get("/admin/storage/*", reqSignedIn, hs.Index)
}
if hs.Features.IsEnabledGlobally(featuremgmt.FlagOnPremToCloudMigrations) {
r.Get("/admin/migrate-to-cloud", reqOrgAdmin, hs.Index)
}
// feature toggle admin page
if hs.Features.IsEnabledGlobally(featuremgmt.FlagFeatureToggleAdminPage) {
r.Get("/admin/featuretoggles", authorize(ac.EvalPermission(ac.ActionFeatureManagementRead)), hs.Index)

View File

@ -26,6 +26,7 @@ import (
acdb "github.com/grafana/grafana/pkg/services/accesscontrol/database"
"github.com/grafana/grafana/pkg/services/accesscontrol/ossaccesscontrol"
"github.com/grafana/grafana/pkg/services/accesscontrol/resourcepermissions"
"github.com/grafana/grafana/pkg/services/authz/zanzana"
"github.com/grafana/grafana/pkg/services/contexthandler/ctxkey"
contextmodel "github.com/grafana/grafana/pkg/services/contexthandler/model"
"github.com/grafana/grafana/pkg/services/dashboards"
@ -460,7 +461,10 @@ func setupServer(b testing.TB, sc benchScenario, features featuremgmt.FeatureTog
cfg := setting.NewCfg()
actionSets := resourcepermissions.NewActionSetService()
acSvc := acimpl.ProvideOSSService(sc.cfg, acdb.ProvideService(sc.db), actionSets, localcache.ProvideService(), features, tracing.InitializeTracerForTest())
acSvc := acimpl.ProvideOSSService(
sc.cfg, acdb.ProvideService(sc.db), actionSets, localcache.ProvideService(),
features, tracing.InitializeTracerForTest(), zanzana.NewNoopClient(), sc.db,
)
folderPermissions, err := ossaccesscontrol.ProvideFolderPermissions(
cfg, features, routing.NewRouteRegister(), sc.db, ac, license, &dashboards.FakeDashboardStore{}, folderServiceWithFlagOn, acSvc, sc.teamSvc, sc.userSvc, actionSets)

View File

@ -22,6 +22,7 @@ import (
"github.com/grafana/grafana/pkg/infra/tracing"
"github.com/grafana/grafana/pkg/services/accesscontrol"
"github.com/grafana/grafana/pkg/services/accesscontrol/acimpl"
"github.com/grafana/grafana/pkg/services/authz/zanzana"
"github.com/grafana/grafana/pkg/services/featuremgmt"
"github.com/grafana/grafana/pkg/services/quota/quotaimpl"
"github.com/grafana/grafana/pkg/services/sqlstore"
@ -89,7 +90,7 @@ func initializeConflictResolver(cmd *utils.ContextCommandLine, f Formatter, ctx
if err != nil {
return nil, fmt.Errorf("%v: %w", "failed to initialize tracer service", err)
}
acService, err := acimpl.ProvideService(cfg, s, routing, nil, nil, nil, features, tracer)
acService, err := acimpl.ProvideService(cfg, s, routing, nil, nil, nil, features, tracer, zanzana.NewNoopClient())
if err != nil {
return nil, fmt.Errorf("%v: %w", "failed to get access control", err)
}

View File

@ -5,6 +5,8 @@ import (
"os"
"github.com/spf13/cobra"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/trace"
genericapiserver "k8s.io/apiserver/pkg/server"
"k8s.io/component-base/cli"
@ -108,14 +110,31 @@ func RunCLI(opts commands.ServerOptions) int {
return cli.Run(cmd)
}
type lateInitializedTracingProvider struct {
trace.TracerProvider
tracer *lateInitializedTracingService
}
func (tp lateInitializedTracingProvider) Tracer(name string, options ...trace.TracerOption) trace.Tracer {
return tp.tracer
}
type lateInitializedTracingService struct {
tracing.Tracer
}
func newLateInitializedTracingService() *lateInitializedTracingService {
return &lateInitializedTracingService{
ts := &lateInitializedTracingService{
Tracer: tracing.InitializeTracerForTest(),
}
tp := &lateInitializedTracingProvider{
tracer: ts,
}
otel.SetTracerProvider(tp)
return ts
}
func (s *lateInitializedTracingService) InitTracer(tracer tracing.Tracer) {

View File

@ -150,7 +150,7 @@ func (r *queryREST) Connect(connectCtx context.Context, name string, _ runtime.O
func (b *QueryAPIBuilder) execute(ctx context.Context, req parsedRequestInfo) (qdr *backend.QueryDataResponse, err error) {
switch len(req.Requests) {
case 0:
break // nothing to do
qdr = &backend.QueryDataResponse{}
case 1:
qdr, err = b.handleQuerySingleDatasource(ctx, req.Requests[0])
default:

View File

@ -26,6 +26,7 @@ import (
"github.com/grafana/grafana/pkg/services/accesscontrol/migrator"
"github.com/grafana/grafana/pkg/services/accesscontrol/pluginutils"
"github.com/grafana/grafana/pkg/services/authn"
"github.com/grafana/grafana/pkg/services/authz/zanzana"
"github.com/grafana/grafana/pkg/services/dashboards"
"github.com/grafana/grafana/pkg/services/featuremgmt"
"github.com/grafana/grafana/pkg/services/folder"
@ -46,8 +47,12 @@ var SharedWithMeFolderPermission = accesscontrol.Permission{
var OSSRolesPrefixes = []string{accesscontrol.ManagedRolePrefix, accesscontrol.ExternalServiceRolePrefix}
func ProvideService(cfg *setting.Cfg, db db.DB, routeRegister routing.RouteRegister, cache *localcache.CacheService, accessControl accesscontrol.AccessControl, actionResolver accesscontrol.ActionResolver, features featuremgmt.FeatureToggles, tracer tracing.Tracer) (*Service, error) {
service := ProvideOSSService(cfg, database.ProvideService(db), actionResolver, cache, features, tracer)
func ProvideService(
cfg *setting.Cfg, db db.DB, routeRegister routing.RouteRegister, cache *localcache.CacheService,
accessControl accesscontrol.AccessControl, actionResolver accesscontrol.ActionResolver,
features featuremgmt.FeatureToggles, tracer tracing.Tracer, zclient zanzana.Client,
) (*Service, error) {
service := ProvideOSSService(cfg, database.ProvideService(db), actionResolver, cache, features, tracer, zclient, db)
api.NewAccessControlAPI(routeRegister, accessControl, service, features).RegisterAPIEndpoints()
if err := accesscontrol.DeclareFixedRoles(service, cfg); err != nil {
@ -65,7 +70,11 @@ func ProvideService(cfg *setting.Cfg, db db.DB, routeRegister routing.RouteRegis
return service, nil
}
func ProvideOSSService(cfg *setting.Cfg, store accesscontrol.Store, actionResolver accesscontrol.ActionResolver, cache *localcache.CacheService, features featuremgmt.FeatureToggles, tracer tracing.Tracer) *Service {
func ProvideOSSService(
cfg *setting.Cfg, store accesscontrol.Store, actionResolver accesscontrol.ActionResolver,
cache *localcache.CacheService, features featuremgmt.FeatureToggles, tracer tracing.Tracer,
zclient zanzana.Client, db db.DB,
) *Service {
s := &Service{
actionResolver: actionResolver,
cache: cache,
@ -75,6 +84,7 @@ func ProvideOSSService(cfg *setting.Cfg, store accesscontrol.Store, actionResolv
roles: accesscontrol.BuildBasicRoleDefinitions(),
store: store,
tracer: tracer,
sync: migrator.NewZanzanaSynchroniser(zclient, db),
}
return s
@ -91,6 +101,7 @@ type Service struct {
roles map[string]*accesscontrol.RoleDTO
store accesscontrol.Store
tracer tracing.Tracer
sync *migrator.ZanzanaSynchroniser
}
func (s *Service) GetUsageStats(_ context.Context) map[string]any {
@ -397,6 +408,13 @@ func (s *Service) RegisterFixedRoles(ctx context.Context) error {
}
return true
})
if s.features.IsEnabledGlobally(featuremgmt.FlagZanzana) {
if err := s.sync.Sync(context.Background()); err != nil {
s.log.Error("Failed to synchronise permissions to zanzana ", "err", err)
}
}
return nil
}

View File

@ -69,6 +69,8 @@ func TestUsageMetrics(t *testing.T) {
localcache.ProvideService(),
featuremgmt.WithFeatures(),
tracing.InitializeTracerForTest(),
nil,
nil,
)
assert.Equal(t, tt.expectedValue, s.GetUsageStats(context.Background())["stats.oss.accesscontrol.enabled.count"])
})

View File

@ -0,0 +1,128 @@
package migrator
import (
"context"
"fmt"
"strconv"
"strings"
openfgav1 "github.com/openfga/api/proto/openfga/v1"
"github.com/grafana/grafana/pkg/infra/db"
"github.com/grafana/grafana/pkg/infra/log"
"github.com/grafana/grafana/pkg/services/authz/zanzana"
)
// A TupleCollector is responsible for building and storing [openfgav1.TupleKey] into the provided tuple map.
// The key used should be a unique group key for the collector so we can skip over an already synced group.
type TupleCollector func(ctx context.Context, tuples map[string][]*openfgav1.TupleKey) error
// ZanzanaSynchroniser is a component to sync RBAC permissions to zanzana.
// We should rewrite the migration after we have "migrated" all possible actions
// into our schema. This will only do a one-time migration for each action, so it
// is not really syncing the full RBAC state. If a fresh sync is needed, the
// tuples need to be cleared first.
type ZanzanaSynchroniser struct {
log log.Logger
client zanzana.Client
collectors []TupleCollector
}
// NewZanzanaSynchroniser builds a synchroniser from the provided collectors
// plus the shared collectors used by both enterprise and OSS.
func NewZanzanaSynchroniser(client zanzana.Client, store db.DB, collectors ...TupleCollector) *ZanzanaSynchroniser {
	// Append shared collectors that are used by both enterprise and OSS.
	collectors = append(collectors, managedPermissionsCollector(store))

	return &ZanzanaSynchroniser{
		// Bug fix: `client` was previously never assigned, so Sync would call
		// Write on a nil client and panic.
		client:     client,
		log:        log.New("zanzana.sync"),
		collectors: collectors,
	}
}
// Sync runs all collectors and tries to write all collected tuples.
// It will skip over any "sync group" that has already been written.
func (z *ZanzanaSynchroniser) Sync(ctx context.Context) error {
// Gather tuples from every registered collector, grouped by sync key.
tuplesMap := make(map[string][]*openfgav1.TupleKey)
for _, c := range z.collectors {
if err := c(ctx, tuplesMap); err != nil {
return fmt.Errorf("failed to collect permissions: %w", err)
}
}
// Write each group in batches of 100 tuples.
// NOTE(review): `batch` is a helper defined elsewhere in this package — it
// presumably invokes the callback with [start, end) slice bounds; confirm.
for key, tuples := range tuplesMap {
if err := batch(len(tuples), 100, func(start, end int) error {
return z.client.Write(ctx, &openfgav1.WriteRequest{
Writes: &openfgav1.WriteRequestWrites{
TupleKeys: tuples[start:end],
},
})
}); err != nil {
// "Already synced" detection matches the server's error text, which is
// brittle — it breaks if the OpenFGA error wording changes.
if strings.Contains(err.Error(), "cannot write a tuple which already exists") {
z.log.Debug("Skipping already synced permissions", "sync_key", key)
continue
}
return err
}
}
return nil
}
// managedPermissionsCollector collects managed permissions into the provided tuple map.
// It will only store actions that are supported by our schema. Managed permissions can
// be directly mapped to user/team/role without having to write an intermediate role.
func managedPermissionsCollector(store db.DB) TupleCollector {
	return func(ctx context.Context, tuples map[string][]*openfgav1.TupleKey) error {
		const collectorID = "managed"
		// Bug fix: select tr.team_id as its own column. Previously TeamID was
		// mapped onto the user_id column (tag `xorm:"user_id"`), which made team
		// bindings indistinguishable from user bindings, so team permissions were
		// never translated.
		const query = `
SELECT ur.user_id, tr.team_id, p.action, p.kind, p.identifier, r.org_id FROM permission p
INNER JOIN role r on p.role_id = r.id
LEFT JOIN user_role ur on r.id = ur.role_id
LEFT JOIN team_role tr on r.id = tr.role_id
LEFT JOIN builtin_role br on r.id = br.role_id
WHERE r.name LIKE 'managed:%'
`
		type Permission struct {
			RoleName   string `xorm:"role_name"`
			OrgID      int64  `xorm:"org_id"`
			Action     string `xorm:"action"`
			Kind       string
			Identifier string
			UserID     int64 `xorm:"user_id"`
			TeamID     int64 `xorm:"team_id"`
		}

		var permissions []Permission
		err := store.WithDbSession(ctx, func(sess *db.Session) error {
			return sess.SQL(query).Find(&permissions)
		})
		if err != nil {
			return err
		}

		for _, p := range permissions {
			// Resolve the tuple subject: direct user binding takes precedence,
			// then team binding.
			var subject string
			if p.UserID > 0 {
				subject = zanzana.NewObject(zanzana.TypeUser, strconv.FormatInt(p.UserID, 10))
			} else if p.TeamID > 0 {
				subject = zanzana.NewObject(zanzana.TypeTeam, strconv.FormatInt(p.TeamID, 10))
			} else {
				// FIXME(kalleep): Unsupported role binding (org role). We need to have basic roles in place
				continue
			}

			tuple, ok := zanzana.TranslateToTuple(subject, p.Action, p.Kind, p.Identifier, p.OrgID)
			if !ok {
				// Action or kind is not yet covered by the schema translation tables.
				continue
			}

			// Our "sync key" is a combination of collectorID and action so we can
			// sync new data when more actions become supported.
			key := fmt.Sprintf("%s-%s", collectorID, p.Action)
			tuples[key] = append(tuples[key], tuple)
		}

		return nil
	}
}

View File

@ -136,6 +136,16 @@ func (z *Zanzana) start(ctx context.Context) error {
}
// running serves the Zanzana gRPC handle, optionally exposing an OpenFGA HTTP
// frontend first when running in dev with Zanzana.ListenHTTP enabled.
func (z *Zanzana) running(ctx context.Context) error {
// The HTTP frontend is dev-only; it lets tools such as the fga CLI talk to
// the embedded OpenFGA server. Startup failures are logged, not fatal.
if z.cfg.Env == setting.Dev && z.cfg.Zanzana.ListenHTTP {
go func() {
z.logger.Info("Starting OpenFGA HTTP server")
err := zanzana.StartOpenFGAHttpSever(z.cfg, z.handle, z.logger)
if err != nil {
z.logger.Error("failed to start OpenFGA HTTP server", "error", err)
}
}()
}
// Run is blocking so we can just run it here
return z.handle.Run(ctx)
}

View File

@ -15,8 +15,9 @@ import (
// Client is a wrapper around [openfgav1.OpenFGAServiceClient]
type Client interface {
Check(ctx context.Context, in *openfgav1.CheckRequest, opts ...grpc.CallOption) (*openfgav1.CheckResponse, error)
ListObjects(ctx context.Context, in *openfgav1.ListObjectsRequest, opts ...grpc.CallOption) (*openfgav1.ListObjectsResponse, error)
Check(ctx context.Context, in *openfgav1.CheckRequest) (*openfgav1.CheckResponse, error)
ListObjects(ctx context.Context, in *openfgav1.ListObjectsRequest) (*openfgav1.ListObjectsResponse, error)
Write(ctx context.Context, in *openfgav1.WriteRequest) error
}
func NewClient(ctx context.Context, cc grpc.ClientConnInterface, cfg *setting.Cfg) (*client.Client, error) {
@ -27,3 +28,7 @@ func NewClient(ctx context.Context, cc grpc.ClientConnInterface, cfg *setting.Cf
client.WithLogger(log.New("zanzana-client")),
)
}
func NewNoopClient() *client.NoopClient {
return client.NewNoop()
}

View File

@ -70,12 +70,23 @@ func New(ctx context.Context, cc grpc.ClientConnInterface, opts ...ClientOption)
return c, nil
}
func (c *Client) Check(ctx context.Context, in *openfgav1.CheckRequest, opts ...grpc.CallOption) (*openfgav1.CheckResponse, error) {
return c.client.Check(ctx, in, opts...)
func (c *Client) Check(ctx context.Context, in *openfgav1.CheckRequest) (*openfgav1.CheckResponse, error) {
in.StoreId = c.storeID
in.AuthorizationModelId = c.modelID
return c.client.Check(ctx, in)
}
func (c *Client) ListObjects(ctx context.Context, in *openfgav1.ListObjectsRequest, opts ...grpc.CallOption) (*openfgav1.ListObjectsResponse, error) {
return c.client.ListObjects(ctx, in, opts...)
func (c *Client) ListObjects(ctx context.Context, in *openfgav1.ListObjectsRequest) (*openfgav1.ListObjectsResponse, error) {
in.StoreId = c.storeID
in.AuthorizationModelId = c.modelID
return c.client.ListObjects(ctx, in)
}
// Write forwards a write request to the OpenFGA service, stamping the client's
// configured store id and authorization model id onto the request first.
// NOTE: this mutates the caller's request in place; the gRPC response body is
// discarded and only the error is surfaced.
func (c *Client) Write(ctx context.Context, in *openfgav1.WriteRequest) error {
in.StoreId = c.storeID
in.AuthorizationModelId = c.modelID
_, err := c.client.Write(ctx, in)
return err
}
func (c *Client) getOrCreateStore(ctx context.Context, name string) (*openfgav1.Store, error) {

View File

@ -3,8 +3,6 @@ package client
import (
"context"
"google.golang.org/grpc"
openfgav1 "github.com/openfga/api/proto/openfga/v1"
)
@ -14,10 +12,14 @@ func NewNoop() *NoopClient {
type NoopClient struct{}
func (nc NoopClient) Check(ctx context.Context, in *openfgav1.CheckRequest, opts ...grpc.CallOption) (*openfgav1.CheckResponse, error) {
func (nc NoopClient) Check(ctx context.Context, in *openfgav1.CheckRequest) (*openfgav1.CheckResponse, error) {
return nil, nil
}
func (nc NoopClient) ListObjects(ctx context.Context, in *openfgav1.ListObjectsRequest, opts ...grpc.CallOption) (*openfgav1.ListObjectsResponse, error) {
func (nc NoopClient) ListObjects(ctx context.Context, in *openfgav1.ListObjectsRequest) (*openfgav1.ListObjectsResponse, error) {
return nil, nil
}
func (nc NoopClient) Write(ctx context.Context, in *openfgav1.WriteRequest) error {
return nil
}

View File

@ -1,10 +1,27 @@
package zanzana
import (
"context"
"fmt"
"net/http"
"time"
"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
openfgav1 "github.com/openfga/api/proto/openfga/v1"
httpmiddleware "github.com/openfga/openfga/pkg/middleware/http"
"github.com/openfga/openfga/pkg/server"
serverErrors "github.com/openfga/openfga/pkg/server/errors"
"github.com/openfga/openfga/pkg/storage"
"github.com/rs/cors"
"go.uber.org/zap/zapcore"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials/insecure"
healthv1pb "google.golang.org/grpc/health/grpc_health_v1"
"google.golang.org/grpc/status"
"github.com/grafana/grafana/pkg/infra/log"
"github.com/grafana/grafana/pkg/services/grpcserver"
"github.com/grafana/grafana/pkg/setting"
)
func NewServer(store storage.OpenFGADatastore, logger log.Logger) (*server.Server, error) {
@ -24,3 +41,70 @@ func NewServer(store storage.OpenFGADatastore, logger log.Logger) (*server.Serve
return srv, nil
}
// StartOpenFGAHttpSever starts an HTTP server that proxies the OpenFGA gRPC
// API, which allows using the fga CLI against the embedded server.
// It blocks until the gRPC server has an address (up to ~10s), then serves in
// the background and returns nil immediately.
func StartOpenFGAHttpSever(cfg *setting.Cfg, srv grpcserver.Provider, logger log.Logger) error {
	dialOpts := []grpc.DialOption{
		grpc.WithTransportCredentials(insecure.NewCredentials()),
	}

	addr := srv.GetAddress()
	// Wait until the gRPC server is initialized and reports an address.
	ticker := time.NewTicker(100 * time.Millisecond)
	defer ticker.Stop()
	maxRetries := 100
	retries := 0
	for addr == "" && retries < maxRetries {
		<-ticker.C
		addr = srv.GetAddress()
		retries++
	}
	if addr == "" {
		return fmt.Errorf("failed to start HTTP server: GRPC server unavailable")
	}

	conn, err := grpc.NewClient(addr, dialOpts...)
	if err != nil {
		return fmt.Errorf("unable to dial GRPC: %w", err)
	}

	// Translate gRPC errors into OpenFGA's encoded HTTP error format so CLI
	// clients see the responses they expect.
	muxOpts := []runtime.ServeMuxOption{
		runtime.WithForwardResponseOption(httpmiddleware.HTTPResponseModifier),
		runtime.WithErrorHandler(func(c context.Context,
			sr *runtime.ServeMux, mm runtime.Marshaler, w http.ResponseWriter, r *http.Request, e error) {
			intCode := serverErrors.ConvertToEncodedErrorCode(status.Convert(e))
			httpmiddleware.CustomHTTPErrorHandler(c, w, r, serverErrors.NewEncodedError(intCode, e.Error()))
		}),
		runtime.WithStreamErrorHandler(func(ctx context.Context, e error) *status.Status {
			intCode := serverErrors.ConvertToEncodedErrorCode(status.Convert(e))
			encodedErr := serverErrors.NewEncodedError(intCode, e.Error())
			return status.Convert(encodedErr)
		}),
		runtime.WithHealthzEndpoint(healthv1pb.NewHealthClient(conn)),
		runtime.WithOutgoingHeaderMatcher(func(s string) (string, bool) { return s, true }),
	}

	mux := runtime.NewServeMux(muxOpts...)
	if err := openfgav1.RegisterOpenFGAServiceHandler(context.TODO(), mux, conn); err != nil {
		return fmt.Errorf("failed to register gateway handler: %w", err)
	}

	httpServer := &http.Server{
		Addr: cfg.Zanzana.HttpAddr,
		Handler: cors.New(cors.Options{
			AllowedOrigins:   []string{"*"},
			AllowCredentials: true,
			AllowedHeaders:   []string{"*"},
			AllowedMethods: []string{http.MethodGet, http.MethodPost,
				http.MethodHead, http.MethodPatch, http.MethodDelete, http.MethodPut},
		}).Handler(mux),
		ReadHeaderTimeout: 30 * time.Second,
	}

	go func() {
		// Bug fix: use a goroutine-local error instead of assigning to the
		// enclosing `err`, which raced with this function's return path.
		if serveErr := httpServer.ListenAndServe(); serveErr != nil {
			logger.Error("failed to start http server", zapcore.Field{Key: "err", Type: zapcore.ErrorType, Interface: serveErr})
		}
	}()

	logger.Info(fmt.Sprintf("OpenFGA HTTP server listening on '%s'...", httpServer.Addr))
	return nil
}

View File

@ -0,0 +1,60 @@
package zanzana
import (
"fmt"
"strconv"
openfgav1 "github.com/openfga/api/proto/openfga/v1"
)
const (
TypeUser string = "user"
TypeTeam string = "team"
)
// NewObject renders an OpenFGA object reference of the form "<type>:<id>".
func NewObject(typ, id string) string {
	return fmt.Sprintf("%s:%s", typ, id)
}

// NewScopedObject renders an object reference whose identifier is prefixed
// with a scope, producing "<type>:<scope>-<id>".
func NewScopedObject(typ, id, scope string) string {
	scopedID := fmt.Sprintf("%s-%s", scope, id)
	return NewObject(typ, scopedID)
}
// rbac action to relation translation
var actionTranslations = map[string]string{}

// kindTranslation describes how an RBAC scope kind maps onto an OpenFGA
// object type, and whether its identifiers must be scoped per org.
type kindTranslation struct {
	typ       string
	orgScoped bool
}

// all kinds that we can translate into an OpenFGA object
var kindTranslations = map[string]kindTranslation{}

// TranslateToTuple converts an RBAC permission (action plus scope kind and
// identifier) for the given subject into an OpenFGA tuple. It returns false
// when either the action or the kind has no translation in our schema yet.
// Fix: the original set Relation twice (in the literal and again afterwards)
// and assigned User separately; fields are now set once in the literal.
func TranslateToTuple(user string, action, kind, identifier string, orgID int64) (*openfgav1.TupleKey, bool) {
	relation, ok := actionTranslations[action]
	if !ok {
		return nil, false
	}

	t, ok := kindTranslations[kind]
	if !ok {
		return nil, false
	}

	tuple := &openfgav1.TupleKey{
		User:     user,
		Relation: relation,
	}

	// UIDs in Grafana are not guaranteed to be unique across orgs, so
	// org-scoped kinds embed the org id in the object reference.
	if t.orgScoped {
		tuple.Object = NewScopedObject(t.typ, identifier, strconv.FormatInt(orgID, 10))
	} else {
		tuple.Object = NewObject(t.typ, identifier)
	}

	return tuple, true
}

View File

@ -112,16 +112,16 @@ func (h *ContextHandler) Middleware(next http.Handler) http.Handler {
reqContext.Logger = reqContext.Logger.New("traceID", traceID)
}
identity, err := h.authnService.Authenticate(ctx, &authn.Request{HTTPRequest: reqContext.Req, Resp: reqContext.Resp})
id, err := h.authnService.Authenticate(ctx, &authn.Request{HTTPRequest: reqContext.Req, Resp: reqContext.Resp})
if err != nil {
// Hack: set all errors on LookupTokenErr, so we can check it in auth middlewares
reqContext.LookupTokenErr = err
} else {
reqContext.SignedInUser = identity.SignedInUser()
reqContext.UserToken = identity.SessionToken
reqContext.SignedInUser = id.SignedInUser()
reqContext.UserToken = id.SessionToken
reqContext.IsSignedIn = !reqContext.SignedInUser.IsAnonymous
reqContext.AllowAnonymous = reqContext.SignedInUser.IsAnonymous
reqContext.IsRenderCall = identity.IsAuthenticatedBy(login.RenderModule)
reqContext.IsRenderCall = id.IsAuthenticatedBy(login.RenderModule)
}
reqContext.Logger = reqContext.Logger.New("userId", reqContext.UserID, "orgId", reqContext.OrgID, "uname", reqContext.Login)
@ -138,7 +138,7 @@ func (h *ContextHandler) Middleware(next http.Handler) http.Handler {
// End the span to make next handlers not wrapped within middleware span
span.End()
next.ServeHTTP(w, r)
next.ServeHTTP(w, r.WithContext(identity.WithRequester(ctx, id)))
})
}

View File

@ -9,6 +9,7 @@ import (
"github.com/stretchr/testify/require"
"github.com/grafana/grafana/pkg/api/routing"
"github.com/grafana/grafana/pkg/apimachinery/identity"
"github.com/grafana/grafana/pkg/infra/tracing"
"github.com/grafana/grafana/pkg/services/authn"
"github.com/grafana/grafana/pkg/services/authn/authntest"
@ -44,20 +45,24 @@ func TestContextHandler(t *testing.T) {
})
t.Run("should set identity on successful authentication", func(t *testing.T) {
identity := &authn.Identity{ID: authn.NewNamespaceID(authn.NamespaceUser, 1), OrgID: 1}
id := &authn.Identity{ID: authn.NewNamespaceID(authn.NamespaceUser, 1), OrgID: 1}
handler := contexthandler.ProvideService(
setting.NewCfg(),
tracing.InitializeTracerForTest(),
featuremgmt.WithFeatures(),
&authntest.FakeService{ExpectedIdentity: identity},
&authntest.FakeService{ExpectedIdentity: id},
)
server := webtest.NewServer(t, routing.NewRouteRegister())
server.Mux.Use(handler.Middleware)
server.Mux.Get("/api/handler", func(c *contextmodel.ReqContext) {
require.True(t, c.IsSignedIn)
require.EqualValues(t, identity.SignedInUser(), c.SignedInUser)
require.EqualValues(t, id.SignedInUser(), c.SignedInUser)
require.NoError(t, c.LookupTokenErr)
requester, err := identity.GetRequester(c.Req.Context())
require.NoError(t, err)
require.Equal(t, id, requester)
})
res, err := server.Send(server.NewGetRequest("/api/handler"))

View File

@ -1371,6 +1371,13 @@ var (
HideFromDocs: true,
HideFromAdminPage: true,
},
{
Name: "cloudwatchMetricInsightsCrossAccount",
Description: "Enables cross account observability for Cloudwatch Metric Insights",
Stage: FeatureStageExperimental,
Owner: awsDatasourcesSquad,
FrontendOnly: true,
},
}
)

View File

@ -181,3 +181,4 @@ alertingApiServer,experimental,@grafana/alerting-squad,false,true,false
dashboardRestoreUI,experimental,@grafana/grafana-frontend-platform,false,false,false
cloudWatchRoundUpEndTime,GA,@grafana/aws-datasources,false,false,false
bodyScrolling,experimental,@grafana/grafana-frontend-platform,false,false,true
cloudwatchMetricInsightsCrossAccount,experimental,@grafana/aws-datasources,false,false,true

1 Name Stage Owner requiresDevMode RequiresRestart FrontendOnly
181 dashboardRestoreUI experimental @grafana/grafana-frontend-platform false false false
182 cloudWatchRoundUpEndTime GA @grafana/aws-datasources false false false
183 bodyScrolling experimental @grafana/grafana-frontend-platform false false true
184 cloudwatchMetricInsightsCrossAccount experimental @grafana/aws-datasources false false true

View File

@ -734,4 +734,8 @@ const (
// FlagBodyScrolling
// Adjusts Page to make body the scrollable element
FlagBodyScrolling = "bodyScrolling"
// FlagCloudwatchMetricInsightsCrossAccount
// Enables cross account observability for Cloudwatch Metric Insights
FlagCloudwatchMetricInsightsCrossAccount = "cloudwatchMetricInsightsCrossAccount"
)

View File

@ -590,6 +590,19 @@
"codeowner": "@grafana/aws-datasources"
}
},
{
"metadata": {
"name": "cloudwatchMetricInsightsCrossAccount",
"resourceVersion": "1719497905377",
"creationTimestamp": "2024-06-27T14:18:25Z"
},
"spec": {
"description": "Enables cross account observability for Cloudwatch Metric Insights",
"stage": "experimental",
"codeowner": "@grafana/aws-datasources",
"frontend": true
}
},
{
"metadata": {
"name": "configurableSchedulerTick",

View File

@ -45,7 +45,10 @@ func setupTestEnv(t *testing.T) *TestEnv {
}
logger := log.New("extsvcaccounts.test")
env.S = &ExtSvcAccountsService{
acSvc: acimpl.ProvideOSSService(cfg, env.AcStore, &resourcepermissions.FakeActionSetSvc{}, localcache.New(0, 0), fmgt, tracing.InitializeTracerForTest()),
acSvc: acimpl.ProvideOSSService(
cfg, env.AcStore, &resourcepermissions.FakeActionSetSvc{},
localcache.New(0, 0), fmgt, tracing.InitializeTracerForTest(), nil, nil,
),
features: fmgt,
logger: logger,
metrics: newMetrics(nil, env.SaSvc, logger),

View File

@ -62,6 +62,7 @@ func ProvideService(cfg *setting.Cfg, sqlStore db.DB, ac ac.AccessControl,
if features.IsEnabledGlobally(featuremgmt.FlagSsoSettingsLDAP) {
providersList = append(providersList, social.LDAPProviderName)
configurableProviders[social.LDAPProviderName] = true
}
if licensing.FeatureEnabled(social.SAMLProviderName) {
@ -320,21 +321,23 @@ func (s *Service) getFallbackStrategyFor(provider string) (ssosettings.FallbackS
}
func (s *Service) encryptSecrets(ctx context.Context, settings map[string]any) (map[string]any, error) {
result := make(map[string]any)
for k, v := range settings {
if IsSecretField(k) && v != "" {
strValue, ok := v.(string)
if !ok {
return result, fmt.Errorf("failed to encrypt %s setting because it is not a string: %v", k, v)
}
result := deepCopyMap(settings)
configs := getConfigMaps(result)
encryptedSecret, err := s.secrets.Encrypt(ctx, []byte(strValue), secrets.WithoutScope())
if err != nil {
return result, err
for _, config := range configs {
for k, v := range config {
if IsSecretField(k) && v != "" {
strValue, ok := v.(string)
if !ok {
return result, fmt.Errorf("failed to encrypt %s setting because it is not a string: %v", k, v)
}
encryptedSecret, err := s.secrets.Encrypt(ctx, []byte(strValue), secrets.WithoutScope())
if err != nil {
return result, err
}
config[k] = base64.RawStdEncoding.EncodeToString(encryptedSecret)
}
result[k] = base64.RawStdEncoding.EncodeToString(encryptedSecret)
} else {
result[k] = v
}
}
@ -411,29 +414,34 @@ func (s *Service) mergeSSOSettings(dbSettings, systemSettings *models.SSOSetting
}
func (s *Service) decryptSecrets(ctx context.Context, settings map[string]any) (map[string]any, error) {
for k, v := range settings {
if IsSecretField(k) && v != "" {
strValue, ok := v.(string)
if !ok {
s.logger.Error("Failed to parse secret value, it is not a string", "key", k)
return nil, fmt.Errorf("secret value is not a string")
}
configs := getConfigMaps(settings)
decoded, err := base64.RawStdEncoding.DecodeString(strValue)
if err != nil {
s.logger.Error("Failed to decode secret string", "err", err, "value")
return nil, err
}
for _, config := range configs {
for k, v := range config {
if IsSecretField(k) && v != "" {
strValue, ok := v.(string)
if !ok {
s.logger.Error("Failed to parse secret value, it is not a string", "key", k)
return nil, fmt.Errorf("secret value is not a string")
}
decrypted, err := s.secrets.Decrypt(ctx, decoded)
if err != nil {
s.logger.Error("Failed to decrypt secret", "err", err)
return nil, err
}
decoded, err := base64.RawStdEncoding.DecodeString(strValue)
if err != nil {
s.logger.Error("Failed to decode secret string", "err", err, "value")
return nil, err
}
settings[k] = string(decrypted)
decrypted, err := s.secrets.Decrypt(ctx, decoded)
if err != nil {
s.logger.Error("Failed to decrypt secret", "err", err)
return nil, err
}
config[k] = string(decrypted)
}
}
}
return settings, nil
}
@ -445,18 +453,39 @@ func (s *Service) isProviderConfigurable(provider string) bool {
// removeSecrets removes all the secrets from the map and replaces them with a redacted password
// and returns a new map
func removeSecrets(settings map[string]any) map[string]any {
result := make(map[string]any)
for k, v := range settings {
val, ok := v.(string)
if ok && val != "" && IsSecretField(k) {
result[k] = setting.RedactedPassword
continue
result := deepCopyMap(settings)
configs := getConfigMaps(result)
for _, config := range configs {
for k, v := range config {
val, ok := v.(string)
if ok && val != "" && IsSecretField(k) {
config[k] = setting.RedactedPassword
}
}
result[k] = v
}
return result
}
// getConfigMaps returns a list of maps that may contain secrets
func getConfigMaps(settings map[string]any) []map[string]any {
// always include the main settings map
result := []map[string]any{settings}
// for LDAP include settings for each server
if config, ok := settings["config"].(map[string]any); ok {
if servers, ok := config["servers"].([]any); ok {
for _, server := range servers {
if serverSettings, ok := server.(map[string]any); ok {
result = append(result, serverSettings)
}
}
}
}
return result
}
// mergeSettings merges two maps in a way that the values from the first map are preserved
// and the values from the second map are added only if they don't exist in the first map
// or if they contain empty URLs.
@ -500,23 +529,25 @@ func isMergingAllowed(fieldName string) bool {
// mergeSecrets returns a new map with the current value for secrets that have not been updated
func mergeSecrets(settings map[string]any, storedSettings map[string]any) (map[string]any, error) {
settingsWithSecrets := map[string]any{}
for k, v := range settings {
if IsSecretField(k) {
strValue, ok := v.(string)
if !ok {
return nil, fmt.Errorf("secret value is not a string")
}
settingsWithSecrets := deepCopyMap(settings)
newConfigs := getConfigMaps(settingsWithSecrets)
storedConfigs := getConfigMaps(storedSettings)
if isNewSecretValue(strValue) {
settingsWithSecrets[k] = strValue // use the new value
continue
for i, config := range newConfigs {
for k, v := range config {
if IsSecretField(k) {
strValue, ok := v.(string)
if !ok {
return nil, fmt.Errorf("secret value is not a string")
}
if !isNewSecretValue(strValue) && len(storedConfigs) > i {
config[k] = storedConfigs[i][k] // use the currently stored value
}
}
settingsWithSecrets[k] = storedSettings[k] // keep the currently stored value
} else {
settingsWithSecrets[k] = v
}
}
return settingsWithSecrets, nil
}
@ -532,7 +563,7 @@ func overrideMaps(maps ...map[string]any) map[string]any {
// IsSecretField returns true if the SSO settings field provided is a secret
func IsSecretField(fieldName string) bool {
secretFieldPatterns := []string{"secret", "private", "certificate"}
secretFieldPatterns := []string{"secret", "private", "certificate", "password", "client_key"}
for _, v := range secretFieldPatterns {
if strings.Contains(strings.ToLower(fieldName), strings.ToLower(v)) {
@ -554,3 +585,37 @@ func isEmptyString(val any) bool {
func isNewSecretValue(value string) bool {
return value != setting.RedactedPassword
}
// deepCopyMap returns a copy of settings in which every nested map[string]any
// and []any value is recursively copied, so mutating the result can never
// affect the original map.
func deepCopyMap(settings map[string]any) map[string]any {
	copied := make(map[string]any, len(settings))
	for k, v := range settings {
		switch typed := v.(type) {
		case map[string]any:
			copied[k] = deepCopyMap(typed)
		case []any:
			copied[k] = deepCopySlice(typed)
		default:
			// scalars (and any other types) are copied by value/reference as-is
			copied[k] = typed
		}
	}
	return copied
}

// deepCopySlice returns a copy of s in which every nested map[string]any and
// []any element is recursively copied.
func deepCopySlice(s []any) []any {
	copied := make([]any, len(s))
	for i, v := range s {
		switch typed := v.(type) {
		case map[string]any:
			copied[i] = deepCopyMap(typed)
		case []any:
			copied[i] = deepCopySlice(typed)
		default:
			copied[i] = typed
		}
	}
	return copied
}

View File

@ -158,6 +158,62 @@ func TestService_GetForProvider(t *testing.T) {
},
wantErr: false,
},
{
name: "should decrypt secrets for LDAP if data is coming from store",
provider: "ldap",
setup: func(env testEnv) {
env.store.ExpectedSSOSetting = &models.SSOSettings{
Provider: "ldap",
Settings: map[string]any{
"enabled": true,
"config": map[string]any{
"servers": []any{
map[string]any{
"host": "192.168.0.1",
"bind_password": base64.RawStdEncoding.EncodeToString([]byte("bind_password_1")),
"client_key": base64.RawStdEncoding.EncodeToString([]byte("client_key_1")),
},
map[string]any{
"host": "192.168.0.2",
"bind_password": base64.RawStdEncoding.EncodeToString([]byte("bind_password_2")),
"client_key": base64.RawStdEncoding.EncodeToString([]byte("client_key_2")),
},
},
},
},
Source: models.DB,
}
env.fallbackStrategy.ExpectedIsMatch = true
env.fallbackStrategy.ExpectedConfigs = map[string]map[string]any{}
env.secrets.On("Decrypt", mock.Anything, []byte("bind_password_1"), mock.Anything).Return([]byte("decrypted-bind-password-1"), nil).Once()
env.secrets.On("Decrypt", mock.Anything, []byte("client_key_1"), mock.Anything).Return([]byte("decrypted-client-key-1"), nil).Once()
env.secrets.On("Decrypt", mock.Anything, []byte("bind_password_2"), mock.Anything).Return([]byte("decrypted-bind-password-2"), nil).Once()
env.secrets.On("Decrypt", mock.Anything, []byte("client_key_2"), mock.Anything).Return([]byte("decrypted-client-key-2"), nil).Once()
},
want: &models.SSOSettings{
Provider: "ldap",
Settings: map[string]any{
"enabled": true,
"config": map[string]any{
"servers": []any{
map[string]any{
"host": "192.168.0.1",
"bind_password": "decrypted-bind-password-1",
"client_key": "decrypted-client-key-1",
},
map[string]any{
"host": "192.168.0.2",
"bind_password": "decrypted-bind-password-2",
"client_key": "decrypted-client-key-2",
},
},
},
},
Source: models.DB,
},
wantErr: false,
},
{
name: "should not decrypt secrets if data is coming from the fallback strategy",
provider: "github",
@ -290,7 +346,7 @@ func TestService_GetForProvider(t *testing.T) {
t.Run(tc.name, func(t *testing.T) {
t.Parallel()
env := setupTestEnv(t, true, false, true)
env := setupTestEnv(t, true, false, true, true)
if tc.setup != nil {
tc.setup(env)
}
@ -314,13 +370,15 @@ func TestService_GetForProviderWithRedactedSecrets(t *testing.T) {
t.Parallel()
testCases := []struct {
name string
setup func(env testEnv)
want *models.SSOSettings
wantErr bool
name string
provider string
setup func(env testEnv)
want *models.SSOSettings
wantErr bool
}{
{
name: "should return successfully and redact secrets",
name: "should return successfully and redact secrets",
provider: "github",
setup: func(env testEnv) {
env.store.ExpectedSSOSetting = &models.SSOSettings{
Provider: "github",
@ -347,13 +405,67 @@ func TestService_GetForProviderWithRedactedSecrets(t *testing.T) {
wantErr: false,
},
{
name: "should return error if store returns an error different than not found",
setup: func(env testEnv) { env.store.ExpectedError = fmt.Errorf("error") },
want: nil,
wantErr: true,
name: "should return successfully and redact secrets for LDAP",
provider: "ldap",
setup: func(env testEnv) {
env.store.ExpectedSSOSetting = &models.SSOSettings{
Provider: "ldap",
Settings: map[string]any{
"enabled": true,
"config": map[string]any{
"servers": []any{
map[string]any{
"host": "192.168.0.1",
"bind_password": base64.RawStdEncoding.EncodeToString([]byte("bind_password_1")),
"client_key": base64.RawStdEncoding.EncodeToString([]byte("client_key_1")),
},
map[string]any{
"host": "192.168.0.2",
"bind_password": base64.RawStdEncoding.EncodeToString([]byte("bind_password_2")),
"client_key": base64.RawStdEncoding.EncodeToString([]byte("client_key_2")),
},
},
},
},
Source: models.DB,
}
env.secrets.On("Decrypt", mock.Anything, []byte("bind_password_1"), mock.Anything).Return([]byte("decrypted-bind-password-1"), nil).Once()
env.secrets.On("Decrypt", mock.Anything, []byte("client_key_1"), mock.Anything).Return([]byte("decrypted-client-key-1"), nil).Once()
env.secrets.On("Decrypt", mock.Anything, []byte("bind_password_2"), mock.Anything).Return([]byte("decrypted-bind-password-2"), nil).Once()
env.secrets.On("Decrypt", mock.Anything, []byte("client_key_2"), mock.Anything).Return([]byte("decrypted-client-key-2"), nil).Once()
},
want: &models.SSOSettings{
Provider: "ldap",
Settings: map[string]any{
"enabled": true,
"config": map[string]any{
"servers": []any{
map[string]any{
"host": "192.168.0.1",
"bind_password": "*********",
"client_key": "*********",
},
map[string]any{
"host": "192.168.0.2",
"bind_password": "*********",
"client_key": "*********",
},
},
},
},
},
wantErr: false,
},
{
name: "should fallback to strategy if store returns not found",
name: "should return error if store returns an error different than not found",
provider: "github",
setup: func(env testEnv) { env.store.ExpectedError = fmt.Errorf("error") },
want: nil,
wantErr: true,
},
{
name: "should fallback to strategy if store returns not found",
provider: "github",
setup: func(env testEnv) {
env.store.ExpectedError = ssosettings.ErrNotFound
env.fallbackStrategy.ExpectedIsMatch = true
@ -371,7 +483,8 @@ func TestService_GetForProviderWithRedactedSecrets(t *testing.T) {
wantErr: false,
},
{
name: "should return error if the fallback strategy was not found",
name: "should return error if the fallback strategy was not found",
provider: "github",
setup: func(env testEnv) {
env.store.ExpectedError = ssosettings.ErrNotFound
env.fallbackStrategy.ExpectedIsMatch = false
@ -380,7 +493,8 @@ func TestService_GetForProviderWithRedactedSecrets(t *testing.T) {
wantErr: true,
},
{
name: "should return error if fallback strategy returns error",
name: "should return error if fallback strategy returns error",
provider: "github",
setup: func(env testEnv) {
env.store.ExpectedError = ssosettings.ErrNotFound
env.fallbackStrategy.ExpectedIsMatch = true
@ -399,12 +513,12 @@ func TestService_GetForProviderWithRedactedSecrets(t *testing.T) {
t.Run(tc.name, func(t *testing.T) {
t.Parallel()
env := setupTestEnv(t, false, false, false)
env := setupTestEnv(t, false, false, false, true)
if tc.setup != nil {
tc.setup(env)
}
actual, err := env.service.GetForProviderWithRedactedSecrets(context.Background(), "github")
actual, err := env.service.GetForProviderWithRedactedSecrets(context.Background(), tc.provider)
if tc.wantErr {
require.Error(t, err)
@ -550,7 +664,7 @@ func TestService_List(t *testing.T) {
t.Run(tc.name, func(t *testing.T) {
t.Parallel()
env := setupTestEnv(t, false, false, false)
env := setupTestEnv(t, false, false, false, false)
if tc.setup != nil {
tc.setup(env)
}
@ -852,7 +966,7 @@ func TestService_ListWithRedactedSecrets(t *testing.T) {
t.Run(tc.name, func(t *testing.T) {
t.Parallel()
env := setupTestEnv(t, false, false, false)
env := setupTestEnv(t, false, false, false, false)
if tc.setup != nil {
tc.setup(env)
}
@ -876,7 +990,7 @@ func TestService_Upsert(t *testing.T) {
t.Run("successfully upsert SSO settings", func(t *testing.T) {
t.Parallel()
env := setupTestEnv(t, false, false, false)
env := setupTestEnv(t, false, false, false, false)
provider := social.AzureADProviderName
settings := models.SSOSettings{
@ -936,10 +1050,80 @@ func TestService_Upsert(t *testing.T) {
require.EqualValues(t, settings, env.store.ActualSSOSettings)
})
t.Run("successfully upsert SSO settings for LDAP", func(t *testing.T) {
t.Parallel()
env := setupTestEnv(t, false, false, false, true)
provider := social.LDAPProviderName
settings := models.SSOSettings{
Provider: provider,
Settings: map[string]any{
"enabled": true,
"config": map[string]any{
"servers": []any{
map[string]any{
"host": "192.168.0.1",
"bind_password": "bind_password_1",
"client_key": "client_key_1",
},
map[string]any{
"host": "192.168.0.2",
"bind_password": "bind_password_2",
"client_key": "client_key_2",
},
},
},
},
}
var wg sync.WaitGroup
wg.Add(1)
reloadable := ssosettingstests.NewMockReloadable(t)
reloadable.On("Validate", mock.Anything, settings, mock.Anything, mock.Anything).Return(nil)
reloadable.On("Reload", mock.Anything, mock.MatchedBy(func(settings models.SSOSettings) bool {
defer wg.Done()
return settings.Provider == provider &&
settings.ID == "someid" &&
maps.Equal(settings.Settings["config"].(map[string]any)["servers"].([]any)[0].(map[string]any), map[string]any{
"host": "192.168.0.1",
"bind_password": "bind_password_1",
"client_key": "client_key_1",
}) && maps.Equal(settings.Settings["config"].(map[string]any)["servers"].([]any)[1].(map[string]any), map[string]any{
"host": "192.168.0.2",
"bind_password": "bind_password_2",
"client_key": "client_key_2",
})
})).Return(nil).Once()
env.reloadables[provider] = reloadable
env.secrets.On("Encrypt", mock.Anything, []byte("bind_password_1"), mock.Anything).Return([]byte("encrypted-bind-password-1"), nil).Once()
env.secrets.On("Encrypt", mock.Anything, []byte("bind_password_2"), mock.Anything).Return([]byte("encrypted-bind-password-2"), nil).Once()
env.secrets.On("Encrypt", mock.Anything, []byte("client_key_1"), mock.Anything).Return([]byte("encrypted-client-key-1"), nil).Once()
env.secrets.On("Encrypt", mock.Anything, []byte("client_key_2"), mock.Anything).Return([]byte("encrypted-client-key-2"), nil).Once()
env.store.UpsertFn = func(ctx context.Context, settings *models.SSOSettings) error {
currentTime := time.Now()
settings.ID = "someid"
settings.Created = currentTime
settings.Updated = currentTime
env.store.ActualSSOSettings = *settings
return nil
}
err := env.service.Upsert(context.Background(), &settings, &user.SignedInUser{})
require.NoError(t, err)
// Wait for the goroutine first to assert the Reload call
wg.Wait()
require.EqualValues(t, settings, env.store.ActualSSOSettings)
})
t.Run("returns error if provider is not configurable", func(t *testing.T) {
t.Parallel()
env := setupTestEnv(t, false, false, false)
env := setupTestEnv(t, false, false, false, false)
provider := social.GrafanaComProviderName
settings := &models.SSOSettings{
@ -962,7 +1146,7 @@ func TestService_Upsert(t *testing.T) {
t.Run("returns error if provider was not found in reloadables", func(t *testing.T) {
t.Parallel()
env := setupTestEnv(t, false, false, false)
env := setupTestEnv(t, false, false, false, false)
provider := social.AzureADProviderName
settings := &models.SSOSettings{
@ -986,7 +1170,7 @@ func TestService_Upsert(t *testing.T) {
t.Run("returns error if validation fails", func(t *testing.T) {
t.Parallel()
env := setupTestEnv(t, false, false, false)
env := setupTestEnv(t, false, false, false, false)
provider := social.AzureADProviderName
settings := models.SSOSettings{
@ -1010,7 +1194,7 @@ func TestService_Upsert(t *testing.T) {
t.Run("returns error if a fallback strategy is not available for the provider", func(t *testing.T) {
t.Parallel()
env := setupTestEnv(t, false, false, false)
env := setupTestEnv(t, false, false, false, false)
settings := &models.SSOSettings{
Provider: social.AzureADProviderName,
@ -1031,7 +1215,7 @@ func TestService_Upsert(t *testing.T) {
t.Run("returns error if a secret does not have the type string", func(t *testing.T) {
t.Parallel()
env := setupTestEnv(t, false, false, false)
env := setupTestEnv(t, false, false, false, false)
provider := social.OktaProviderName
settings := models.SSOSettings{
@ -1054,7 +1238,7 @@ func TestService_Upsert(t *testing.T) {
t.Run("returns error if secrets encryption failed", func(t *testing.T) {
t.Parallel()
env := setupTestEnv(t, false, false, false)
env := setupTestEnv(t, false, false, false, false)
provider := social.OktaProviderName
settings := models.SSOSettings{
@ -1079,7 +1263,7 @@ func TestService_Upsert(t *testing.T) {
t.Run("should not update the current secret if the secret has not been updated", func(t *testing.T) {
t.Parallel()
env := setupTestEnv(t, false, false, false)
env := setupTestEnv(t, false, false, false, false)
provider := social.AzureADProviderName
settings := models.SSOSettings{
@ -1123,7 +1307,7 @@ func TestService_Upsert(t *testing.T) {
t.Run("run validation with all new and current secrets available in settings", func(t *testing.T) {
t.Parallel()
env := setupTestEnv(t, false, false, false)
env := setupTestEnv(t, false, false, false, false)
provider := social.AzureADProviderName
settings := models.SSOSettings{
@ -1176,7 +1360,7 @@ func TestService_Upsert(t *testing.T) {
t.Run("returns error if store failed to upsert settings", func(t *testing.T) {
t.Parallel()
env := setupTestEnv(t, false, false, false)
env := setupTestEnv(t, false, false, false, false)
provider := social.AzureADProviderName
settings := models.SSOSettings{
@ -1208,7 +1392,7 @@ func TestService_Upsert(t *testing.T) {
t.Run("successfully upsert SSO settings if reload fails", func(t *testing.T) {
t.Parallel()
env := setupTestEnv(t, false, false, false)
env := setupTestEnv(t, false, false, false, false)
provider := social.AzureADProviderName
settings := models.SSOSettings{
@ -1241,7 +1425,7 @@ func TestService_Delete(t *testing.T) {
t.Run("successfully delete SSO settings", func(t *testing.T) {
t.Parallel()
env := setupTestEnv(t, false, false, false)
env := setupTestEnv(t, false, false, false, false)
var wg sync.WaitGroup
wg.Add(1)
@ -1279,7 +1463,7 @@ func TestService_Delete(t *testing.T) {
t.Run("return error if SSO setting was not found for the specified provider", func(t *testing.T) {
t.Parallel()
env := setupTestEnv(t, false, false, false)
env := setupTestEnv(t, false, false, false, false)
provider := social.AzureADProviderName
reloadable := ssosettingstests.NewMockReloadable(t)
@ -1295,7 +1479,7 @@ func TestService_Delete(t *testing.T) {
t.Run("should not delete the SSO settings if the provider is not configurable", func(t *testing.T) {
t.Parallel()
env := setupTestEnv(t, false, false, false)
env := setupTestEnv(t, false, false, false, false)
env.cfg.SSOSettingsConfigurableProviders = map[string]bool{social.AzureADProviderName: true}
provider := social.GrafanaComProviderName
@ -1308,7 +1492,7 @@ func TestService_Delete(t *testing.T) {
t.Run("return error when store fails to delete the SSO settings for the specified provider", func(t *testing.T) {
t.Parallel()
env := setupTestEnv(t, false, false, false)
env := setupTestEnv(t, false, false, false, false)
provider := social.AzureADProviderName
env.store.ExpectedError = errors.New("delete sso settings failed")
@ -1321,7 +1505,7 @@ func TestService_Delete(t *testing.T) {
t.Run("return successfully when the deletion was successful but reloading the settings fail", func(t *testing.T) {
t.Parallel()
env := setupTestEnv(t, false, false, false)
env := setupTestEnv(t, false, false, false, false)
provider := social.AzureADProviderName
reloadable := ssosettingstests.NewMockReloadable(t)
@ -1337,13 +1521,51 @@ func TestService_Delete(t *testing.T) {
})
}
// we might not need this test because it is not testing the public interface
// it was added for convenient testing of the internal deep copy and remove secrets
func TestRemoveSecrets(t *testing.T) {
	settings := map[string]any{
		"enabled":       true,
		"client_secret": "client_secret",
		"config": map[string]any{
			"servers": []any{
				map[string]any{
					"host":          "192.168.0.1",
					"bind_password": "bind_password_1",
					"client_key":    "client_key_1",
				},
				map[string]any{
					"host":          "192.168.0.2",
					"bind_password": "bind_password_2",
					"client_key":    "client_key_2",
				},
			},
		},
	}

	// firstServer reaches the first LDAP server entry inside a settings map,
	// replacing the repeated deep type-assertion chains of the original.
	firstServer := func(m map[string]any) map[string]any {
		return m["config"].(map[string]any)["servers"].([]any)[0].(map[string]any)
	}

	// mutating the deep copy must leave the original untouched
	cloned := deepCopyMap(settings)
	cloned["client_secret"] = "client_secret_updated"
	firstServer(cloned)["bind_password"] = "bind_password_1_updated"

	require.Equal(t, "client_secret", settings["client_secret"])
	require.Equal(t, "client_secret_updated", cloned["client_secret"])
	require.Equal(t, "bind_password_1", firstServer(settings)["bind_password"])
	require.Equal(t, "bind_password_1_updated", firstServer(cloned)["bind_password"])

	// redaction must also operate on a copy, never on the input map
	redacted := removeSecrets(settings)

	require.Equal(t, "client_secret", settings["client_secret"])
	require.Equal(t, "*********", redacted["client_secret"])
	require.Equal(t, "bind_password_1", firstServer(settings)["bind_password"])
	require.Equal(t, "*********", firstServer(redacted)["bind_password"])
}
func TestService_DoReload(t *testing.T) {
t.Parallel()
t.Run("successfully reload settings", func(t *testing.T) {
t.Parallel()
env := setupTestEnv(t, false, false, false)
env := setupTestEnv(t, false, false, false, false)
settingsList := []*models.SSOSettings{
{
@ -1383,7 +1605,7 @@ func TestService_DoReload(t *testing.T) {
t.Run("successfully reload settings when some providers have empty settings", func(t *testing.T) {
t.Parallel()
env := setupTestEnv(t, false, false, false)
env := setupTestEnv(t, false, false, false, false)
settingsList := []*models.SSOSettings{
{
@ -1413,7 +1635,7 @@ func TestService_DoReload(t *testing.T) {
t.Run("failed fetching the SSO settings", func(t *testing.T) {
t.Parallel()
env := setupTestEnv(t, false, false, false)
env := setupTestEnv(t, false, false, false, false)
provider := "github"
@ -1459,6 +1681,35 @@ func TestService_decryptSecrets(t *testing.T) {
"certificate": "decrypted-certificate",
},
},
{
name: "should decrypt LDAP secrets successfully",
setup: func(env testEnv) {
env.secrets.On("Decrypt", mock.Anything, []byte("client_key"), mock.Anything).Return([]byte("decrypted-client-key"), nil).Once()
env.secrets.On("Decrypt", mock.Anything, []byte("bind_password"), mock.Anything).Return([]byte("decrypted-bind-password"), nil).Once()
},
settings: map[string]any{
"enabled": true,
"config": map[string]any{
"servers": []any{
map[string]any{
"client_key": base64.RawStdEncoding.EncodeToString([]byte("client_key")),
"bind_password": base64.RawStdEncoding.EncodeToString([]byte("bind_password")),
},
},
},
},
want: map[string]any{
"enabled": true,
"config": map[string]any{
"servers": []any{
map[string]any{
"client_key": "decrypted-client-key",
"bind_password": "decrypted-bind-password",
},
},
},
},
},
{
name: "should not decrypt when a secret is empty",
setup: func(env testEnv) {
@ -1514,7 +1765,7 @@ func TestService_decryptSecrets(t *testing.T) {
t.Run(tc.name, func(t *testing.T) {
t.Parallel()
env := setupTestEnv(t, false, false, false)
env := setupTestEnv(t, false, false, false, false)
if tc.setup != nil {
tc.setup(env)
@ -1593,7 +1844,7 @@ func Test_ProviderService(t *testing.T) {
t.Run(tc.name, func(t *testing.T) {
t.Parallel()
env := setupTestEnv(t, tc.isLicenseEnabled, true, tc.samlEnabled)
env := setupTestEnv(t, tc.isLicenseEnabled, true, tc.samlEnabled, false)
require.Equal(t, tc.expectedProvidersList, env.service.providersList)
require.Equal(t, tc.strategiesLength, len(env.service.fbStrategies))
@ -1601,7 +1852,7 @@ func Test_ProviderService(t *testing.T) {
}
}
func setupTestEnv(t *testing.T, isLicensingEnabled, keepFallbackStratergies, samlEnabled bool) testEnv {
func setupTestEnv(t *testing.T, isLicensingEnabled, keepFallbackStratergies, samlEnabled bool, ldapEnabled bool) testEnv {
t.Helper()
store := ssosettingstests.NewFakeStore()
@ -1631,10 +1882,14 @@ func setupTestEnv(t *testing.T, isLicensingEnabled, keepFallbackStratergies, sam
licensing := licensingtest.NewFakeLicensing()
licensing.On("FeatureEnabled", "saml").Return(isLicensingEnabled)
featureManager := featuremgmt.WithManager()
features := make([]any, 0)
if samlEnabled {
featureManager = featuremgmt.WithManager(featuremgmt.FlagSsoSettingsSAML)
features = append(features, featuremgmt.FlagSsoSettingsSAML)
}
if ldapEnabled {
features = append(features, featuremgmt.FlagSsoSettingsLDAP)
}
featureManager := featuremgmt.WithManager(features...)
svc := ProvideService(
cfg,

View File

@ -16,6 +16,10 @@ type ZanzanaSettings struct {
Addr string
// Mode can either be embedded or client
Mode ZanzanaMode
// ListenHTTP enables OpenFGA http server which allows to use fga cli
ListenHTTP bool
// OpenFGA http server address which allows to connect with fga cli
HttpAddr string
}
func (cfg *Cfg) readZanzanaSettings() {
@ -32,6 +36,8 @@ func (cfg *Cfg) readZanzanaSettings() {
}
s.Addr = sec.Key("address").MustString("")
s.ListenHTTP = sec.Key("listen_http").MustBool(false)
s.HttpAddr = sec.Key("http_addr").MustString("127.0.0.1:8080")
cfg.Zanzana = s
}

View File

@ -0,0 +1,32 @@
// Overriding the response types when enhancing endpoints is currently fiddly.
// The below approach is taken from/related to the below:
// https://github.com/reduxjs/redux-toolkit/issues/3901#issuecomment-1820995408
// https://github.com/reduxjs/redux-toolkit/issues/3443#issue-1709588268
//
// At the time of writing there is an open PR changing the API of `enhanceEndpoints`,
// which may help alleviate this when it lands:
// https://github.com/reduxjs/redux-toolkit/pull/3485
import { DefinitionsFromApi, OverrideResultType } from '@reduxjs/toolkit/dist/query/endpointDefinitions';
import {
ListTimeIntervalForAllNamespacesApiResponse,
generatedTimeIntervalsApi,
} from '../openapi/timeIntervalsApi.gen';
type Definitions = DefinitionsFromApi<typeof generatedTimeIntervalsApi>;
type UpdatedDefinitions = Omit<Definitions, 'listTimeIntervalForAllNamespaces'> & {
listTimeIntervalForAllNamespaces: OverrideResultType<
Definitions['listTimeIntervalForAllNamespaces'],
Array<ListTimeIntervalForAllNamespacesApiResponse['items'][0]['spec']>
>;
};
export const timeIntervalsApi = generatedTimeIntervalsApi.enhanceEndpoints<never, UpdatedDefinitions>({
endpoints: {
listTimeIntervalForAllNamespaces: {
transformResponse: (response: ListTimeIntervalForAllNamespacesApiResponse) =>
response.items.map((item) => item.spec),
},
},
});

View File

@ -38,24 +38,28 @@ export default function GettingStarted() {
<Text element="h3">Get started</Text>
<ul className={styles.list}>
<li>
<Text weight="bold">Create an alert rule</Text> by adding queries and expressions from multiple data
sources.
<Text weight="bold">Create an alert rule</Text> to query a data source and evaluate the condition defined
in the alert rule
</li>
<li>
<Text weight="bold">Add labels</Text> to your alert rules{' '}
<Text weight="bold">to connect them to notification policies</Text>
<Text weight="bold">Route alert notifications</Text> either directly to a contact point or through
notification policies for more flexibility
</li>
<li>
<Text weight="bold">Configure contact points</Text> to define where to send your notifications to.
</li>
<li>
<Text weight="bold">Configure notification policies</Text> to route your alert instances to contact
points.
<Text weight="bold">Monitor</Text> your alert rules using dashboards and visualizations
</li>
</ul>
<TextLink href="https://grafana.com/docs/grafana/latest/alerting/" icon="angle-right" inline={false} external>
Read more in the docs
</TextLink>
<p>
For a hands-on introduction, refer to our{' '}
<TextLink
href="https://grafana.com/tutorials/alerting-get-started/"
icon="angle-right"
inline={true}
external
>
tutorial to get started with Grafana Alerting
</TextLink>
</p>
</Stack>
</ContentBox>
</div>

View File

@ -0,0 +1,204 @@
// Generated RTK Query bindings for the
// notifications.alerting.grafana.app/v0alpha1 `timeintervals` resource
// (presumably produced by `scripts/generate-alerting-rtk-apis.ts` — regenerate
// rather than hand-editing).
import { alertingApi as api } from '../api/alertingApi';

// Cache tag types registered by this slice; list results are tagged 'TimeInterval'.
export const addTagTypes = ['TimeInterval'] as const;
const injectedRtkApi = api
  .enhanceEndpoints({
    addTagTypes,
  })
  .injectEndpoints({
    endpoints: (build) => ({
      // GET all time intervals across namespaces (Kubernetes-style list endpoint).
      listTimeIntervalForAllNamespaces: build.query<
        ListTimeIntervalForAllNamespacesApiResponse,
        ListTimeIntervalForAllNamespacesApiArg
      >({
        query: (queryArg) => ({
          url: `/apis/notifications.alerting.grafana.app/v0alpha1/timeintervals`,
          params: {
            allowWatchBookmarks: queryArg.allowWatchBookmarks,
            // bracket access because `continue` is a reserved word
            continue: queryArg['continue'],
            fieldSelector: queryArg.fieldSelector,
            labelSelector: queryArg.labelSelector,
            limit: queryArg.limit,
            pretty: queryArg.pretty,
            resourceVersion: queryArg.resourceVersion,
            resourceVersionMatch: queryArg.resourceVersionMatch,
            sendInitialEvents: queryArg.sendInitialEvents,
            timeoutSeconds: queryArg.timeoutSeconds,
            watch: queryArg.watch,
          },
        }),
        providesTags: ['TimeInterval'],
      }),
    }),
    overrideExisting: false,
  });
export { injectedRtkApi as generatedTimeIntervalsApi };
export type ListTimeIntervalForAllNamespacesApiResponse =
/** status 200 OK */ ComGithubGrafanaGrafanaPkgApisAlertingNotificationsV0Alpha1TimeIntervalList;
export type ListTimeIntervalForAllNamespacesApiArg = {
/** allowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. */
allowWatchBookmarks?: boolean;
/** The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key".
This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. */
continue?: string;
/** A selector to restrict the list of returned objects by their fields. Defaults to everything. */
fieldSelector?: string;
/** A selector to restrict the list of returned objects by their labels. Defaults to everything. */
labelSelector?: string;
/** limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.
The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. */
limit?: number;
/** If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget). */
pretty?: string;
/** resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.
Defaults to unset */
resourceVersion?: string;
/** resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.
Defaults to unset */
resourceVersionMatch?: string;
/** `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic "Bookmark" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `"k8s.io/initial-events-end": "true"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched.
When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan
is interpreted as "data at least as new as the provided `resourceVersion`"
and the bookmark event is send when the state is synced
to a `resourceVersion` at least as fresh as the one provided by the ListOptions.
If `resourceVersion` is unset, this is interpreted as "consistent read" and the
bookmark event is send when the state is synced at least to the moment
when request started being processed.
- `resourceVersionMatch` set to any other value or unset
Invalid error is returned.
Defaults to true if `resourceVersion=""` or `resourceVersion="0"` (for backward compatibility reasons) and to false otherwise. */
sendInitialEvents?: boolean;
/** Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. */
timeoutSeconds?: number;
/** Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. */
watch?: boolean;
};
export type IoK8SApimachineryPkgApisMetaV1Time = string;
export type IoK8SApimachineryPkgApisMetaV1FieldsV1 = object;
export type IoK8SApimachineryPkgApisMetaV1ManagedFieldsEntry = {
/** APIVersion defines the version of this resource that this field set applies to. The format is "group/version" just like the top-level APIVersion field. It is necessary to track the version of a field set because it cannot be automatically converted. */
apiVersion?: string;
/** FieldsType is the discriminator for the different fields format and version. There is currently only one possible value: "FieldsV1" */
fieldsType?: string;
/** FieldsV1 holds the first JSON version format as described in the "FieldsV1" type. */
fieldsV1?: IoK8SApimachineryPkgApisMetaV1FieldsV1;
/** Manager is an identifier of the workflow managing these fields. */
manager?: string;
/** Operation is the type of operation which lead to this ManagedFieldsEntry being created. The only valid values for this field are 'Apply' and 'Update'. */
operation?: string;
/** Subresource is the name of the subresource used to update that object, or empty string if the object was updated through the main resource. The value of this field is used to distinguish between managers, even if they share the same name. For example, a status update will be distinct from a regular update using the same manager name. Note that the APIVersion field is not related to the Subresource field and it always corresponds to the version of the main resource. */
subresource?: string;
/** Time is the timestamp of when the ManagedFields entry was added. The timestamp will also be updated if a field is added, the manager changes any of the owned fields value or removes a field. The timestamp does not update when a field is removed from the entry because another manager took it over. */
time?: IoK8SApimachineryPkgApisMetaV1Time;
};
export type IoK8SApimachineryPkgApisMetaV1OwnerReference = {
/** API version of the referent. */
apiVersion: string;
/** If true, AND if the owner has the "foregroundDeletion" finalizer, then the owner cannot be deleted from the key-value store until this reference is removed. See https://kubernetes.io/docs/concepts/architecture/garbage-collection/#foreground-deletion for how the garbage collector interacts with this field and enforces the foreground deletion. Defaults to false. To set this field, a user needs "delete" permission of the owner, otherwise 422 (Unprocessable Entity) will be returned. */
blockOwnerDeletion?: boolean;
/** If true, this reference points to the managing controller. */
controller?: boolean;
/** Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds */
kind: string;
/** Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names */
name: string;
/** UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids */
uid: string;
};
export type IoK8SApimachineryPkgApisMetaV1ObjectMeta = {
/** Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations */
annotations?: {
[key: string]: string;
};
/** CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC.
Populated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata */
creationTimestamp?: IoK8SApimachineryPkgApisMetaV1Time;
/** Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only. */
deletionGracePeriodSeconds?: number;
/** DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This field is set by the server when a graceful deletion is requested by the user, and is not directly settable by a client. The resource is expected to be deleted (no longer visible from resource lists, and not reachable by name) after the time in this field, once the finalizers list is empty. As long as the finalizers list contains items, deletion is blocked. Once the deletionTimestamp is set, this value may not be unset or be set further into the future, although it may be shortened or the resource may be deleted prior to this time. For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup, remove the pod from the API. In the presence of network partitions, this object may still exist after this timestamp, until an administrator or automated process can determine the resource is fully terminated. If not set, graceful deletion of the object has not been requested.
Populated by the system when a graceful deletion is requested. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata */
deletionTimestamp?: IoK8SApimachineryPkgApisMetaV1Time;
/** Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list. */
finalizers?: string[];
/** GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.
If this field is specified and the generated name exists, the server will return a 409.
Applied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency */
generateName?: string;
/** A sequence number representing a specific generation of the desired state. Populated by the system. Read-only. */
generation?: number;
/** Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels */
labels?: {
[key: string]: string;
};
/** ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like "ci-cd". The set of fields is always in the version that the workflow used when modifying the object. */
managedFields?: IoK8SApimachineryPkgApisMetaV1ManagedFieldsEntry[];
/** Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names */
name?: string;
/** Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the "default" namespace, but "default" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.
Must be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces */
namespace?: string;
/** List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller. */
ownerReferences?: IoK8SApimachineryPkgApisMetaV1OwnerReference[];
/** An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.
Populated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency */
resourceVersion?: string;
/** Deprecated: selfLink is a legacy read-only field that is no longer populated by the system. */
selfLink?: string;
/** UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.
Populated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids */
uid?: string;
};
export type ComGithubGrafanaGrafanaPkgApisAlertingNotificationsV0Alpha1TimeRange = {
end_time: string;
start_time: string;
};
export type ComGithubGrafanaGrafanaPkgApisAlertingNotificationsV0Alpha1Interval = {
days_of_month?: string[];
location?: string;
months?: string[];
times?: ComGithubGrafanaGrafanaPkgApisAlertingNotificationsV0Alpha1TimeRange[];
weekdays?: string[];
years?: string[];
};
export type ComGithubGrafanaGrafanaPkgApisAlertingNotificationsV0Alpha1TimeIntervalSpec = {
name: string;
time_intervals: ComGithubGrafanaGrafanaPkgApisAlertingNotificationsV0Alpha1Interval[];
};
export type ComGithubGrafanaGrafanaPkgApisAlertingNotificationsV0Alpha1TimeInterval = {
/** APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources */
apiVersion?: string;
/** Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds */
kind?: string;
metadata: IoK8SApimachineryPkgApisMetaV1ObjectMeta;
spec: ComGithubGrafanaGrafanaPkgApisAlertingNotificationsV0Alpha1TimeIntervalSpec;
};
export type IoK8SApimachineryPkgApisMetaV1ListMeta = {
/** continue may be set if the user set a limit on the number of items returned, and indicates that the server has more data available. The value is opaque and may be used to issue another request to the endpoint that served this list to retrieve the next set of available objects. Continuing a consistent list may not be possible if the server configuration has changed or more than a few minutes have passed. The resourceVersion field returned when using this continue value will be identical to the value in the first response, unless you have received this token from an error message. */
continue?: string;
/** remainingItemCount is the number of subsequent items in the list which are not included in this list response. If the list request contained label or field selectors, then the number of remaining items is unknown and the field will be left unset and omitted during serialization. If the list is complete (either because it is not chunking or because this is the last chunk), then there are no more remaining items and this field will be left unset and omitted during serialization. Servers older than v1.15 do not set this field. The intended use of the remainingItemCount is *estimating* the size of a collection. Clients should not rely on the remainingItemCount to be set or to be exact. */
remainingItemCount?: number;
/** String that identifies the server's internal version of this object that can be used by clients to determine when objects have changed. Value must be treated as opaque by clients and passed unmodified back to the server. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency */
resourceVersion?: string;
/** Deprecated: selfLink is a legacy read-only field that is no longer populated by the system. */
selfLink?: string;
};
export type ComGithubGrafanaGrafanaPkgApisAlertingNotificationsV0Alpha1TimeIntervalList = {
/** APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources */
apiVersion?: string;
items: ComGithubGrafanaGrafanaPkgApisAlertingNotificationsV0Alpha1TimeInterval[];
/** Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds */
kind?: string;
metadata: IoK8SApimachineryPkgApisMetaV1ListMeta;
};

View File

@ -3,7 +3,7 @@ import { memo, useEffect, useMemo } from 'react';
import AutoSizer from 'react-virtualized-auto-sizer';
import { GrafanaTheme2 } from '@grafana/data';
import { reportInteraction } from '@grafana/runtime';
import { locationService, reportInteraction } from '@grafana/runtime';
import { FilterInput, useStyles2 } from '@grafana/ui';
import { Page } from 'app/core/components/Page/Page';
import { GrafanaRouteComponentProps } from 'app/core/navigation/types';
@ -39,6 +39,7 @@ const BrowseDashboardsPage = memo(({ match }: Props) => {
const styles = useStyles2(getStyles);
const [searchState, stateManager] = useSearchStateManager();
const isSearching = stateManager.hasSearchFilters();
const search = locationService.getSearch();
useEffect(() => {
stateManager.initStateFromUrl(folderUID);
@ -52,6 +53,11 @@ const BrowseDashboardsPage = memo(({ match }: Props) => {
);
}, [dispatch, folderUID, stateManager]);
// Trigger search when "starred" query param changes
useEffect(() => {
stateManager.onSetStarred(search.has('starred'));
}, [search, stateManager]);
useEffect(() => {
// Clear the search results when we leave SearchView to prevent old results flashing
// when starting a new search

View File

@ -16,6 +16,8 @@ export function setupKeyboardShortcuts(scene: DashboardScene) {
const keybindings = new KeybindingSet();
let vizPanelKey: string | null = null;
const canEdit = scene.canEditDashboard();
const panelAttentionSubscription = appEvents.subscribe(SetPanelAttentionEvent, (event) => {
if (typeof event.payload.panelId === 'string') {
vizPanelKey = event.payload.panelId;
@ -42,20 +44,6 @@ export function setupKeyboardShortcuts(scene: DashboardScene) {
}),
});
// Panel edit
keybindings.addBinding({
key: 'e',
onTrigger: withFocusedPanel(scene, async (vizPanel: VizPanel) => {
const sceneRoot = vizPanel.getRoot();
if (sceneRoot instanceof DashboardScene) {
const panelId = getPanelIdForVizPanel(vizPanel);
if (!scene.state.editPanel) {
locationService.push(getEditPanelUrl(panelId));
}
}
}),
});
// Panel share
keybindings.addBinding({
key: 'p s',
@ -133,38 +121,55 @@ export function setupKeyboardShortcuts(scene: DashboardScene) {
},
});
// Dashboard settings
keybindings.addBinding({
key: 'd s',
onTrigger: scene.onOpenSettings,
});
if (canEdit) {
// Panel edit
keybindings.addBinding({
key: 'e',
onTrigger: withFocusedPanel(scene, async (vizPanel: VizPanel) => {
const sceneRoot = vizPanel.getRoot();
if (sceneRoot instanceof DashboardScene) {
const panelId = getPanelIdForVizPanel(vizPanel);
if (!scene.state.editPanel) {
locationService.push(getEditPanelUrl(panelId));
}
}
}),
});
keybindings.addBinding({
key: 'mod+s',
onTrigger: () => scene.openSaveDrawer({}),
});
// Dashboard settings
keybindings.addBinding({
key: 'd s',
onTrigger: scene.onOpenSettings,
});
// Open save drawer
keybindings.addBinding({
key: 'mod+s',
onTrigger: () => scene.openSaveDrawer({}),
});
// delete panel
keybindings.addBinding({
key: 'p r',
onTrigger: withFocusedPanel(scene, (vizPanel: VizPanel) => {
if (scene.state.isEditing) {
onRemovePanel(scene, vizPanel);
}
}),
});
// duplicate panel
keybindings.addBinding({
key: 'p d',
onTrigger: withFocusedPanel(scene, (vizPanel: VizPanel) => {
if (scene.state.isEditing) {
scene.duplicatePanel(vizPanel);
}
}),
});
}
// toggle all panel legends (TODO)
// delete panel
keybindings.addBinding({
key: 'p r',
onTrigger: withFocusedPanel(scene, (vizPanel: VizPanel) => {
if (scene.state.isEditing) {
onRemovePanel(scene, vizPanel);
}
}),
});
// duplicate panel
keybindings.addBinding({
key: 'p d',
onTrigger: withFocusedPanel(scene, (vizPanel: VizPanel) => {
if (scene.state.isEditing) {
scene.duplicatePanel(vizPanel);
}
}),
});
// toggle all exemplars (TODO)
// collapse all rows (TODO)
// expand all rows (TODO)

View File

@ -44,6 +44,11 @@ jest.mock('@grafana/runtime', () => ({
featureToggles: {
newDashboardWithFiltersAndGroupBy: false,
},
bootData: {
user: {
timezone: 'Africa/Abidjan',
},
},
},
getDataSourceSrv: () => ({
get: (): Promise<DataSourceApi> => {
@ -72,5 +77,10 @@ describe('buildNewDashboardSaveModel', () => {
expect(result.dashboard.templating?.list?.[0].type).toBe('adhoc');
expect(result.dashboard.templating?.list?.[1].type).toBe('groupby');
});
it("should set the new dashboard's timezone to the user's timezone", async () => {
const result = await buildNewDashboardSaveModel();
expect(result.dashboard.timezone).toEqual('Africa/Abidjan');
});
});
});

View File

@ -46,6 +46,7 @@ export async function buildNewDashboardSaveModel(urlFolderUid?: string): Promise
uid: '',
title: 'New dashboard',
panels: [],
timezone: config.bootData.user?.timezone || defaultDashboard.timezone,
},
};

View File

@ -26,6 +26,7 @@ export const UnsupportedDataSourcesAlert = ({ unsupportedDataSources }: { unsupp
</p>
<a
href="https://grafana.com/docs/grafana/next/dashboards/dashboard-public/"
target="blank"
className={cx('text-link', styles.unsupportedDataSourceDescription)}
>
<Trans i18nKey="public-dashboard.modal-alerts.unsupport-data-source-alert-readmore-link">

View File

@ -161,6 +161,12 @@ export class SearchStateManager extends StateManagerBase<SearchState> {
this.setStateAndDoSearch({ starred: false });
};
onSetStarred = (starred: boolean) => {
if (starred !== this.state.starred) {
this.setStateAndDoSearch({ starred });
}
};
onSortChange = (sort: string | undefined) => {
if (sort) {
localStorage.setItem(SEARCH_SELECTED_SORT, sort);

View File

@ -271,6 +271,39 @@ describe('LokiDatasource', () => {
'rate({bar="baz", job="foo", k1=~"v.*", k2=~"v\\\\\'.*"} |= "bar" [5m])'
);
});
it('should interpolate before adding adhoc filters', async () => {
const originalQuery = 'rate({bar="baz", job="foo"} |= "bar" [$__auto])';
const interpolatedQuery = 'rate({bar="baz", job="foo"} |= "bar" [5m])';
const templateSrv = {
replace: jest.fn().mockImplementation((input: string) => interpolatedQuery),
getVariables: () => [],
};
const query: LokiQuery = { expr: originalQuery, refId: 'A' };
const ds = createLokiDatasource(templateSrv);
const adhocFilters = [
{
key: 'k1',
operator: '=',
value: 'v1',
},
{
key: 'k2',
operator: '!=',
value: 'v2',
},
];
jest.spyOn(ds, 'addAdHocFilters');
ds.applyTemplateVariables(query, {}, adhocFilters);
expect(templateSrv.replace).toHaveBeenCalledWith(originalQuery, expect.any(Object), expect.any(Function));
expect(ds.addAdHocFilters).toHaveBeenCalledWith(interpolatedQuery, adhocFilters);
expect(ds.applyTemplateVariables(query, {}, adhocFilters).expr).toBe(
'rate({bar="baz", job="foo", k1="v1", k2!="v2"} |= "bar" [5m])'
);
});
});
describe('when interpolating variables', () => {

View File

@ -1089,8 +1089,6 @@ export class LokiDatasource
// alerting/ML queries and we want to have consistent interpolation for all queries
const { __auto, __interval, __interval_ms, __range, __range_s, __range_ms, ...rest } = scopedVars || {};
const exprWithAdHoc = this.addAdHocFilters(target.expr, adhocFilters);
const variables = {
...rest,
@ -1102,10 +1100,16 @@ export class LokiDatasource
value: '$__interval_ms',
},
};
const exprWithAdHoc = this.addAdHocFilters(
this.templateSrv.replace(target.expr, variables, this.interpolateQueryExpr),
adhocFilters
);
return {
...target,
legendFormat: this.templateSrv.replace(target.legendFormat, rest),
expr: this.templateSrv.replace(exprWithAdHoc, variables, this.interpolateQueryExpr),
expr: exprWithAdHoc,
};
}

View File

@ -1,11 +1,6 @@
// MIXINS
@import 'mixins/mixins';
@import 'mixins/buttons';
@import 'mixins/breakpoints';
@import 'mixins/grid';
@import 'mixins/grid-framework';
@import 'mixins/hover';
@import 'mixins/forms';
// BASE
@import 'base/reboot';

View File

@ -1,3 +1,37 @@
@use 'sass:color';
// Applies a validation-state color to every part of a form group:
// labels/help text, the control border, input-group addons (with a
// lightened background), and the optional feedback icon.
// $color: the state color (e.g. success/warning/danger).
@mixin form-control-validation($color) {
  // Color the label and help text
  .text-help,
  .form-control-label,
  .radio,
  .checkbox,
  .radio-inline,
  .checkbox-inline,
  &.radio label,
  &.checkbox label,
  &.radio-inline label,
  &.checkbox-inline label,
  .custom-control {
    color: $color;
  }

  // Match the input border to the state color
  .form-control {
    border-color: $color;
  }

  // Set validation states also for addons
  .input-group-addon {
    color: $color;
    border-color: $color;
    // Lightened variant of the state color for the addon background
    background-color: color.adjust($color, $lightness: 40%);
  }

  // Optional feedback icon
  .form-control-feedback {
    color: $color;
  }
}
//
// Forms
// --------------------------------------------------

View File

@ -1,3 +1,124 @@
@use 'sass:math';
/// Grid system
//
// Generate semantic grid columns with these mixins.
// Centers a container horizontally and pads it by half the grid gutter
// on each side; falls back to a clearfix when the flex grid is disabled.
@mixin make-container($gutter: $grid-gutter-width) {
  margin-left: auto;
  margin-right: auto;
  padding-left: calc($gutter / 2);
  padding-right: calc($gutter / 2);
  @if not $enable-flex {
    @include clearfix();
  }
}
// For each breakpoint, define the maximum width of the container in a media query
// $max-widths: map of breakpoint name -> container max-width.
// $breakpoints: map consumed by media-breakpoint-up for the media queries.
@mixin make-container-max-widths($max-widths: $container-max-widths, $breakpoints: $grid-breakpoints) {
  @each $breakpoint, $container-max-width in $max-widths {
    @include media-breakpoint-up($breakpoint, $breakpoints) {
      max-width: $container-max-width;
    }
  }
}
// Lays out a grid row: wrapping flex container when $enable-flex is set,
// clearfixed float container otherwise. The negative side margins cancel
// the half-gutter padding applied by the columns.
@mixin make-row($gutter: $grid-gutter-width) {
  @if $enable-flex {
    display: flex;
    flex-wrap: wrap;
  } @else {
    @include clearfix();
  }
  margin-left: calc($gutter / -2);
  margin-right: calc($gutter / -2);
}
// Sizes one grid column as $size of $columns — a fixed flex basis in
// flexbox mode, a percentage float width otherwise.
@mixin make-col($size, $columns: $grid-columns) {
  position: relative;
  min-height: 1px;
  padding-right: calc($grid-gutter-width / 2);
  padding-left: calc($grid-gutter-width / 2);
  @if $enable-flex {
    flex: 0 0 math.percentage(calc($size / $columns));
    // Add a `max-width` to ensure content within each column does not blow out
    // the width of the column. Applies to IE10+ and Firefox. Chrome and Safari
    // do not appear to require this.
    max-width: math.percentage(calc($size / $columns));
  } @else {
    float: left;
    width: math.percentage(calc($size / $columns));
  }
}
// Offsets a column to the right by $size column widths via margin-left.
@mixin make-col-offset($size, $columns: $grid-columns) {
  margin-left: math.percentage(calc($size / $columns));
}
// Pushes a column right by $size column widths (`auto`, i.e. no-op, for 0).
@mixin make-col-push($size, $columns: $grid-columns) {
  left: if($size > 0, math.percentage(calc($size / $columns)), auto);
}
// Pulls a column left by $size column widths (`auto`, i.e. no-op, for 0).
@mixin make-col-pull($size, $columns: $grid-columns) {
  right: if($size > 0, math.percentage(calc($size / $columns)), auto);
}
// Dispatches to the push/pull/offset column mixin selected by $type.
@mixin make-col-modifier($type, $size, $columns) {
  // Work around the lack of dynamic mixin @include support (https://github.com/sass/sass/issues/626)
  @if $type == push {
    @include make-col-push($size, $columns);
  } @else if $type == pull {
    @include make-col-pull($size, $columns);
  } @else if $type == offset {
    @include make-col-offset($size, $columns);
  }
}
// Emits the full set of grid classes for every breakpoint:
// .col-{bp} (flex-only auto column), .col-{bp}-{n}, .pull/.push-{bp}-{n},
// and .offset-{bp}-{n} (skipping the useless offset-0 at the first breakpoint).
@mixin make-grid-columns($columns: $grid-columns, $gutter: $grid-gutter-width, $breakpoints: $grid-breakpoints) {
  // 1-based index of the current breakpoint, used to detect the first one
  $breakpoint-counter: 0;
  @each $breakpoint in map-keys($breakpoints) {
    $breakpoint-counter: ($breakpoint-counter + 1);
    @include media-breakpoint-up($breakpoint, $breakpoints) {
      // Equal-width auto column, only meaningful in flexbox mode
      @if $enable-flex {
        .col-#{$breakpoint} {
          position: relative;
          flex-basis: 0;
          flex-grow: 1;
          max-width: 100%;
          min-height: 1px;
          padding-right: calc($grid-gutter-width / 2);
          padding-left: calc($grid-gutter-width / 2);
        }
      }
      // Fixed-size columns 1..$columns
      @for $i from 1 through $columns {
        .col-#{$breakpoint}-#{$i} {
          @include make-col($i, $columns);
        }
      }
      // Visual reordering helpers
      @each $modifier in (pull, push) {
        @for $i from 0 through $columns {
          .#{$modifier}-#{$breakpoint}-#{$i} {
            @include make-col-modifier($modifier, $i, $columns);
          }
        }
      }
      // `$columns - 1` because offsetting by the width of an entire row isn't possible
      @for $i from 0 through ($columns - 1) {
        @if $breakpoint-counter != 1 or $i != 0 {
          // Avoid emitting useless .col-xs-offset-0
          .offset-#{$breakpoint}-#{$i} {
            @include make-col-modifier(offset, $i, $columns);
          }
        }
      }
    }
  }
}
// Container widths
//
// Set the container width, and override it for fixed navbars in media queries.

View File

@ -1,5 +1,110 @@
@use 'sass:color';
@use 'sass:map';
// Wraps @content in an &:hover rule; when $enable-hover-media-query is
// set, the rule is additionally guarded by `@media (hover: hover)` so it
// only applies on devices that truly support hovering.
@mixin hover {
  @if $enable-hover-media-query {
    // See Media Queries Level 4: http://drafts.csswg.org/mediaqueries/#hover
    // Currently shimmed by https://github.com/twbs/mq4-hover-shim
    @media (hover: hover) {
      &:hover {
        @content;
      }
    }
  } @else {
    &:hover {
      @content;
    }
  }
}
// Applies @content to both :focus and :hover, delegating to the `hover`
// mixin so the hover half picks up the media-query guard when enabled.
@mixin hover-focus {
  @if $enable-hover-media-query {
    &:focus {
      @content;
    }
    @include hover {
      @content;
    }
  } @else {
    &:focus,
    &:hover {
      @content;
    }
  }
}
// Button backgrounds
// ------------------
// Gradient button background with a flat fallback: interaction and
// disabled states drop the gradient image and use $startColor directly.
@mixin buttonBackground($startColor, $endColor, $text-color: #fff, $textShadow: 0px 1px 0 rgba(0, 0, 0, 0.1)) {
  // gradientBar will set the background to a pleasing blend of these, to support IE<=9
  @include gradientBar($startColor, $endColor, $text-color, $textShadow);

  // in these cases the gradient won't cover the background, so we override
  &:hover,
  &:focus,
  &:active,
  &.active,
  &.disabled,
  &[disabled] {
    color: $text-color;
    background-image: none;
    background-color: $startColor;
  }
}
// Button sizes
// Sets a button's padding, font size and corner radius for one size variant.
@mixin button-size($padding-y, $padding-x, $font-size, $border-radius) {
  padding: $padding-y $padding-x;
  font-size: $font-size;
  //box-shadow: inset 0 (-$padding-y/3) rgba(0,0,0,0.15);
  @include border-radius($border-radius);
}
// Outline ("ghost") button: white text and border on a transparent
// background that fills with $color on hover/focus/active.
// NOTE(review): the text color is $white throughout — it never switches
// to $color, unlike upstream Bootstrap's outline variant.
@mixin button-outline-variant($color) {
  color: $white;
  background-image: none;
  background-color: transparent;
  border: 1px solid $white;

  @include hover {
    color: $white;
    background-color: $color;
  }

  &:focus,
  &.focus {
    color: $white;
    background-color: $color;
  }

  &:active,
  &.active,
  .open > &.dropdown-toggle {
    color: $white;
    background-color: $color;

    &:hover,
    &:focus,
    &.focus {
      color: $white;
      // Darken the fill and border further when an active button is hovered
      background-color: color.adjust($color, $lightness: -17%);
      border-color: color.adjust($color, $lightness: -25%);
    }
  }

  &.disabled,
  &:disabled {
    &:focus,
    &.focus {
      border-color: color.adjust($color, $lightness: 20%);
    }
    @include hover {
      border-color: color.adjust($color, $lightness: 20%);
    }
  }
}
//
// Buttons
// --------------------------------------------------

View File

@ -1,6 +1,13 @@
@use 'sass:list';
// Default border for text inputs
$input-border: 1px solid $input-border-color;

// Focus treatment: swap the border color and suppress the UA outline.
@mixin form-control-focus() {
  &:focus {
    border-color: $input-border-focus;
    outline: none;
  }
}
.gf-form {
display: flex;
flex-direction: row;

View File

@ -1,73 +0,0 @@
@use 'sass:color';
// Button backgrounds
// ------------------
@mixin buttonBackground($startColor, $endColor, $text-color: #fff, $textShadow: 0px 1px 0 rgba(0, 0, 0, 0.1)) {
// gradientBar will set the background to a pleasing blend of these, to support IE<=9
@include gradientBar($startColor, $endColor, $text-color, $textShadow);
// in these cases the gradient won't cover the background, so we override
&:hover,
&:focus,
&:active,
&.active,
&.disabled,
&[disabled] {
color: $text-color;
background-image: none;
background-color: $startColor;
}
}
// Button sizes
@mixin button-size($padding-y, $padding-x, $font-size, $border-radius) {
padding: $padding-y $padding-x;
font-size: $font-size;
//box-shadow: inset 0 (-$padding-y/3) rgba(0,0,0,0.15);
@include border-radius($border-radius);
}
@mixin button-outline-variant($color) {
color: $white;
background-image: none;
background-color: transparent;
border: 1px solid $white;
@include hover {
color: $white;
background-color: $color;
}
&:focus,
&.focus {
color: $white;
background-color: $color;
}
&:active,
&.active,
.open > &.dropdown-toggle {
color: $white;
background-color: $color;
&:hover,
&:focus,
&.focus {
color: $white;
background-color: color.adjust($color, $lightness: -17%);
border-color: color.adjust($color, $lightness: -25%);
}
}
&.disabled,
&:disabled {
&:focus,
&.focus {
border-color: color.adjust($color, $lightness: 20%);
}
@include hover {
border-color: color.adjust($color, $lightness: 20%);
}
}
}

View File

@ -1,66 +0,0 @@
@use 'sass:color';
@mixin form-control-validation($color) {
// Color the label and help text
.text-help,
.form-control-label,
.radio,
.checkbox,
.radio-inline,
.checkbox-inline,
&.radio label,
&.checkbox label,
&.radio-inline label,
&.checkbox-inline label,
.custom-control {
color: $color;
}
.form-control {
border-color: $color;
}
// Set validation states also for addons
.input-group-addon {
color: $color;
border-color: $color;
background-color: color.adjust($color, $lightness: 40%);
}
// Optional feedback icon
.form-control-feedback {
color: $color;
}
}
@mixin form-control-focus() {
&:focus {
border-color: $input-border-focus;
outline: none;
}
}
// Form control sizing
//
// Relative text size, padding, and border-radii changes for form controls. For
// horizontal sizing, wrap controls in the predefined grid classes. `<select>`
// element gets special love because it's special, and that's a fact!
// Sizes the form control matched by the $parent selector. <select> gets
// its line-height pinned to the control height, while <textarea> and
// multi-selects fall back to height: auto.
@mixin input-size($parent, $input-height, $padding-y, $padding-x, $font-size, $line-height, $border-radius) {
  #{$parent} {
    height: $input-height;
    padding: $padding-y $padding-x;
    font-size: $font-size;
    line-height: $line-height;
    @include border-radius($border-radius);
  }

  select#{$parent} {
    height: $input-height;
    line-height: $input-height;
  }

  textarea#{$parent},
  select[multiple]#{$parent} {
    height: auto;
  }
}

View File

@ -1,48 +0,0 @@
// Framework grid generation
//
// Used only by Bootstrap to generate the correct number of grid classes given
// any value of `$grid-columns`.
@mixin make-grid-columns($columns: $grid-columns, $gutter: $grid-gutter-width, $breakpoints: $grid-breakpoints) {
$breakpoint-counter: 0;
@each $breakpoint in map-keys($breakpoints) {
$breakpoint-counter: ($breakpoint-counter + 1);
@include media-breakpoint-up($breakpoint, $breakpoints) {
@if $enable-flex {
.col-#{$breakpoint} {
position: relative;
flex-basis: 0;
flex-grow: 1;
max-width: 100%;
min-height: 1px;
padding-right: calc($grid-gutter-width / 2);
padding-left: calc($grid-gutter-width / 2);
}
}
@for $i from 1 through $columns {
.col-#{$breakpoint}-#{$i} {
@include make-col($i, $columns);
}
}
@each $modifier in (pull, push) {
@for $i from 0 through $columns {
.#{$modifier}-#{$breakpoint}-#{$i} {
@include make-col-modifier($modifier, $i, $columns);
}
}
}
// `$columns - 1` because offsetting by the width of an entire row isn't possible
@for $i from 0 through ($columns - 1) {
@if $breakpoint-counter != 1 or $i != 0 {
// Avoid emitting useless .col-xs-offset-0
.offset-#{$breakpoint}-#{$i} {
@include make-col-modifier(offset, $i, $columns);
}
}
}
}
}
}

View File

@ -1,76 +0,0 @@
@use 'sass:math';
/// Grid system
//
// Generate semantic grid columns with these mixins.
@mixin make-container($gutter: $grid-gutter-width) {
margin-left: auto;
margin-right: auto;
padding-left: calc($gutter / 2);
padding-right: calc($gutter / 2);
@if not $enable-flex {
@include clearfix();
}
}
// For each breakpoint, define the maximum width of the container in a media query
@mixin make-container-max-widths($max-widths: $container-max-widths, $breakpoints: $grid-breakpoints) {
@each $breakpoint, $container-max-width in $max-widths {
@include media-breakpoint-up($breakpoint, $breakpoints) {
max-width: $container-max-width;
}
}
}
@mixin make-row($gutter: $grid-gutter-width) {
@if $enable-flex {
display: flex;
flex-wrap: wrap;
} @else {
@include clearfix();
}
margin-left: calc($gutter / -2);
margin-right: calc($gutter / -2);
}
@mixin make-col($size, $columns: $grid-columns) {
position: relative;
min-height: 1px;
padding-right: calc($grid-gutter-width / 2);
padding-left: calc($grid-gutter-width / 2);
@if $enable-flex {
flex: 0 0 math.percentage(calc($size / $columns));
// Add a `max-width` to ensure content within each column does not blow out
// the width of the column. Applies to IE10+ and Firefox. Chrome and Safari
// do not appear to require this.
max-width: math.percentage(calc($size / $columns));
} @else {
float: left;
width: math.percentage(calc($size / $columns));
}
}
@mixin make-col-offset($size, $columns: $grid-columns) {
margin-left: math.percentage(calc($size / $columns));
}
@mixin make-col-push($size, $columns: $grid-columns) {
left: if($size > 0, math.percentage(calc($size / $columns)), auto);
}
@mixin make-col-pull($size, $columns: $grid-columns) {
right: if($size > 0, math.percentage(calc($size / $columns)), auto);
}
@mixin make-col-modifier($type, $size, $columns) {
// Work around the lack of dynamic mixin @include support (https://github.com/sass/sass/issues/626)
@if $type == push {
@include make-col-push($size, $columns);
} @else if $type == pull {
@include make-col-pull($size, $columns);
} @else if $type == offset {
@include make-col-offset($size, $columns);
}
}

View File

@ -1,67 +0,0 @@
@mixin hover {
@if $enable-hover-media-query {
// See Media Queries Level 4: http://drafts.csswg.org/mediaqueries/#hover
// Currently shimmed by https://github.com/twbs/mq4-hover-shim
@media (hover: hover) {
&:hover {
@content;
}
}
} @else {
&:hover {
@content;
}
}
}
@mixin hover-focus {
@if $enable-hover-media-query {
&:focus {
@content;
}
@include hover {
@content;
}
} @else {
&:focus,
&:hover {
@content;
}
}
}
// Applies @content to the bare selector plus :focus and :hover, delegating
// the hover half to the `hover` mixin so its media-query guard is reused.
@mixin plain-hover-focus {
  @if $enable-hover-media-query {
    &,
    &:focus {
      @content;
    }
    @include hover {
      @content;
    }
  } @else {
    &,
    &:focus,
    &:hover {
      @content;
    }
  }
}
// Applies @content to :focus, :active and :hover, delegating the hover
// half to the `hover` mixin so its media-query guard is reused.
@mixin hover-focus-active {
  @if $enable-hover-media-query {
    &:focus,
    &:active {
      @content;
    }
    @include hover {
      @content;
    }
  } @else {
    &:focus,
    &:active,
    &:hover {
      @content;
    }
  }
}

View File

@ -0,0 +1,38 @@
/**
 * To generate alerting k8s APIs, run:
 * `npx rtk-query-codegen-openapi ./scripts/generate-alerting-rtk-apis.ts`
 */
import { ConfigFile } from '@rtk-query/codegen-openapi';
import { accessSync } from 'fs';

const schemaFile = '../data/alerting/openapi.json';

/**
 * Fail fast with a helpful message when the OpenAPI definition has not been
 * downloaded yet — fetching it is currently a manual step.
 */
const ensureSchemaExists = (path: string): void => {
  try {
    // Check we have the OpenAPI before generating alerting RTK APIs,
    // as this is currently a manual process
    accessSync(path);
  } catch (e) {
    console.error('\nCould not find OpenAPI definition.\n');
    console.error(
      'Please visit /openapi/v3/apis/notifications.alerting.grafana.app/v0alpha1 and save the OpenAPI definition to data/alerting/openapi.json\n'
    );
    throw e;
  }
};

ensureSchemaExists(schemaFile);

// Options for the generated time-intervals API: endpoints are filtered down to
// the one we consume, and the output is attached to the shared `alertingApi`.
const timeIntervalsOutput = {
  apiFile: '../public/app/features/alerting/unified/api/alertingApi.ts',
  apiImport: 'alertingApi',
  filterEndpoints: ['listTimeIntervalForAllNamespaces'],
  exportName: 'generatedTimeIntervalsApi',
  flattenArg: false,
};

const config: ConfigFile = {
  schemaFile,
  apiFile: '',
  tag: true,
  outputFiles: {
    '../public/app/features/alerting/unified/openapi/timeIntervalsApi.gen.ts': timeIntervalsOutput,
  },
};

export default config;