Mirror of https://github.com/grafana/grafana.git, synced 2025-02-25 18:55:37 -06:00
Chore: Remove context.TODO() (#43409)

* Remove context.TODO() from services
* Fix live test
* Remove context.TODO
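The pattern throughout this change is the same: functions that used to call context.TODO() internally now take a context.Context as their first parameter, HTTP handlers pass the request-scoped context (c.Req.Context()), and tests and process-level entry points pass context.Background(). A minimal, hypothetical before/after sketch of that refactor (names are illustrative, not Grafana APIs):

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// Before: the function hides a context.TODO() inside, so callers cannot
// cancel the work or attach deadlines and trace data.
func fetchValueOld(key string) (string, error) {
	ctx := context.TODO()
	return slowLookup(ctx, key)
}

// After: the context is plumbed in as the first argument, matching the
// signatures introduced in this change.
func fetchValue(ctx context.Context, key string) (string, error) {
	return slowLookup(ctx, key)
}

func slowLookup(ctx context.Context, key string) (string, error) {
	select {
	case <-time.After(10 * time.Millisecond): // pretend I/O
		return "value-for-" + key, nil
	case <-ctx.Done():
		return "", ctx.Err()
	}
}

func main() {
	// Tests and top-level entry points use context.Background();
	// HTTP handlers would pass the request's context instead.
	v, err := fetchValue(context.Background(), "key1")
	fmt.Println(v, err)
}
```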
@@ -416,7 +416,7 @@ func GetDataSourceIdByName(c *models.ReqContext) response.Response {
 // /api/datasources/:id/resources/*
 func (hs *HTTPServer) CallDatasourceResource(c *models.ReqContext) {
 datasourceID := c.ParamsInt64(":id")
-ds, err := hs.DataSourceCache.GetDatasource(datasourceID, c.SignedInUser, c.SkipCache)
+ds, err := hs.DataSourceCache.GetDatasource(c.Req.Context(), datasourceID, c.SignedInUser, c.SkipCache)
 if err != nil {
 if errors.Is(err, models.ErrDataSourceAccessDenied) {
 c.JsonApiErr(403, "Access denied to datasource", err)
@@ -472,7 +472,7 @@ func convertModelToDtos(ds *models.DataSource) dtos.DataSource {
 func (hs *HTTPServer) CheckDatasourceHealth(c *models.ReqContext) response.Response {
 datasourceID := c.ParamsInt64(":id")

-ds, err := hs.DataSourceCache.GetDatasource(datasourceID, c.SignedInUser, c.SkipCache)
+ds, err := hs.DataSourceCache.GetDatasource(c.Req.Context(), datasourceID, c.SignedInUser, c.SkipCache)
 if err != nil {
 if errors.Is(err, models.ErrDataSourceAccessDenied) {
 return response.Error(403, "Access denied to datasource", err)
@@ -162,7 +162,7 @@ func TestOrgUsersAPIEndpoint_LegacyAccessControl_FolderAdmin(t *testing.T) {
 Updated: time.Now(),
 },
 }
-err = sc.db.UpdateDashboardACL(folder.Id, acls)
+err = sc.db.UpdateDashboardACL(context.Background(), folder.Id, acls)
 require.NoError(t, err)

 response := callAPI(sc.server, http.MethodGet, "/api/org/users/lookup", nil, t)
@@ -629,7 +629,7 @@ func TestPatchOrgUsersAPIEndpoint_AccessControl(t *testing.T) {
 UserId: tc.targetUserId,
 OrgId: tc.targetOrg,
 }
-err = sqlstore.GetSignedInUser(context.TODO(), &getUserQuery)
+err = sqlstore.GetSignedInUser(context.Background(), &getUserQuery)
 require.NoError(t, err)
 assert.Equal(t, tc.expectedUserRole, getUserQuery.Result.OrgRole)
 }
@@ -52,9 +52,9 @@ func (dc *databaseCache) internalRunGC() {
 }
 }

-func (dc *databaseCache) Get(key string) (interface{}, error) {
+func (dc *databaseCache) Get(ctx context.Context, key string) (interface{}, error) {
 cacheHit := CacheData{}
-session := dc.SQLStore.NewSession(context.Background())
+session := dc.SQLStore.NewSession(ctx)
 defer session.Close()

 exist, err := session.Where("cache_key= ?", key).Get(&cacheHit)
@@ -70,7 +70,7 @@ func (dc *databaseCache) Get(key string) (interface{}, error) {
 if cacheHit.Expires > 0 {
 existedButExpired := getTime().Unix()-cacheHit.CreatedAt >= cacheHit.Expires
 if existedButExpired {
-err = dc.Delete(key) // ignore this error since we will return `ErrCacheItemNotFound` anyway
+err = dc.Delete(ctx, key) // ignore this error since we will return `ErrCacheItemNotFound` anyway
 if err != nil {
 dc.log.Debug("Deletion of expired key failed: %v", err)
 }
@@ -86,7 +86,7 @@ func (dc *databaseCache) Get(key string) (interface{}, error) {
 return item.Val, nil
 }

-func (dc *databaseCache) Set(key string, value interface{}, expire time.Duration) error {
+func (dc *databaseCache) Set(ctx context.Context, key string, value interface{}, expire time.Duration) error {
 item := &cachedItem{Val: value}
 data, err := encodeGob(item)
 if err != nil {
@@ -123,8 +123,8 @@ func (dc *databaseCache) Set(key string, value interface{}, expire time.Duration
 return err
 }

-func (dc *databaseCache) Delete(key string) error {
-return dc.SQLStore.WithDbSession(context.Background(), func(session *sqlstore.DBSession) error {
+func (dc *databaseCache) Delete(ctx context.Context, key string) error {
+return dc.SQLStore.WithDbSession(ctx, func(session *sqlstore.DBSession) error {
 sql := "DELETE FROM cache_data WHERE cache_key=?"
 _, err := session.Exec(sql, key)
@@ -1,6 +1,7 @@
 package remotecache

 import (
+"context"
 "testing"
 "time"

@@ -22,37 +23,37 @@ func TestDatabaseStorageGarbageCollection(t *testing.T) {
 // set time.now to 2 weeks ago
 var err error
 getTime = func() time.Time { return time.Now().AddDate(0, 0, -2) }
-err = db.Set("key1", obj, 1000*time.Second)
+err = db.Set(context.Background(), "key1", obj, 1000*time.Second)
 assert.Equal(t, err, nil)

-err = db.Set("key2", obj, 1000*time.Second)
+err = db.Set(context.Background(), "key2", obj, 1000*time.Second)
 assert.Equal(t, err, nil)

-err = db.Set("key3", obj, 1000*time.Second)
+err = db.Set(context.Background(), "key3", obj, 1000*time.Second)
 assert.Equal(t, err, nil)

 // insert object that should never expire
-err = db.Set("key4", obj, 0)
+err = db.Set(context.Background(), "key4", obj, 0)
 assert.Equal(t, err, nil)

 getTime = time.Now
-err = db.Set("key5", obj, 1000*time.Second)
+err = db.Set(context.Background(), "key5", obj, 1000*time.Second)
 assert.Equal(t, err, nil)

 // run GC
 db.internalRunGC()

 // try to read values
-_, err = db.Get("key1")
+_, err = db.Get(context.Background(), "key1")
 assert.Equal(t, err, ErrCacheItemNotFound, "expected cache item not found. got: ", err)
-_, err = db.Get("key2")
+_, err = db.Get(context.Background(), "key2")
 assert.Equal(t, err, ErrCacheItemNotFound)
-_, err = db.Get("key3")
+_, err = db.Get(context.Background(), "key3")
 assert.Equal(t, err, ErrCacheItemNotFound)

-_, err = db.Get("key4")
+_, err = db.Get(context.Background(), "key4")
 assert.Equal(t, err, nil)
-_, err = db.Get("key5")
+_, err = db.Get(context.Background(), "key5")
 assert.Equal(t, err, nil)
 }

@@ -67,9 +68,9 @@ func TestSecondSet(t *testing.T) {

 obj := &CacheableStruct{String: "hey!"}

-err = db.Set("killa-gorilla", obj, 0)
+err = db.Set(context.Background(), "killa-gorilla", obj, 0)
 assert.Equal(t, err, nil)

-err = db.Set("killa-gorilla", obj, 0)
+err = db.Set(context.Background(), "killa-gorilla", obj, 0)
 assert.Equal(t, err, nil)
 }
@@ -1,6 +1,7 @@
 package remotecache

 import (
+"context"
 "time"

 "github.com/bradfitz/gomemcache/memcache"
@@ -28,7 +29,7 @@ func newItem(sid string, data []byte, expire int32) *memcache.Item {
 }

 // Set sets value to given key in the cache.
-func (s *memcachedStorage) Set(key string, val interface{}, expires time.Duration) error {
+func (s *memcachedStorage) Set(ctx context.Context, key string, val interface{}, expires time.Duration) error {
 item := &cachedItem{Val: val}
 bytes, err := encodeGob(item)
 if err != nil {
@@ -45,7 +46,7 @@ func (s *memcachedStorage) Set(key string, val interface{}, expires time.Duratio
 }

 // Get gets value by given key in the cache.
-func (s *memcachedStorage) Get(key string) (interface{}, error) {
+func (s *memcachedStorage) Get(ctx context.Context, key string) (interface{}, error) {
 memcachedItem, err := s.c.Get(key)
 if err != nil && err.Error() == "memcache: cache miss" {
 return nil, ErrCacheItemNotFound
@@ -66,6 +67,6 @@ func (s *memcachedStorage) Get(key string) (interface{}, error) {
 }

 // Delete delete a key from the cache
-func (s *memcachedStorage) Delete(key string) error {
+func (s *memcachedStorage) Delete(ctx context.Context, key string) error {
 return s.c.Delete(key)
 }
@@ -86,19 +86,19 @@ func newRedisStorage(opts *setting.RemoteCacheOptions) (*redisStorage, error) {
 }

 // Set sets value to given key in session.
-func (s *redisStorage) Set(key string, val interface{}, expires time.Duration) error {
+func (s *redisStorage) Set(ctx context.Context, key string, val interface{}, expires time.Duration) error {
 item := &cachedItem{Val: val}
 value, err := encodeGob(item)
 if err != nil {
 return err
 }
-status := s.c.Set(context.TODO(), key, string(value), expires)
+status := s.c.Set(ctx, key, string(value), expires)
 return status.Err()
 }

 // Get gets value by given key in session.
-func (s *redisStorage) Get(key string) (interface{}, error) {
-v := s.c.Get(context.TODO(), key)
+func (s *redisStorage) Get(ctx context.Context, key string) (interface{}, error) {
+v := s.c.Get(ctx, key)

 item := &cachedItem{}
 err := decodeGob([]byte(v.Val()), item)
@@ -113,7 +113,7 @@ func (s *redisStorage) Get(key string) (interface{}, error) {
 }

 // Delete delete a key from session.
-func (s *redisStorage) Delete(key string) error {
-cmd := s.c.Del(context.TODO(), key)
+func (s *redisStorage) Delete(ctx context.Context, key string) error {
+cmd := s.c.Del(ctx, key)
 return cmd.Err()
 }
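The Redis client used in this storage already takes a context as the first argument of every command, which is why the methods above can simply forward ctx instead of manufacturing context.TODO(). A minimal, stand-alone sketch of that API shape, assuming go-redis v8 and a local Redis on the default port:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/go-redis/redis/v8"
)

func main() {
	client := redis.NewClient(&redis.Options{Addr: "localhost:6379"})

	// Bound the whole cache round trip by a deadline; cancelling ctx aborts the call.
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	if err := client.Set(ctx, "render-key", "value", time.Minute).Err(); err != nil {
		fmt.Println("set failed:", err)
		return
	}
	val, err := client.Get(ctx, "render-key").Result()
	fmt.Println(val, err)
}
```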
@@ -28,7 +28,7 @@ const (
 )

 func ProvideService(cfg *setting.Cfg, sqlStore *sqlstore.SQLStore) (*RemoteCache, error) {
-client, err := createClient(cfg.RemoteCacheOptions, sqlStore)
+client, err := createClient(context.Background(), cfg.RemoteCacheOptions, sqlStore)
 if err != nil {
 return nil, err
 }
@@ -47,13 +47,13 @@ func ProvideService(cfg *setting.Cfg, sqlStore *sqlstore.SQLStore) (*RemoteCache
 // ex `remotecache.Register(CacheableStruct{})``
 type CacheStorage interface {
 // Get reads object from Cache
-Get(key string) (interface{}, error)
+Get(ctx context.Context, key string) (interface{}, error)

 // Set sets an object into the cache. if `expire` is set to zero it will default to 24h
-Set(key string, value interface{}, expire time.Duration) error
+Set(ctx context.Context, key string, value interface{}, expire time.Duration) error

 // Delete object from cache
-Delete(key string) error
+Delete(ctx context.Context, key string) error
 }

 // RemoteCache allows Grafana to cache data outside its own process
@@ -65,22 +65,22 @@ type RemoteCache struct {
 }

 // Get reads object from Cache
-func (ds *RemoteCache) Get(key string) (interface{}, error) {
-return ds.client.Get(key)
+func (ds *RemoteCache) Get(ctx context.Context, key string) (interface{}, error) {
+return ds.client.Get(ctx, key)
 }

 // Set sets an object into the cache. if `expire` is set to zero it will default to 24h
-func (ds *RemoteCache) Set(key string, value interface{}, expire time.Duration) error {
+func (ds *RemoteCache) Set(ctx context.Context, key string, value interface{}, expire time.Duration) error {
 if expire == 0 {
 expire = defaultMaxCacheExpiration
 }

-return ds.client.Set(key, value, expire)
+return ds.client.Set(ctx, key, value, expire)
 }

 // Delete object from cache
-func (ds *RemoteCache) Delete(key string) error {
-return ds.client.Delete(key)
+func (ds *RemoteCache) Delete(ctx context.Context, key string) error {
+return ds.client.Delete(ctx, key)
 }

 // Run starts the backend processes for cache clients.
@@ -95,7 +95,7 @@ func (ds *RemoteCache) Run(ctx context.Context) error {
 return ctx.Err()
 }

-func createClient(opts *setting.RemoteCacheOptions, sqlstore *sqlstore.SQLStore) (CacheStorage, error) {
+func createClient(ctx context.Context, opts *setting.RemoteCacheOptions, sqlstore *sqlstore.SQLStore) (CacheStorage, error) {
 if opts.Name == redisCacheType {
 return newRedisStorage(opts)
 }
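With this hunk, every CacheStorage backend (database, memcached, redis) shares the same context-first shape, so callers thread one context through a single interface. A self-contained sketch of that shape with an in-memory stand-in (the interface mirrors the diff; the implementation and names are illustrative, not Grafana's packages):

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

var errCacheItemNotFound = errors.New("cache item not found")

// cacheStorage mirrors the context-first interface introduced above.
type cacheStorage interface {
	Get(ctx context.Context, key string) (interface{}, error)
	Set(ctx context.Context, key string, value interface{}, expire time.Duration) error
	Delete(ctx context.Context, key string) error
}

// memStorage is an illustrative in-memory implementation.
type memStorage struct{ items map[string]interface{} }

func (m *memStorage) Get(ctx context.Context, key string) (interface{}, error) {
	if err := ctx.Err(); err != nil {
		return nil, err // honour cancellation before touching the store
	}
	v, ok := m.items[key]
	if !ok {
		return nil, errCacheItemNotFound
	}
	return v, nil
}

func (m *memStorage) Set(ctx context.Context, key string, value interface{}, _ time.Duration) error {
	if err := ctx.Err(); err != nil {
		return err
	}
	m.items[key] = value
	return nil
}

func (m *memStorage) Delete(ctx context.Context, key string) error {
	if err := ctx.Err(); err != nil {
		return err
	}
	delete(m.items, key)
	return nil
}

func main() {
	var store cacheStorage = &memStorage{items: map[string]interface{}{}}
	ctx := context.Background() // a handler would pass c.Req.Context() instead
	_ = store.Set(ctx, "render-key", int64(42), 0)
	v, err := store.Get(ctx, "render-key")
	fmt.Println(v, err)
}
```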
@@ -1,6 +1,7 @@
 package remotecache

 import (
+"context"
 "testing"
 "time"

@@ -43,7 +44,7 @@ func TestCachedBasedOnConfig(t *testing.T) {
 }

 func TestInvalidCacheTypeReturnsError(t *testing.T) {
-_, err := createClient(&setting.RemoteCacheOptions{Name: "invalid"}, nil)
+_, err := createClient(context.Background(), &setting.RemoteCacheOptions{Name: "invalid"}, nil)
 assert.Equal(t, err, ErrInvalidCacheType)
 }

@@ -55,10 +56,10 @@ func runTestsForClient(t *testing.T, client CacheStorage) {
 func canPutGetAndDeleteCachedObjects(t *testing.T, client CacheStorage) {
 cacheableStruct := CacheableStruct{String: "hej", Int64: 2000}

-err := client.Set("key1", cacheableStruct, 0)
+err := client.Set(context.Background(), "key1", cacheableStruct, 0)
 assert.Equal(t, err, nil, "expected nil. got: ", err)

-data, err := client.Get("key1")
+data, err := client.Get(context.Background(), "key1")
 assert.Equal(t, err, nil)
 s, ok := data.(CacheableStruct)

@@ -66,23 +67,23 @@ func canPutGetAndDeleteCachedObjects(t *testing.T, client CacheStorage) {
 assert.Equal(t, s.String, "hej")
 assert.Equal(t, s.Int64, int64(2000))

-err = client.Delete("key1")
+err = client.Delete(context.Background(), "key1")
 assert.Equal(t, err, nil)

-_, err = client.Get("key1")
+_, err = client.Get(context.Background(), "key1")
 assert.Equal(t, err, ErrCacheItemNotFound)
 }

 func canNotFetchExpiredItems(t *testing.T, client CacheStorage) {
 cacheableStruct := CacheableStruct{String: "hej", Int64: 2000}

-err := client.Set("key1", cacheableStruct, time.Second)
+err := client.Set(context.Background(), "key1", cacheableStruct, time.Second)
 assert.Equal(t, err, nil)

 // not sure how this can be avoided when testing redis/memcached :/
 <-time.After(time.Second + time.Millisecond)

 // should not be able to read that value since its expired
-_, err = client.Get("key1")
+_, err = client.Get(context.Background(), "key1")
 assert.Equal(t, err, ErrCacheItemNotFound)
 }
@@ -51,9 +51,9 @@ func (uss *UsageStats) GetUsageReport(ctx context.Context) (usagestats.Report, e
 metrics["stats.viewers.count"] = statsQuery.Result.Viewers
 metrics["stats.orgs.count"] = statsQuery.Result.Orgs
 metrics["stats.playlist.count"] = statsQuery.Result.Playlists
-metrics["stats.plugins.apps.count"] = uss.appCount()
-metrics["stats.plugins.panels.count"] = uss.panelCount()
-metrics["stats.plugins.datasources.count"] = uss.dataSourceCount()
+metrics["stats.plugins.apps.count"] = uss.appCount(ctx)
+metrics["stats.plugins.panels.count"] = uss.panelCount(ctx)
+metrics["stats.plugins.datasources.count"] = uss.dataSourceCount(ctx)
 metrics["stats.alerts.count"] = statsQuery.Result.Alerts
 metrics["stats.active_users.count"] = statsQuery.Result.ActiveUsers
 metrics["stats.active_admins.count"] = statsQuery.Result.ActiveAdmins
@@ -366,14 +366,14 @@ func (uss *UsageStats) GetUsageStatsId(ctx context.Context) string {
 return anonId
 }

-func (uss *UsageStats) appCount() int {
-return len(uss.pluginStore.Plugins(context.TODO(), plugins.App))
+func (uss *UsageStats) appCount(ctx context.Context) int {
+return len(uss.pluginStore.Plugins(ctx, plugins.App))
 }

-func (uss *UsageStats) panelCount() int {
-return len(uss.pluginStore.Plugins(context.TODO(), plugins.Panel))
+func (uss *UsageStats) panelCount(ctx context.Context) int {
+return len(uss.pluginStore.Plugins(ctx, plugins.Panel))
 }

-func (uss *UsageStats) dataSourceCount() int {
-return len(uss.pluginStore.Plugins(context.TODO(), plugins.DataSource))
+func (uss *UsageStats) dataSourceCount(ctx context.Context) int {
+return len(uss.pluginStore.Plugins(ctx, plugins.DataSource))
 }
@@ -317,9 +317,9 @@ func TestMetrics(t *testing.T) {
 assert.Equal(t, getSystemStatsQuery.Result.Viewers, metrics.Get("stats.viewers.count").MustInt64())
 assert.Equal(t, getSystemStatsQuery.Result.Orgs, metrics.Get("stats.orgs.count").MustInt64())
 assert.Equal(t, getSystemStatsQuery.Result.Playlists, metrics.Get("stats.playlist.count").MustInt64())
-assert.Equal(t, uss.appCount(), metrics.Get("stats.plugins.apps.count").MustInt())
-assert.Equal(t, uss.panelCount(), metrics.Get("stats.plugins.panels.count").MustInt())
-assert.Equal(t, uss.dataSourceCount(), metrics.Get("stats.plugins.datasources.count").MustInt())
+assert.Equal(t, uss.appCount(context.Background()), metrics.Get("stats.plugins.apps.count").MustInt())
+assert.Equal(t, uss.panelCount(context.Background()), metrics.Get("stats.plugins.panels.count").MustInt())
+assert.Equal(t, uss.dataSourceCount(context.Background()), metrics.Get("stats.plugins.datasources.count").MustInt())
 assert.Equal(t, getSystemStatsQuery.Result.Alerts, metrics.Get("stats.alerts.count").MustInt64())
 assert.Equal(t, getSystemStatsQuery.Result.ActiveUsers, metrics.Get("stats.active_users.count").MustInt64())
 assert.Equal(t, getSystemStatsQuery.Result.ActiveAdmins, metrics.Get("stats.active_admins.count").MustInt64())
@@ -373,7 +373,7 @@ func TestMiddlewareContext(t *testing.T) {
 h, err := authproxy.HashCacheKey(hdrName + "-" + group)
 require.NoError(t, err)
 key := fmt.Sprintf(authproxy.CachePrefix, h)
-err = sc.remoteCacheService.Set(key, userID, 0)
+err = sc.remoteCacheService.Set(context.Background(), key, userID, 0)
 require.NoError(t, err)
 sc.fakeReq("GET", "/")
@@ -114,7 +114,7 @@ func (m *PluginManager) init() error {
 func (m *PluginManager) Run(ctx context.Context) error {
 if m.cfg.CheckForUpdates {
 go func() {
-m.checkForUpdates()
+m.checkForUpdates(ctx)

 ticker := time.NewTicker(time.Minute * 10)
 run := true
@@ -122,7 +122,7 @@ func (m *PluginManager) Run(ctx context.Context) error {
 for run {
 select {
 case <-ticker.C:
-m.checkForUpdates()
+m.checkForUpdates(ctx)
 case <-ctx.Done():
 run = false
 }
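The update-check loop above is the standard Go pattern for periodic work that stops when its context is cancelled; passing ctx into checkForUpdates lets the inner plugin-store calls observe the same cancellation. A stand-alone sketch of that pattern (interval and function names are illustrative):

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// runPeriodically calls work at the given interval until ctx is cancelled.
func runPeriodically(ctx context.Context, interval time.Duration, work func(context.Context)) {
	work(ctx) // run once up front, as the plugin manager does

	ticker := time.NewTicker(interval)
	defer ticker.Stop()

	for {
		select {
		case <-ticker.C:
			work(ctx)
		case <-ctx.Done():
			return
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 350*time.Millisecond)
	defer cancel()
	runPeriodically(ctx, 100*time.Millisecond, func(context.Context) {
		fmt.Println("checking for updates...")
	})
}
```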
@@ -20,7 +20,7 @@ type gcomPlugin struct {
 Version string `json:"version"`
 }

-func (m *PluginManager) checkForUpdates() {
+func (m *PluginManager) checkForUpdates(ctx context.Context) {
 if !m.cfg.CheckForUpdates {
 return
 }
@@ -52,7 +52,7 @@ func (m *PluginManager) checkForUpdates() {
 return
 }

-for _, localP := range m.Plugins(context.TODO()) {
+for _, localP := range m.Plugins(ctx) {
 for _, gcomP := range gcomPlugins {
 if gcomP.Slug == localP.ID {
 localP.GrafanaComVersion = gcomP.Version
@@ -373,7 +373,7 @@ func (s *testRenderService) RenderErrorImage(theme rendering.Theme, err error) (
 return &rendering.RenderResult{FilePath: "image.png"}, nil
 }

-func (s *testRenderService) GetRenderUser(key string) (*rendering.RenderUser, bool) {
+func (s *testRenderService) GetRenderUser(ctx context.Context, key string) (*rendering.RenderUser, bool) {
 return nil, false
 }
@@ -171,7 +171,7 @@ func (ks *keySetHTTP) getJWKS(ctx context.Context) (keySetJWKS, error) {
 var jwks keySetJWKS

 if ks.cacheExpiration > 0 {
-if val, err := ks.cache.Get(ks.cacheKey); err == nil {
+if val, err := ks.cache.Get(ctx, ks.cacheKey); err == nil {
 err := json.Unmarshal(val.([]byte), &jwks)
 return jwks, err
 }
@@ -200,7 +200,7 @@ func (ks *keySetHTTP) getJWKS(ctx context.Context) (keySetJWKS, error) {
 }

 if ks.cacheExpiration > 0 {
-err = ks.cache.Set(ks.cacheKey, jsonBuf.Bytes(), ks.cacheExpiration)
+err = ks.cache.Set(ctx, ks.cacheKey, jsonBuf.Bytes(), ks.cacheExpiration)
 }
 return jwks, err
 }
@@ -67,7 +67,7 @@ func TestInitContextWithAuthProxy_CachedInvalidUserID(t *testing.T) {
 key := fmt.Sprintf(authproxy.CachePrefix, h)

 t.Logf("Injecting stale user ID in cache with key %q", key)
-err = svc.RemoteCache.Set(key, int64(33), 0)
+err = svc.RemoteCache.Set(context.Background(), key, int64(33), 0)
 require.NoError(t, err)

 authEnabled := svc.initContextWithAuthProxy(ctx, orgID)
@@ -76,7 +76,7 @@ func TestInitContextWithAuthProxy_CachedInvalidUserID(t *testing.T) {
 require.Equal(t, userID, ctx.SignedInUser.UserId)
 require.True(t, ctx.IsSignedIn)

-i, err := svc.RemoteCache.Get(key)
+i, err := svc.RemoteCache.Get(context.Background(), key)
 require.NoError(t, err)
 require.Equal(t, userID, i.(int64))
 }
@@ -204,7 +204,7 @@ func (auth *AuthProxy) GetUserViaCache(logger log.Logger) (int64, error) {
 return 0, err
 }
 logger.Debug("Getting user ID via auth cache", "cacheKey", cacheKey)
-userID, err := auth.remoteCache.Get(cacheKey)
+userID, err := auth.remoteCache.Get(auth.ctx.Req.Context(), cacheKey)
 if err != nil {
 logger.Debug("Failed getting user ID via auth cache", "error", err)
 return 0, err
@@ -221,7 +221,7 @@ func (auth *AuthProxy) RemoveUserFromCache(logger log.Logger) error {
 return err
 }
 logger.Debug("Removing user from auth cache", "cacheKey", cacheKey)
-if err := auth.remoteCache.Delete(cacheKey); err != nil {
+if err := auth.remoteCache.Delete(auth.ctx.Req.Context(), cacheKey); err != nil {
 return err
 }

@@ -349,14 +349,14 @@ func (auth *AuthProxy) Remember(id int64) error {
 }

 // Check if user already in cache
-userID, err := auth.remoteCache.Get(key)
+userID, err := auth.remoteCache.Get(auth.ctx.Req.Context(), key)
 if err == nil && userID != nil {
 return nil
 }

 expiration := time.Duration(auth.cfg.AuthProxySyncTTL) * time.Minute

-if err := auth.remoteCache.Set(key, id, expiration); err != nil {
+if err := auth.remoteCache.Set(auth.ctx.Req.Context(), key, id, expiration); err != nil {
 return err
 }
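Handler-side call sites in this change use the incoming request's context (c.Req.Context() or auth.ctx.Req.Context()) rather than a background context, so cache lookups are abandoned when the client disconnects or the request times out. A minimal net/http sketch of the same idea (the handler and the lookupUser helper are illustrative, not Grafana code):

```go
package main

import (
	"context"
	"fmt"
	"net/http"
	"time"
)

// lookupUser stands in for a remote-cache read; it respects ctx cancellation.
func lookupUser(ctx context.Context, key string) (int64, error) {
	select {
	case <-time.After(50 * time.Millisecond): // pretend network round trip
		return 42, nil
	case <-ctx.Done():
		return 0, ctx.Err()
	}
}

func handler(w http.ResponseWriter, r *http.Request) {
	// r.Context() is cancelled when the client goes away or the request is
	// torn down, which is what c.Req.Context() provides in Grafana handlers.
	userID, err := lookupUser(r.Context(), "auth-proxy-key")
	if err != nil {
		http.Error(w, err.Error(), http.StatusServiceUnavailable)
		return
	}
	fmt.Fprintf(w, "user %d\n", userID)
}

func main() {
	http.HandleFunc("/", handler)
	_ = http.ListenAndServe(":8080", nil)
}
```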
@@ -87,7 +87,7 @@ func TestMiddlewareContext(t *testing.T) {
 h, err := HashCacheKey(hdrName)
 require.NoError(t, err)
 key := fmt.Sprintf(CachePrefix, h)
-err = cache.Set(key, id, 0)
+err = cache.Set(context.Background(), key, id, 0)
 require.NoError(t, err)
 // Set up the middleware
 auth := prepareMiddleware(t, cache, nil)
@@ -109,7 +109,7 @@ func TestMiddlewareContext(t *testing.T) {
 h, err := HashCacheKey(hdrName + "-" + group + "-" + role)
 require.NoError(t, err)
 key := fmt.Sprintf(CachePrefix, h)
-err = cache.Set(key, id, 0)
+err = cache.Set(context.Background(), key, id, 0)
 require.NoError(t, err)

 auth := prepareMiddleware(t, cache, func(req *http.Request, cfg *setting.Cfg) {
@@ -399,7 +399,7 @@ func (h *ContextHandler) initContextWithRenderAuth(reqContext *models.ReqContext
 span, _ := opentracing.StartSpanFromContext(reqContext.Req.Context(), "initContextWithRenderAuth")
 defer span.Finish()

-renderUser, exists := h.RenderService.GetRenderUser(key)
+renderUser, exists := h.RenderService.GetRenderUser(reqContext.Req.Context(), key)
 if !exists {
 reqContext.JsonApiErr(401, "Invalid Render Key", nil)
 return true
@@ -376,7 +376,7 @@ func (s *FakeDashboardService) SaveDashboard(ctx context.Context, dto *SaveDashb
 }

 func (s *FakeDashboardService) ImportDashboard(ctx context.Context, dto *SaveDashboardDTO) (*models.Dashboard, error) {
-return s.SaveDashboard(context.Background(), dto, true)
+return s.SaveDashboard(ctx, dto, true)
 }

 func (s *FakeDashboardService) DeleteDashboard(ctx context.Context, dashboardId int64, orgId int64) error {
@@ -48,7 +48,7 @@ func (p *DataSourceProxyService) ProxyDataSourceRequest(c *models.ReqContext) {
 func (p *DataSourceProxyService) ProxyDatasourceRequestWithID(c *models.ReqContext, dsID int64) {
 c.TimeRequest(metrics.MDataSourceProxyReqTimer)

-ds, err := p.DataSourceCache.GetDatasource(dsID, c.SignedInUser, c.SkipCache)
+ds, err := p.DataSourceCache.GetDatasource(c.Req.Context(), dsID, c.SignedInUser, c.SkipCache)
 if err != nil {
 if errors.Is(err, models.ErrDataSourceAccessDenied) {
 c.JsonApiErr(http.StatusForbidden, "Access denied to datasource", err)
@@ -18,7 +18,7 @@ func ProvideCacheService(cacheService *localcache.CacheService, sqlStore *sqlsto
 }

 type CacheService interface {
-GetDatasource(datasourceID int64, user *models.SignedInUser, skipCache bool) (*models.DataSource, error)
+GetDatasource(ctx context.Context, datasourceID int64, user *models.SignedInUser, skipCache bool) (*models.DataSource, error)
 GetDatasourceByUID(ctx context.Context, datasourceUID string, user *models.SignedInUser, skipCache bool) (*models.DataSource, error)
 }

@@ -28,6 +28,7 @@ type CacheServiceImpl struct {
 }

 func (dc *CacheServiceImpl) GetDatasource(
+ctx context.Context,
 datasourceID int64,
 user *models.SignedInUser,
 skipCache bool,
@@ -46,7 +47,7 @@ func (dc *CacheServiceImpl) GetDatasource(
 plog.Debug("Querying for data source via SQL store", "id", datasourceID, "orgId", user.OrgId)

 query := &models.GetDataSourceQuery{Id: datasourceID, OrgId: user.OrgId}
-err := dc.SQLStore.GetDataSource(context.TODO(), query)
+err := dc.SQLStore.GetDataSource(ctx, query)
 if err != nil {
 return nil, err
 }
@@ -243,7 +243,7 @@ func updateFolderACL(t *testing.T, sqlStore *sqlstore.SQLStore, folderID int64,
 })
 }

-err := sqlStore.UpdateDashboardACL(folderID, aclItems)
+err := sqlStore.UpdateDashboardACL(context.Background(), folderID, aclItems)
 require.NoError(t, err)
 }

@@ -1462,7 +1462,7 @@ func updateFolderACL(t *testing.T, sqlStore *sqlstore.SQLStore, folderID int64,
 })
 }

-err := sqlStore.UpdateDashboardACL(folderID, aclItems)
+err := sqlStore.UpdateDashboardACL(context.Background(), folderID, aclItems)
 require.NoError(t, err)
 }
@@ -1,6 +1,7 @@
 package api

 import (
+"context"
 "errors"
 "fmt"
 "net/http"
@@ -41,7 +42,7 @@ func (srv TestingApiSrv) RouteTestRuleConfig(c *models.ReqContext, body apimodel

 var path string
 if datasourceID, err := strconv.ParseInt(recipient, 10, 64); err == nil {
-ds, err := srv.DatasourceCache.GetDatasource(datasourceID, c.SignedInUser, c.SkipCache)
+ds, err := srv.DatasourceCache.GetDatasource(context.Background(), datasourceID, c.SignedInUser, c.SkipCache)
 if err != nil {
 return ErrResp(http.StatusInternalServerError, err, "failed to get datasource")
 }
@@ -59,7 +59,7 @@ func (am *LotexAM) withAMReq(
 extractor func(*response.NormalResponse) (interface{}, error),
 headers map[string]string,
 ) response.Response {
-ds, err := am.DataProxy.DataSourceCache.GetDatasource(ctx.ParamsInt64(":Recipient"), ctx.SignedInUser, ctx.SkipCache)
+ds, err := am.DataProxy.DataSourceCache.GetDatasource(ctx.Req.Context(), ctx.ParamsInt64(":Recipient"), ctx.SignedInUser, ctx.SkipCache)
 if err != nil {
 if errors.Is(err, models.ErrDataSourceAccessDenied) {
 return ErrResp(http.StatusForbidden, err, "Access denied to datasource")
@@ -76,7 +76,7 @@ func (p *LotexProm) RouteGetRuleStatuses(ctx *models.ReqContext) response.Respon
 }

 func (p *LotexProm) getEndpoints(ctx *models.ReqContext) (*promEndpoints, error) {
-ds, err := p.DataProxy.DataSourceCache.GetDatasource(ctx.ParamsInt64(":Recipient"), ctx.SignedInUser, ctx.SkipCache)
+ds, err := p.DataProxy.DataSourceCache.GetDatasource(ctx.Req.Context(), ctx.ParamsInt64(":Recipient"), ctx.SignedInUser, ctx.SkipCache)
 if err != nil {
 return nil, err
 }
@@ -152,7 +152,7 @@ func (r *LotexRuler) RoutePostNameRulesConfig(ctx *models.ReqContext, conf apimo
 }

 func (r *LotexRuler) validateAndGetPrefix(ctx *models.ReqContext) (string, error) {
-ds, err := r.DataProxy.DataSourceCache.GetDatasource(ctx.ParamsInt64(":Recipient"), ctx.SignedInUser, ctx.SkipCache)
+ds, err := r.DataProxy.DataSourceCache.GetDatasource(ctx.Req.Context(), ctx.ParamsInt64(":Recipient"), ctx.SignedInUser, ctx.SkipCache)
 if err != nil {
 return "", err
 }
@@ -46,7 +46,7 @@ func backendType(ctx *models.ReqContext, cache datasources.CacheService) (apimod
 return apimodels.GrafanaBackend, nil
 }
 if datasourceID, err := strconv.ParseInt(recipient, 10, 64); err == nil {
-if ds, err := cache.GetDatasource(datasourceID, ctx.SignedInUser, ctx.SkipCache); err == nil {
+if ds, err := cache.GetDatasource(ctx.Req.Context(), datasourceID, ctx.SignedInUser, ctx.SkipCache); err == nil {
 switch ds.Type {
 case "loki", "prometheus":
 return apimodels.LoTexRulerBackend, nil
@@ -14,7 +14,7 @@ import (
 )

 type configReader interface {
-readConfig(path string) ([]*pluginsAsConfig, error)
+readConfig(ctx context.Context, path string) ([]*pluginsAsConfig, error)
 }

 type configReaderImpl struct {
@@ -26,7 +26,7 @@ func newConfigReader(logger log.Logger, pluginStore plugins.Store) configReader
 return &configReaderImpl{log: logger, pluginStore: pluginStore}
 }

-func (cr *configReaderImpl) readConfig(path string) ([]*pluginsAsConfig, error) {
+func (cr *configReaderImpl) readConfig(ctx context.Context, path string) ([]*pluginsAsConfig, error) {
 var apps []*pluginsAsConfig
 cr.log.Debug("Looking for plugin provisioning files", "path", path)

@@ -57,7 +57,7 @@ func (cr *configReaderImpl) readConfig(path string) ([]*pluginsAsConfig, error)

 checkOrgIDAndOrgName(apps)

-err = cr.validatePluginsConfig(apps)
+err = cr.validatePluginsConfig(ctx, apps)
 if err != nil {
 return nil, err
 }
@@ -107,14 +107,14 @@ func validateRequiredField(apps []*pluginsAsConfig) error {
 return nil
 }

-func (cr *configReaderImpl) validatePluginsConfig(apps []*pluginsAsConfig) error {
+func (cr *configReaderImpl) validatePluginsConfig(ctx context.Context, apps []*pluginsAsConfig) error {
 for i := range apps {
 if apps[i].Apps == nil {
 continue
 }

 for _, app := range apps[i].Apps {
-if _, exists := cr.pluginStore.Plugin(context.TODO(), app.PluginID); !exists {
+if _, exists := cr.pluginStore.Plugin(ctx, app.PluginID); !exists {
 return fmt.Errorf("plugin not installed: %q", app.PluginID)
 }
 }
@@ -21,27 +21,27 @@ const (
 func TestConfigReader(t *testing.T) {
 t.Run("Broken yaml should return error", func(t *testing.T) {
 reader := newConfigReader(log.New("test logger"), nil)
-_, err := reader.readConfig(brokenYaml)
+_, err := reader.readConfig(context.Background(), brokenYaml)
 require.Error(t, err)
 })

 t.Run("Skip invalid directory", func(t *testing.T) {
 cfgProvider := newConfigReader(log.New("test logger"), nil)
-cfg, err := cfgProvider.readConfig(emptyFolder)
+cfg, err := cfgProvider.readConfig(context.Background(), emptyFolder)
 require.NoError(t, err)
 require.Len(t, cfg, 0)
 })

 t.Run("Unknown app plugin should return error", func(t *testing.T) {
 cfgProvider := newConfigReader(log.New("test logger"), fakePluginStore{})
-_, err := cfgProvider.readConfig(unknownApp)
+_, err := cfgProvider.readConfig(context.Background(), unknownApp)
 require.Error(t, err)
 require.Equal(t, "plugin not installed: \"nonexisting\"", err.Error())
 })

 t.Run("Read incorrect properties", func(t *testing.T) {
 cfgProvider := newConfigReader(log.New("test logger"), nil)
-_, err := cfgProvider.readConfig(incorrectSettings)
+_, err := cfgProvider.readConfig(context.Background(), incorrectSettings)
 require.Error(t, err)
 require.Equal(t, "app item 1 in configuration doesn't contain required field type", err.Error())
 })
@@ -61,7 +61,7 @@ func TestConfigReader(t *testing.T) {
 })

 cfgProvider := newConfigReader(log.New("test logger"), pm)
-cfg, err := cfgProvider.readConfig(correctProperties)
+cfg, err := cfgProvider.readConfig(context.Background(), correctProperties)
 require.NoError(t, err)
 require.Len(t, cfg, 1)
@@ -69,7 +69,7 @@ func (ap *PluginProvisioner) apply(ctx context.Context, cfg *pluginsAsConfig) er
 }

 func (ap *PluginProvisioner) applyChanges(ctx context.Context, configPath string) error {
-configs, err := ap.cfgProvider.readConfig(configPath)
+configs, err := ap.cfgProvider.readConfig(ctx, configPath)
 if err != nil {
 return err
 }
@@ -91,6 +91,6 @@ type testConfigReader struct {
 err error
 }

-func (tcr *testConfigReader) readConfig(path string) ([]*pluginsAsConfig, error) {
+func (tcr *testConfigReader) readConfig(ctx context.Context, path string) ([]*pluginsAsConfig, error) {
 return tcr.result, tcr.err
 }
@@ -238,7 +238,7 @@ func (s *Service) getDataSourceFromQuery(ctx context.Context, user *models.Signe
 // use datasourceId if it exists
 id := query.Get("datasourceId").MustInt64(0)
 if id > 0 {
-ds, err = s.dataSourceCache.GetDatasource(id, user, skipCache)
+ds, err = s.dataSourceCache.GetDatasource(ctx, id, user, skipCache)
 if err != nil {
 return nil, err
 }
@@ -72,5 +72,5 @@ type Service interface {
 Render(ctx context.Context, opts Opts) (*RenderResult, error)
 RenderCSV(ctx context.Context, opts CSVOpts) (*RenderCSVResult, error)
 RenderErrorImage(theme Theme, error error) (*RenderResult, error)
-GetRenderUser(key string) (*RenderUser, bool)
+GetRenderUser(ctx context.Context, key string) (*RenderUser, bool)
 }
@@ -228,12 +228,12 @@ func (rs *RenderingService) render(ctx context.Context, opts Opts) (*RenderResul
 if math.IsInf(opts.DeviceScaleFactor, 0) || math.IsNaN(opts.DeviceScaleFactor) || opts.DeviceScaleFactor <= 0 {
 opts.DeviceScaleFactor = 1
 }
-renderKey, err := rs.generateAndStoreRenderKey(opts.OrgID, opts.UserID, opts.OrgRole)
+renderKey, err := rs.generateAndStoreRenderKey(ctx, opts.OrgID, opts.UserID, opts.OrgRole)
 if err != nil {
 return nil, err
 }

-defer rs.deleteRenderKey(renderKey)
+defer rs.deleteRenderKey(ctx, renderKey)

 defer func() {
 metrics.MRenderingQueue.Set(float64(atomic.AddInt32(&rs.inProgressCount, -1)))
@@ -263,12 +263,12 @@ func (rs *RenderingService) renderCSV(ctx context.Context, opts CSVOpts) (*Rende
 }

 rs.log.Info("Rendering", "path", opts.Path)
-renderKey, err := rs.generateAndStoreRenderKey(opts.OrgID, opts.UserID, opts.OrgRole)
+renderKey, err := rs.generateAndStoreRenderKey(ctx, opts.OrgID, opts.UserID, opts.OrgRole)
 if err != nil {
 return nil, err
 }

-defer rs.deleteRenderKey(renderKey)
+defer rs.deleteRenderKey(ctx, renderKey)

 defer func() {
 metrics.MRenderingQueue.Set(float64(atomic.AddInt32(&rs.inProgressCount, -1)))
@@ -278,8 +278,8 @@ func (rs *RenderingService) renderCSV(ctx context.Context, opts CSVOpts) (*Rende
 return rs.renderCSVAction(ctx, renderKey, opts)
 }

-func (rs *RenderingService) GetRenderUser(key string) (*RenderUser, bool) {
-val, err := rs.RemoteCacheService.Get(fmt.Sprintf(renderKeyPrefix, key))
+func (rs *RenderingService) GetRenderUser(ctx context.Context, key string) (*RenderUser, bool) {
+val, err := rs.RemoteCacheService.Get(ctx, fmt.Sprintf(renderKeyPrefix, key))
 if err != nil {
 rs.log.Error("Failed to get render key from cache", "error", err)
 }
@@ -338,13 +338,13 @@ func (rs *RenderingService) getURL(path string) string {
 return fmt.Sprintf("%s://%s:%s%s/%s&render=1", protocol, rs.domain, rs.Cfg.HTTPPort, subPath, path)
 }

-func (rs *RenderingService) generateAndStoreRenderKey(orgId, userId int64, orgRole models.RoleType) (string, error) {
+func (rs *RenderingService) generateAndStoreRenderKey(ctx context.Context, orgId, userId int64, orgRole models.RoleType) (string, error) {
 key, err := util.GetRandomString(32)
 if err != nil {
 return "", err
 }

-err = rs.RemoteCacheService.Set(fmt.Sprintf(renderKeyPrefix, key), &RenderUser{
+err = rs.RemoteCacheService.Set(ctx, fmt.Sprintf(renderKeyPrefix, key), &RenderUser{
 OrgID: orgId,
 UserID: userId,
 OrgRole: string(orgRole),
@@ -356,8 +356,8 @@ func (rs *RenderingService) generateAndStoreRenderKey(orgId, userId int64, orgRo
 return key, nil
 }

-func (rs *RenderingService) deleteRenderKey(key string) {
-err := rs.RemoteCacheService.Delete(fmt.Sprintf(renderKeyPrefix, key))
+func (rs *RenderingService) deleteRenderKey(ctx context.Context, key string) {
+err := rs.RemoteCacheService.Delete(ctx, fmt.Sprintf(renderKeyPrefix, key))
 if err != nil {
 rs.log.Error("Failed to delete render key", "error", err)
 }
@@ -12,8 +12,8 @@ func (ss *SQLStore) addDashboardACLQueryAndCommandHandlers() {
 bus.AddHandlerCtx("sql", ss.GetDashboardAclInfoList)
 }

-func (ss *SQLStore) UpdateDashboardACL(dashboardID int64, items []*models.DashboardAcl) error {
-return ss.UpdateDashboardACLCtx(context.TODO(), dashboardID, items)
+func (ss *SQLStore) UpdateDashboardACL(ctx context.Context, dashboardID int64, items []*models.DashboardAcl) error {
+return ss.UpdateDashboardACLCtx(ctx, dashboardID, items)
 }

 func (ss *SQLStore) UpdateDashboardACLCtx(ctx context.Context, dashboardID int64, items []*models.DashboardAcl) error {
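UpdateDashboardACL stays a thin wrapper that forwards to the Ctx-suffixed variant; only the wrapper's signature changes, so the callers below choose the context while the SQL logic stays in one place. A stripped-down sketch of that wrapper pattern (types and names simplified, not Grafana's actual store):

```go
package main

import (
	"context"
	"fmt"
)

type store struct{}

// updateACLCtx is where the real work happens; it receives the caller's context.
func (s *store) updateACLCtx(ctx context.Context, dashboardID int64, items []string) error {
	if err := ctx.Err(); err != nil {
		return err
	}
	fmt.Printf("updating ACL for dashboard %d with %d items\n", dashboardID, len(items))
	return nil
}

// updateACL is the thin wrapper: after the change it forwards the caller's ctx
// instead of manufacturing a context.TODO() internally.
func (s *store) updateACL(ctx context.Context, dashboardID int64, items []string) error {
	return s.updateACLCtx(ctx, dashboardID, items)
}

func main() {
	s := &store{}
	_ = s.updateACL(context.Background(), 2, nil) // tests pass context.Background()
}
```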
@@ -70,7 +70,7 @@ func TestDashboardAclDataAccess(t *testing.T) {

 t.Run("Folder with removed default permissions returns no acl items", func(t *testing.T) {
 setup(t)
-err := sqlStore.UpdateDashboardACL(savedFolder.Id, nil)
+err := sqlStore.UpdateDashboardACL(context.Background(), savedFolder.Id, nil)
 require.Nil(t, err)

 query := models.GetDashboardAclInfoListQuery{DashboardID: childDash.Id, OrgID: 1}
@@ -381,5 +381,5 @@ func testHelperUpdateDashboardAcl(t *testing.T, sqlStore *SQLStore, dashboardID
 item.Updated = time.Now()
 itemPtrs = append(itemPtrs, &item)
 }
-return sqlStore.UpdateDashboardACL(dashboardID, itemPtrs)
+return sqlStore.UpdateDashboardACL(context.Background(), dashboardID, itemPtrs)
 }
@@ -287,7 +287,7 @@ func createDummyACL(t *testing.T, sqlStore *SQLStore, dashboardPermission *Dashb
 acl.Role = &dashboardPermission.Role
 }

-err := sqlStore.UpdateDashboardACL(dashboardID, []*models.DashboardAcl{acl})
+err := sqlStore.UpdateDashboardACL(context.Background(), dashboardID, []*models.DashboardAcl{acl})
 require.NoError(t, err)
 if user != nil {
 return user.Id
@@ -2,6 +2,7 @@ package alerting

 import (
 "bytes"
+"context"
 "encoding/json"
 "fmt"
 "io/ioutil"
@@ -716,7 +717,7 @@ func TestPrometheusRulesPermissions(t *testing.T) {
 }

 // remove permissions from folder2
-require.NoError(t, store.UpdateDashboardACL(2, nil))
+require.NoError(t, store.UpdateDashboardACL(context.Background(), 2, nil))

 // make sure that folder2 is not included in the response
 {
@@ -765,7 +766,7 @@ func TestPrometheusRulesPermissions(t *testing.T) {
 }

 // remove permissions from _ALL_ folders
-require.NoError(t, store.UpdateDashboardACL(1, nil))
+require.NoError(t, store.UpdateDashboardACL(context.Background(), 1, nil))

 // make sure that no folders are included in the response
 {
@@ -2,6 +2,7 @@ package alerting

 import (
 "bytes"
+"context"
 "encoding/json"
 "fmt"
 "io/ioutil"
@@ -174,7 +175,7 @@ func TestAlertRulePermissions(t *testing.T) {
 assert.JSONEq(t, expectedGetNamespaceResponseBody, body)

 // remove permissions from folder2
-require.NoError(t, store.UpdateDashboardACL(2, nil))
+require.NoError(t, store.UpdateDashboardACL(context.Background(), 2, nil))

 // make sure that folder2 is not included in the response
 // nolint:gosec
@@ -247,7 +248,7 @@ func TestAlertRulePermissions(t *testing.T) {
 }

 // Remove permissions from ALL folders.
-require.NoError(t, store.UpdateDashboardACL(1, nil))
+require.NoError(t, store.UpdateDashboardACL(context.Background(), 1, nil))
 {
 u := fmt.Sprintf("http://grafana:password@%s/api/ruler/grafana/api/v1/rules", grafanaListedAddr)
 // nolint:gosec
@@ -223,7 +223,7 @@ func Test_executeQueryErrorWithDifferentLogAnalyticsCreds(t *testing.T) {
 "azureLogAnalyticsSameAs": false,
 },
 }
-ctx := context.TODO()
+ctx := context.Background()
 query := &AzureLogAnalyticsQuery{
 Params: url.Values{},
 TimeRange: backend.TimeRange{},
@@ -143,7 +143,7 @@ func Test_newMux(t *testing.T) {
 },
 }
 mux := s.newMux()
-res, err := mux.QueryData(context.TODO(), &backend.QueryDataRequest{
+res, err := mux.QueryData(context.Background(), &backend.QueryDataRequest{
 PluginContext: backend.PluginContext{},
 Queries: []backend.DataQuery{
 {QueryType: tt.queryType},
@@ -121,7 +121,7 @@ func TestTimeSeriesQuery(t *testing.T) {
 })

 t.Run("End time before start time should result in error", func(t *testing.T) {
-_, err := executor.executeTimeSeriesQuery(context.TODO(), &backend.QueryDataRequest{Queries: []backend.DataQuery{{TimeRange: backend.TimeRange{
+_, err := executor.executeTimeSeriesQuery(context.Background(), &backend.QueryDataRequest{Queries: []backend.DataQuery{{TimeRange: backend.TimeRange{
 From: now.Add(time.Hour * -1),
 To: now.Add(time.Hour * -2),
 }}}})
@@ -129,7 +129,7 @@ func TestTimeSeriesQuery(t *testing.T) {
 })

 t.Run("End time equals start time should result in error", func(t *testing.T) {
-_, err := executor.executeTimeSeriesQuery(context.TODO(), &backend.QueryDataRequest{Queries: []backend.DataQuery{{TimeRange: backend.TimeRange{
+_, err := executor.executeTimeSeriesQuery(context.Background(), &backend.QueryDataRequest{Queries: []backend.DataQuery{{TimeRange: backend.TimeRange{
 From: now.Add(time.Hour * -1),
 To: now.Add(time.Hour * -1),
 }}}})