Mirror of https://github.com/grafana/grafana.git
renames key to cache_key
Apparently "key" is a reserved keyword in MySQL, and the error messages don't mention that. Can I please have 6h back?
commit 66e71b66dd
parent dbc1315d6f
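For context: KEY sits on MySQL's reserved-word list, so using it unquoted as a column name is rejected with the generic 1064 syntax error, which points at the offending token but never says "reserved word". A minimal sketch of the failure mode the commit message describes (schema simplified from the real cache_data table; exact error text varies by server version):

-- Fails on MySQL: KEY is a reserved word.
CREATE TABLE cache_data (key VARCHAR(168) NOT NULL, data BLOB);
-- ERROR 1064 (42000): You have an error in your SQL syntax; check the manual
-- ... for the right syntax to use near 'key VARCHAR(168) NOT NULL, data BLOB)'

-- Quoting works, but backticks are MySQL-specific syntax:
CREATE TABLE cache_data (`key` VARCHAR(168) NOT NULL, data BLOB);

-- Renaming the column, as this commit does, avoids dialect-specific
-- quoting across all of the databases Grafana's migrator targets:
CREATE TABLE cache_data (cache_key VARCHAR(168) NOT NULL, data BLOB);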
@@ -64,11 +64,8 @@ jobs:
     working_directory: /go/src/github.com/grafana/grafana
     steps:
       - checkout
-      #- run: sudo apt update
-      #- run: sudo apt install -y postgresql-client
       - run: dockerize -wait tcp://127.0.0.1:11211 -timeout 120s
-      - run: dockerize -wait tcp://127.0.0.1:6739 -timeout 120s
-      #- run: 'PGPASSWORD=grafanatest psql -p 5432 -h 127.0.0.1 -U grafanatest -d grafanatest -f devenv/docker/blocks/postgres_tests/setup.sql'
+      - run: dockerize -wait tcp://127.0.0.1:6379 -timeout 120s
       - run:
           name: cache server tests
           command: './scripts/circle-test-cache-servers.sh'
@@ -562,6 +559,8 @@ workflows:
           filters: *filter-not-release-or-master
       - postgres-integration-test:
           filters: *filter-not-release-or-master
+      - cache-server-test:
+          filters: *filter-not-release-or-master
       - grafana-docker-pr:
           requires:
             - build

@@ -8,6 +8,8 @@ import (
 	"github.com/grafana/grafana/pkg/services/sqlstore"
 )
 
+var getTime = time.Now
+
 type databaseCache struct {
 	SQLStore *sqlstore.SqlStore
 	log      log.Logger
@@ -34,8 +36,6 @@ func (dc *databaseCache) Run(ctx context.Context) error {
 	}
 }
 
-var getTime = time.Now
-
 func (dc *databaseCache) internalRunGC() {
 	now := getTime().Unix()
 	sql := `DELETE FROM cache_data WHERE (? - created_at) >= expires AND expires <> 0`
@@ -47,19 +47,20 @@ func (dc *databaseCache) internalRunGC() {
 }
 
 func (dc *databaseCache) Get(key string) (interface{}, error) {
-	cacheHits := []cacheData{}
-	err := dc.SQLStore.NewSession().Where(`key = ?`, key).Find(&cacheHits)
+	cacheHits := []CacheData{}
+	sess := dc.SQLStore.NewSession()
+	defer sess.Close()
+	err := sess.Where("cache_key = ?", key).Find(&cacheHits)
 
 	if err != nil {
 		return nil, err
 	}
 
-	var cacheHit cacheData
 	if len(cacheHits) == 0 {
 		return nil, ErrCacheItemNotFound
 	}
 
-	cacheHit = cacheHits[0]
-	// if Expires is set. Make sure its still valid.
+	cacheHit := cacheHits[0]
 	if cacheHit.Expires > 0 {
 		existedButExpired := getTime().Unix()-cacheHit.CreatedAt >= cacheHit.Expires
 		if existedButExpired {
@@ -83,9 +84,10 @@ func (dc *databaseCache) Set(key string, value interface{}, expire time.Duration) error {
 		return err
 	}
 
-	now := getTime().Unix()
-	cacheHits := []cacheData{}
-	err = dc.SQLStore.NewSession().Where(`key = ?`, key).Find(&cacheHits)
+	session := dc.SQLStore.NewSession()
+	var cacheHit CacheData
+	has, err := session.Where("cache_key = ?", key).Get(&cacheHit)
 	if err != nil {
 		return err
 	}
@@ -95,27 +97,28 @@ func (dc *databaseCache) Set(key string, value interface{}, expire time.Duration) error {
 		expiresAtEpoch = int64(expire) / int64(time.Second)
 	}
 
-	session := dc.SQLStore.NewSession()
 	// insert or update depending on if item already exist
-	if len(cacheHits) > 0 {
-		_, err = session.Exec("UPDATE cache_data SET data=?, created=?, expire=? WHERE key=?", data, now, expiresAtEpoch, key)
+	if has {
+		_, err = session.Exec(`UPDATE cache_data SET data=?, created=?, expire=? WHERE cache_key='?'`, data, getTime().Unix(), expiresAtEpoch, key)
 	} else {
-		_, err = session.Exec("INSERT INTO cache_data(key,data,created_at,expires) VALUES(?,?,?,?)", key, data, now, expiresAtEpoch)
+		_, err = session.Exec(`INSERT INTO cache_data (cache_key,data,created_at,expires) VALUES(?,?,?,?)`, key, data, getTime().Unix(), expiresAtEpoch)
 	}
 
 	return err
 }
 
 func (dc *databaseCache) Delete(key string) error {
-	sql := `DELETE FROM cache_data WHERE key = ?`
+	sql := "DELETE FROM cache_data WHERE cache_key=?"
 	_, err := dc.SQLStore.NewSession().Exec(sql, key)
 
 	return err
 }
 
-type cacheData struct {
-	Key       string
+type CacheData struct {
+	CacheKey  string
 	Data      []byte
 	Expires   int64
 	CreatedAt int64
 }
+
+// func (cd CacheData) TableName() string { return "cache_data" }

@@ -21,10 +21,16 @@ func TestDatabaseStorageGarbageCollection(t *testing.T) {
 	obj := &CacheableStruct{String: "foolbar"}
 
 	//set time.now to 2 weeks ago
+	var err error
 	getTime = func() time.Time { return time.Now().AddDate(0, 0, -2) }
-	db.Set("key1", obj, 1000*time.Second)
-	db.Set("key2", obj, 1000*time.Second)
-	db.Set("key3", obj, 1000*time.Second)
+	err = db.Set("key1", obj, 1000*time.Second)
+	assert.Equal(t, err, nil)
+
+	err = db.Set("key2", obj, 1000*time.Second)
+	assert.Equal(t, err, nil)
+
+	err = db.Set("key3", obj, 1000*time.Second)
+	assert.Equal(t, err, nil)
 
 	// insert object that should never expire
 	db.Set("key4", obj, 0)
@@ -36,8 +42,8 @@ func TestDatabaseStorageGarbageCollection(t *testing.T) {
 	db.internalRunGC()
 
 	//try to read values
-	_, err := db.Get("key1")
-	assert.Equal(t, err, ErrCacheItemNotFound)
+	_, err = db.Get("key1")
+	assert.Equal(t, err, ErrCacheItemNotFound, "expected cache item not found. got: ", err)
 	_, err = db.Get("key2")
 	assert.Equal(t, err, ErrCacheItemNotFound)
 	_, err = db.Get("key3")

@@ -58,34 +58,34 @@ func runTestsForClient(t *testing.T, client CacheStorage) {
 func canPutGetAndDeleteCachedObjects(t *testing.T, client CacheStorage) {
 	cacheableStruct := CacheableStruct{String: "hej", Int64: 2000}
 
-	err := client.Set("key", cacheableStruct, 0)
-	assert.Equal(t, err, nil)
+	err := client.Set("key1", cacheableStruct, 0)
+	assert.Equal(t, err, nil, "expected nil. got: ", err)
 
-	data, err := client.Get("key")
+	data, err := client.Get("key1")
 	s, ok := data.(CacheableStruct)
 
 	assert.Equal(t, ok, true)
 	assert.Equal(t, s.String, "hej")
 	assert.Equal(t, s.Int64, int64(2000))
 
-	err = client.Delete("key")
+	err = client.Delete("key1")
 	assert.Equal(t, err, nil)
 
-	_, err = client.Get("key")
+	_, err = client.Get("key1")
 	assert.Equal(t, err, ErrCacheItemNotFound)
 }
 
 func canNotFetchExpiredItems(t *testing.T, client CacheStorage) {
 	cacheableStruct := CacheableStruct{String: "hej", Int64: 2000}
 
-	err := client.Set("key", cacheableStruct, time.Second)
+	err := client.Set("key1", cacheableStruct, time.Second)
 	assert.Equal(t, err, nil)
 
 	//not sure how this can be avoided when testing redis/memcached :/
 	<-time.After(time.Second + time.Millisecond)
 
 	// should not be able to read that value since its expired
-	_, err = client.Get("key")
+	_, err = client.Get("key1")
 	assert.Equal(t, err, ErrCacheItemNotFound)
 }
 
@@ -94,12 +94,12 @@ func canSetInfiniteCacheExpiration(t *testing.T, client CacheStorage) {
 
 	// insert cache item one day back
 	getTime = func() time.Time { return time.Now().AddDate(0, 0, -2) }
-	err := client.Set("key", cacheableStruct, 0)
+	err := client.Set("key1", cacheableStruct, 0)
 	assert.Equal(t, err, nil)
 
 	// should not be able to read that value since its expired
 	getTime = time.Now
-	data, err := client.Get("key")
+	data, err := client.Get("key1")
 	s, ok := data.(CacheableStruct)
 
 	assert.Equal(t, ok, true)

@@ -6,17 +6,17 @@ func addCacheMigration(mg *migrator.Migrator) {
 	var cacheDataV1 = migrator.Table{
 		Name: "cache_data",
 		Columns: []*migrator.Column{
-			{Name: "key", Type: migrator.DB_NVarchar, IsPrimaryKey: true, Length: 168},
+			{Name: "cache_key", Type: migrator.DB_NVarchar, IsPrimaryKey: true, Length: 168},
 			{Name: "data", Type: migrator.DB_Blob},
 			{Name: "expires", Type: migrator.DB_Integer, Length: 255, Nullable: false},
 			{Name: "created_at", Type: migrator.DB_Integer, Length: 255, Nullable: false},
 		},
 		Indices: []*migrator.Index{
-			{Cols: []string{"key"}, Type: migrator.UniqueIndex},
+			{Cols: []string{"cache_key"}, Type: migrator.UniqueIndex},
 		},
 	}
 
 	mg.AddMigration("create cache_data table", migrator.NewAddTableMigration(cacheDataV1))
 
-	mg.AddMigration("add unique index cache_data.key", migrator.NewAddIndexMigration(cacheDataV1, cacheDataV1.Indices[0]))
+	mg.AddMigration("add unique index cache_data.cache_key", migrator.NewAddIndexMigration(cacheDataV1, cacheDataV1.Indices[0]))
 }

@@ -13,5 +13,6 @@ function exit_if_fail {
 echo "running redis and memcache tests"
 #set -e
 #time for d in $(go list ./pkg/...); do
-time exit_if_fail go test -tags="redis memcached" ./pkg/infra/distcache/...
+time exit_if_fail go test -tags=redis ./pkg/infra/distcache/...
+time exit_if_fail go test -tags=memcached ./pkg/infra/distcache/...
 #done