mirror of https://github.com/grafana/grafana.git
renames key to cache_key
Apparently key is a reserved keyword in MySQL, and the error message doesn't mention that. Can I please have 6h back?
This commit is contained in:
parent dbc1315d6f
commit 66e71b66dd
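For context on the rename: KEY is a reserved word in MySQL, so an unquoted column named key fails to parse, and the resulting error 1064 quotes the text near the failure without ever saying "reserved word". A minimal sketch of the failure and the two ways out, assuming a hypothetical DSN and a cache_data table with these columns:

package main

import (
	"database/sql"
	"log"

	_ "github.com/go-sql-driver/mysql" // assumed driver, for illustration
)

func main() {
	// Hypothetical DSN; any MySQL database reproduces the error.
	db, err := sql.Open("mysql", "user:pass@tcp(127.0.0.1:3306)/grafana")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Fails: KEY is a reserved word in MySQL. Error 1064 says the syntax
	// is wrong "near 'key, data)'" but never names the actual problem.
	_, err = db.Exec(`INSERT INTO cache_data(key, data) VALUES(?, ?)`, "k", []byte("v"))
	log.Println(err)

	// Works, but only on MySQL: backtick-quote the identifier.
	_, err = db.Exec("INSERT INTO cache_data(`key`, data) VALUES(?, ?)", "k", []byte("v"))
	log.Println(err)

	// Works everywhere, which is what this commit does: rename the column.
	_, err = db.Exec(`INSERT INTO cache_data(cache_key, data) VALUES(?, ?)`, "k", []byte("v"))
	log.Println(err)
}

Backtick quoting ties the statement to MySQL; the rename keeps the same SQL valid on MySQL, Postgres, and SQLite, the three databases Grafana supports.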
@@ -64,11 +64,8 @@ jobs:
     working_directory: /go/src/github.com/grafana/grafana
     steps:
       - checkout
-      #- run: sudo apt update
-      #- run: sudo apt install -y postgresql-client
       - run: dockerize -wait tcp://127.0.0.1:11211 -timeout 120s
-      - run: dockerize -wait tcp://127.0.0.1:6739 -timeout 120s
-      #- run: 'PGPASSWORD=grafanatest psql -p 5432 -h 127.0.0.1 -U grafanatest -d grafanatest -f devenv/docker/blocks/postgres_tests/setup.sql'
+      - run: dockerize -wait tcp://127.0.0.1:6379 -timeout 120s
       - run:
           name: cache server tests
           command: './scripts/circle-test-cache-servers.sh'
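Worth noting in this hunk: the replaced dockerize wait targeted port 6739, which looks like a digit transposition of 6379, Redis's default port (11211 above is memcached's), so the old wait would presumably poll a port nothing listens on until the 120s timeout. The commented apt/psql lines being dropped look like leftovers copied from the postgres-integration-test job.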
@@ -562,6 +559,8 @@ workflows:
           filters: *filter-not-release-or-master
       - postgres-integration-test:
           filters: *filter-not-release-or-master
+      - cache-server-test:
+          filters: *filter-not-release-or-master
       - grafana-docker-pr:
           requires:
             - build
@@ -8,6 +8,8 @@ import (
 	"github.com/grafana/grafana/pkg/services/sqlstore"
 )
 
+var getTime = time.Now
+
 type databaseCache struct {
 	SQLStore *sqlstore.SqlStore
 	log      log.Logger
@@ -34,8 +36,6 @@ func (dc *databaseCache) Run(ctx context.Context) error {
 	}
 }
 
-var getTime = time.Now
-
 func (dc *databaseCache) internalRunGC() {
 	now := getTime().Unix()
 	sql := `DELETE FROM cache_data WHERE (? - created_at) >= expires AND expires <> 0`
@@ -47,19 +47,20 @@ func (dc *databaseCache) internalRunGC() {
 	}
 }
 
 func (dc *databaseCache) Get(key string) (interface{}, error) {
-	cacheHits := []cacheData{}
-	err := dc.SQLStore.NewSession().Where(`key = ?`, key).Find(&cacheHits)
+	cacheHits := []CacheData{}
+	sess := dc.SQLStore.NewSession()
+	defer sess.Close()
+	err := sess.Where("cache_key= ?", key).Find(&cacheHits)
 
 	if err != nil {
 		return nil, err
 	}
 
-	var cacheHit cacheData
 	if len(cacheHits) == 0 {
 		return nil, ErrCacheItemNotFound
 	}
 
-	cacheHit = cacheHits[0]
 	// if Expires is set. Make sure its still valid.
+	cacheHit := cacheHits[0]
 	if cacheHit.Expires > 0 {
 		existedButExpired := getTime().Unix()-cacheHit.CreatedAt >= cacheHit.Expires
 		if existedButExpired {
@@ -83,9 +84,10 @@ func (dc *databaseCache) Set(key string, value interface{}, expire time.Duration) error {
 		return err
 	}
 
-	now := getTime().Unix()
-	cacheHits := []cacheData{}
-	err = dc.SQLStore.NewSession().Where(`key = ?`, key).Find(&cacheHits)
+	session := dc.SQLStore.NewSession()
+
+	var cacheHit CacheData
+	has, err := session.Where("cache_key = ?", key).Get(&cacheHit)
 	if err != nil {
 		return err
 	}
@@ -95,27 +97,28 @@ func (dc *databaseCache) Set(key string, value interface{}, expire time.Duration) error {
 		expiresAtEpoch = int64(expire) / int64(time.Second)
 	}
 
-	session := dc.SQLStore.NewSession()
 	// insert or update depending on if item already exist
-	if len(cacheHits) > 0 {
-		_, err = session.Exec("UPDATE cache_data SET data=?, created=?, expire=? WHERE key=?", data, now, expiresAtEpoch, key)
+	if has {
+		_, err = session.Exec(`UPDATE cache_data SET data=?, created=?, expire=? WHERE cache_key='?'`, data, getTime().Unix(), expiresAtEpoch, key)
 	} else {
-		_, err = session.Exec("INSERT INTO cache_data(key,data,created_at,expires) VALUES(?,?,?,?)", key, data, now, expiresAtEpoch)
+		_, err = session.Exec(`INSERT INTO cache_data (cache_key,data,created_at,expires) VALUES(?,?,?,?)`, key, data, getTime().Unix(), expiresAtEpoch)
 	}
 
 	return err
 }
 
 func (dc *databaseCache) Delete(key string) error {
-	sql := `DELETE FROM cache_data WHERE key = ?`
+	sql := "DELETE FROM cache_data WHERE cache_key=?"
 	_, err := dc.SQLStore.NewSession().Exec(sql, key)
 
 	return err
 }
 
-type cacheData struct {
-	Key       string
+type CacheData struct {
+	CacheKey  string
 	Data      []byte
 	Expires   int64
 	CreatedAt int64
 }
+
+// func (cd CacheData) TableName() string { return "cache_data" }
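Two things in the Set path above look off and are worth a sketch. The new UPDATE wraps its placeholder in quotes (cache_key='?'), so the driver sees three bind parameters but four arguments and the call would likely fail at runtime; even interpolated, it would compare cache_key against the literal string '?'. It also sets columns named created and expire, while the migration below and the INSERT branch use created_at and expires. A hedged sketch of what the statement presumably intends, reusing the commit's own session, data, expiresAtEpoch, and key:

// Sketch only, not the committed code: unquoted key placeholder, and the
// column names that the cache_data migration actually defines.
_, err = session.Exec(
	`UPDATE cache_data SET data=?, created_at=?, expires=? WHERE cache_key=?`,
	data, getTime().Unix(), expiresAtEpoch, key,
)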
@@ -21,10 +21,16 @@ func TestDatabaseStorageGarbageCollection(t *testing.T) {
 	obj := &CacheableStruct{String: "foolbar"}
 
 	//set time.now to 2 weeks ago
+	var err error
 	getTime = func() time.Time { return time.Now().AddDate(0, 0, -2) }
-	db.Set("key1", obj, 1000*time.Second)
-	db.Set("key2", obj, 1000*time.Second)
-	db.Set("key3", obj, 1000*time.Second)
+	err = db.Set("key1", obj, 1000*time.Second)
+	assert.Equal(t, err, nil)
+
+	err = db.Set("key2", obj, 1000*time.Second)
+	assert.Equal(t, err, nil)
+
+	err = db.Set("key3", obj, 1000*time.Second)
+	assert.Equal(t, err, nil)
 
 	// insert object that should never expire
 	db.Set("key4", obj, 0)
@@ -36,8 +42,8 @@ func TestDatabaseStorageGarbageCollection(t *testing.T) {
 	db.internalRunGC()
 
 	//try to read values
-	_, err := db.Get("key1")
-	assert.Equal(t, err, ErrCacheItemNotFound)
+	_, err = db.Get("key1")
+	assert.Equal(t, err, ErrCacheItemNotFound, "expected cache item not found. got: ", err)
 	_, err = db.Get("key2")
 	assert.Equal(t, err, ErrCacheItemNotFound)
 	_, err = db.Get("key3")
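The getTime indirection introduced above (var getTime = time.Now) is what lets this test rewind the clock; note the comment says "2 weeks ago" while AddDate(0, 0, -2) subtracts two days. A sketch of the stubbing pattern with an explicit restore, so a rewound clock cannot leak into later tests (the defer is this sketch's addition, not in the test above):

// Make every getTime() call in the package report a time two days in
// the past, then put the real clock back when the test returns.
getTime = func() time.Time { return time.Now().AddDate(0, 0, -2) }
defer func() { getTime = time.Now }()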
@@ -58,34 +58,34 @@ func runTestsForClient(t *testing.T, client CacheStorage) {
 func canPutGetAndDeleteCachedObjects(t *testing.T, client CacheStorage) {
 	cacheableStruct := CacheableStruct{String: "hej", Int64: 2000}
 
-	err := client.Set("key", cacheableStruct, 0)
-	assert.Equal(t, err, nil)
+	err := client.Set("key1", cacheableStruct, 0)
+	assert.Equal(t, err, nil, "expected nil. got: ", err)
 
-	data, err := client.Get("key")
+	data, err := client.Get("key1")
 	s, ok := data.(CacheableStruct)
 
 	assert.Equal(t, ok, true)
 	assert.Equal(t, s.String, "hej")
 	assert.Equal(t, s.Int64, int64(2000))
 
-	err = client.Delete("key")
+	err = client.Delete("key1")
 	assert.Equal(t, err, nil)
 
-	_, err = client.Get("key")
+	_, err = client.Get("key1")
 	assert.Equal(t, err, ErrCacheItemNotFound)
 }
 
 func canNotFetchExpiredItems(t *testing.T, client CacheStorage) {
 	cacheableStruct := CacheableStruct{String: "hej", Int64: 2000}
 
-	err := client.Set("key", cacheableStruct, time.Second)
+	err := client.Set("key1", cacheableStruct, time.Second)
 	assert.Equal(t, err, nil)
 
 	//not sure how this can be avoided when testing redis/memcached :/
 	<-time.After(time.Second + time.Millisecond)
 
 	// should not be able to read that value since its expired
-	_, err = client.Get("key")
+	_, err = client.Get("key1")
 	assert.Equal(t, err, ErrCacheItemNotFound)
 }
 
@@ -94,12 +94,12 @@ func canSetInfiniteCacheExpiration(t *testing.T, client CacheStorage) {
 
 	// insert cache item one day back
 	getTime = func() time.Time { return time.Now().AddDate(0, 0, -2) }
-	err := client.Set("key", cacheableStruct, 0)
+	err := client.Set("key1", cacheableStruct, 0)
 	assert.Equal(t, err, nil)
 
 	// should not be able to read that value since its expired
 	getTime = time.Now
-	data, err := client.Get("key")
+	data, err := client.Get("key1")
 	s, ok := data.(CacheableStruct)
 
 	assert.Equal(t, ok, true)
@@ -6,17 +6,17 @@ func addCacheMigration(mg *migrator.Migrator) {
 	var cacheDataV1 = migrator.Table{
 		Name: "cache_data",
 		Columns: []*migrator.Column{
-			{Name: "key", Type: migrator.DB_NVarchar, IsPrimaryKey: true, Length: 168},
+			{Name: "cache_key", Type: migrator.DB_NVarchar, IsPrimaryKey: true, Length: 168},
 			{Name: "data", Type: migrator.DB_Blob},
 			{Name: "expires", Type: migrator.DB_Integer, Length: 255, Nullable: false},
 			{Name: "created_at", Type: migrator.DB_Integer, Length: 255, Nullable: false},
 		},
 		Indices: []*migrator.Index{
-			{Cols: []string{"key"}, Type: migrator.UniqueIndex},
+			{Cols: []string{"cache_key"}, Type: migrator.UniqueIndex},
 		},
 	}
 
 	mg.AddMigration("create cache_data table", migrator.NewAddTableMigration(cacheDataV1))
 
-	mg.AddMigration("add unique index cache_data.key", migrator.NewAddIndexMigration(cacheDataV1, cacheDataV1.Indices[0]))
+	mg.AddMigration("add unique index cache_data.cache_key", migrator.NewAddIndexMigration(cacheDataV1, cacheDataV1.Indices[0]))
 }
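Note that the rename edits the existing "create cache_data table" migration in place rather than appending a rename-column migration. That presumably relies on cache_data not having shipped in any release yet: a database that had already run the old migration would keep a column named key, since applied migrations are skipped by ID.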
@@ -13,5 +13,6 @@ function exit_if_fail {
 echo "running redis and memcache tests"
 #set -e
 #time for d in $(go list ./pkg/...); do
-time exit_if_fail go test -tags="redis memcached" ./pkg/infra/distcache/...
+time exit_if_fail go test -tags=redis ./pkg/infra/distcache/...
+time exit_if_fail go test -tags=memcached ./pkg/infra/distcache/...
 #done
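The single go test run under -tags="redis memcached" becomes two single-tag runs, so each backend's integration tests build and run in isolation. With the old-style build constraints of that era (before //go:build), a backend-gated test file would carry a header like this hypothetical one:

// +build redis

package distcache

// This file only compiles under `go test -tags=redis`, so it can assume
// a live Redis on 127.0.0.1:6379 without affecting untagged builds.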