add garbage collector for database cache

This commit is contained in:
bergquist 2019-02-15 14:31:52 +01:00
parent 996d5059b1
commit d99af23946
3 changed files with 97 additions and 12 deletions

View File

@ -3,18 +3,48 @@ package distcache
import (
"time"
"github.com/grafana/grafana/pkg/log"
"github.com/grafana/grafana/pkg/services/sqlstore"
)
// databaseCache is a distributed-cache backend that stores entries in the
// cache_data table of the Grafana SQL database. Expired rows are removed by
// a periodic garbage collector started from newDatabaseCache.
type databaseCache struct {
	SQLStore *sqlstore.SqlStore // handle to the shared Grafana database
	log      log.Logger         // scoped logger ("distcache.database")
}
// newDatabaseCache constructs a databaseCache on top of the given SQL store
// and starts the background garbage-collection loop.
//
// NOTE(review): the GC goroutine started here has no stop mechanism (no quit
// channel or context), so it runs for the lifetime of the process — confirm
// that is intended before reusing this type outside a long-lived server.
func newDatabaseCache(sqlstore *sqlstore.SqlStore) *databaseCache {
	dc := &databaseCache{
		SQLStore: sqlstore,
		log:      log.New("distcache.database"),
	}
	// Garbage collection runs concurrently; StartGC reschedules itself.
	go dc.StartGC()
	return dc
}
var getTime = time.Now
func (dc *databaseCache) Get(key string) (interface{}, error) {
//now := getTime().Unix()
// internalRunGC deletes every expired row from the cache_data table in a
// single statement. Rows inserted with expire == 0 never expire and are
// deliberately excluded from collection.
func (dc *databaseCache) internalRunGC() {
	now := getTime().Unix()

	// Column names must match the cache_data migration and the INSERT in
	// Put: the table defines created_at/expires, not created/expire.
	// The `expires > 0` guard keeps never-expiring entries alive.
	sql := `DELETE FROM cache_data WHERE (? - created_at) >= expires AND expires > 0`

	if _, err := dc.SQLStore.NewSession().Exec(sql, now); err != nil {
		dc.log.Error("failed to run garbage collect", "error", err)
	}
}
// StartGC runs one garbage-collection pass immediately, then arranges for
// the next pass ten seconds later. Each invocation schedules exactly one
// successor, so collection continues for the lifetime of the process.
func (dc *databaseCache) StartGC() {
	dc.internalRunGC()
	time.AfterFunc(10*time.Second, dc.StartGC)
}
func (dc *databaseCache) Get(key string) (interface{}, error) {
cacheHits := []CacheData{}
err := dc.SQLStore.NewSession().Where(`key = ?`, key).Find(&cacheHits)
if err != nil {
@ -65,7 +95,7 @@ func (dc *databaseCache) Put(key string, value interface{}, expire int64) error
}
if len(cacheHits) > 0 {
_, err = dc.SQLStore.NewSession().Exec("UPDATE cached_data SET data=?, created=?, expire=? WHERE key=?", data, now, expire, key)
_, err = dc.SQLStore.NewSession().Exec("UPDATE cache_data SET data=?, created=?, expire=? WHERE key=?", data, now, expire, key)
} else {
_, err = dc.SQLStore.NewSession().Exec("INSERT INTO cache_data(key,data,created_at,expires) VALUES(?,?,?,?)", key, data, now, expire)
}

View File

@ -0,0 +1,50 @@
package distcache
import (
"testing"
"time"
"github.com/bmizerany/assert"
"github.com/grafana/grafana/pkg/log"
"github.com/grafana/grafana/pkg/services/sqlstore"
)
// TestDatabaseStorageGarbageCollection verifies that internalRunGC removes
// rows whose expiry has passed, keeps rows inserted with expire == 0, and
// keeps rows that are not yet expired.
func TestDatabaseStorageGarbageCollection(t *testing.T) {
	sqlstore := sqlstore.InitTestDB(t)
	db := &databaseCache{
		SQLStore: sqlstore,
		log:      log.New("distcache.database"),
	}
	obj := &CacheableStruct{String: "foolbar"}
	// Back-date the clock by two days (AddDate(0, 0, -2)) so these inserts
	// are created well past their 1000-second expiry.
	getTime = func() time.Time { return time.Now().AddDate(0, 0, -2) }
	db.Put("key1", obj, 1000)
	db.Put("key2", obj, 1000)
	db.Put("key3", obj, 1000)
	// insert object that should never expire (expire == 0)
	db.Put("key4", obj, 0)
	// restore the real clock; key5 is fresh and must survive collection
	getTime = time.Now
	db.Put("key5", obj, 1000)
	//run GC
	db.internalRunGC()
	// the three back-dated keys must be gone; key4 and key5 must remain
	_, err := db.Get("key1")
	assert.Equal(t, err, ErrCacheItemNotFound)
	_, err = db.Get("key2")
	assert.Equal(t, err, ErrCacheItemNotFound)
	_, err = db.Get("key3")
	assert.Equal(t, err, ErrCacheItemNotFound)
	_, err = db.Get("key4")
	assert.Equal(t, err, nil)
	_, err = db.Get("key5")
	assert.Equal(t, err, nil)
}

View File

@ -1,17 +1,22 @@
package migrations
import . "github.com/grafana/grafana/pkg/services/sqlstore/migrator"
import "github.com/grafana/grafana/pkg/services/sqlstore/migrator"
func addCacheMigration(mg *Migrator) {
var cacheDataV1 = Table{
func addCacheMigration(mg *migrator.Migrator) {
var cacheDataV1 = migrator.Table{
Name: "cache_data",
Columns: []*Column{
{Name: "key", Type: DB_Char, IsPrimaryKey: true, Length: 16},
{Name: "data", Type: DB_Blob},
{Name: "expires", Type: DB_Integer, Length: 255, Nullable: false},
{Name: "created_at", Type: DB_Integer, Length: 255, Nullable: false},
Columns: []*migrator.Column{
{Name: "key", Type: migrator.DB_NVarchar, IsPrimaryKey: true, Length: 168},
{Name: "data", Type: migrator.DB_Blob},
{Name: "expires", Type: migrator.DB_Integer, Length: 255, Nullable: false},
{Name: "created_at", Type: migrator.DB_Integer, Length: 255, Nullable: false},
},
Indices: []*migrator.Index{
{Cols: []string{"key"}, Type: migrator.UniqueIndex},
},
}
mg.AddMigration("create cache_data table", NewAddTableMigration(cacheDataV1))
mg.AddMigration("create cache_data table", migrator.NewAddTableMigration(cacheDataV1))
mg.AddMigration("add unique index cache_data.key", migrator.NewAddIndexMigration(cacheDataV1, cacheDataV1.Indices[0]))
}