Mirror of https://github.com/mattermost/mattermost.git (synced 2025-02-25 18:55:24 -06:00)

MM-56876: Redis: first introduction (#27752)

```release-note
NONE
```

---------

Co-authored-by: Jesús Espino <jespinog@gmail.com>
Co-authored-by: Mattermost Build <build@mattermost.com>

This commit is contained in:
parent c3ed07e679
commit 540febd866
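For orientation before the diff: this commit lets the platform service pick its cache provider from the new CacheSettings config block instead of always building the in-memory LRU provider. Below is a minimal sketch of that selection in Go, assuming a `model.CacheSettings` struct name and the pointer fields referenced in the diff (`CacheType`, `RedisAddress`, `RedisPassword`, `RedisDB`); the helper function itself is hypothetical and only mirrors the logic added to `platform.New` further down.

```go
package example

import (
	"fmt"

	"github.com/mattermost/mattermost/server/public/model"
	"github.com/mattermost/mattermost/server/v8/platform/services/cache"
)

// newCacheProvider is a hypothetical helper mirroring the selection logic in
// the platform.New hunk below: LRU stays the default, Redis is opt-in.
func newCacheProvider(cfg model.CacheSettings) (cache.Provider, error) {
	switch *cfg.CacheType {
	case model.CacheTypeLRU:
		// In-memory caches, invalidated across the cluster via messages.
		return cache.NewProvider(), nil
	case model.CacheTypeRedis:
		// Shared Redis backend; the diff also skips cluster invalidation
		// messages for this cache type.
		p, err := cache.NewRedisProvider(&cache.RedisOptions{
			RedisAddr:     *cfg.RedisAddress,
			RedisPassword: *cfg.RedisPassword,
			RedisDB:       *cfg.RedisDB,
		})
		if err != nil {
			return nil, err
		}
		return p, nil
	default:
		return nil, fmt.Errorf("unknown cache type %q", *cfg.CacheType)
	}
}
```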
@@ -315,6 +315,10 @@ store-mocks: ## Creates mock files.
$(GO) install github.com/vektra/mockery/v2/...@v2.42.2
$(GOBIN)/mockery --dir channels/store --name ".*Store" --output channels/store/storetest/mocks --note 'Regenerate this file using `make store-mocks`.'

cache-mocks:
$(GO) install github.com/vektra/mockery/v2/...@v2.42.2
$(GOBIN)/mockery --dir platform/services/cache --all --output platform/services/cache/mocks --note 'Regenerate this file using `make cache-mocks`.'

telemetry-mocks: ## Creates mock files.
$(GO) install github.com/vektra/mockery/v2/...@v2.42.2
$(GOBIN)/mockery --dir platform/services/telemetry --all --output platform/services/telemetry/mocks --note 'Regenerate this file using `make telemetry-mocks`.'

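A note on usage, inferred from the `--note` flag above: the generated mocks under `platform/services/cache/mocks` are regenerated with `make cache-mocks`, and the new target is also appended to the aggregate `mocks` target in the hunk below.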
@@ -376,7 +380,7 @@ mmctl-mocks: ## Creates mocks for mmctl
pluginapi: ## Generates api and hooks glue code for plugins
cd ./public && $(GO) generate $(GOFLAGS) ./plugin

mocks: store-mocks telemetry-mocks filestore-mocks ldap-mocks plugin-mocks einterfaces-mocks searchengine-mocks sharedchannel-mocks misc-mocks email-mocks platform-mocks mmctl-mocks mocks-public
mocks: store-mocks telemetry-mocks filestore-mocks ldap-mocks plugin-mocks einterfaces-mocks searchengine-mocks sharedchannel-mocks misc-mocks email-mocks platform-mocks mmctl-mocks mocks-public cache-mocks

layers: app-layers store-layers pluginapi

@@ -402,7 +406,6 @@ endif

check-style: plugin-checker vet golangci-lint ## Runs style/lint checks


gotestsum:
$(GO) install gotest.tools/gotestsum@v1.11.0

@@ -34,7 +34,7 @@ const (
MaxServerBusySeconds = 86400
)

var redirectLocationDataCache = cache.NewLRU(cache.LRUOptions{
var redirectLocationDataCache = cache.NewLRU(&cache.CacheOptions{
Size: RedirectLocationCacheSize,
})

@@ -17,7 +17,7 @@ import (
"github.com/mattermost/mattermost/server/v8/platform/shared/mail"
)

var latestVersionCache = cache.NewLRU(cache.LRUOptions{
var latestVersionCache = cache.NewLRU(&cache.CacheOptions{
Size: 1,
})

@@ -12,7 +12,7 @@ import (
const LinkCacheSize = 10000
const LinkCacheDuration = 1 * time.Hour

var linkCache = cache.NewLRU(cache.LRUOptions{
var linkCache = cache.NewLRU(&cache.CacheOptions{
Size: LinkCacheSize,
})

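The three hunks above are the same mechanical change: `cache.NewLRU` now takes a pointer to the shared `cache.CacheOptions` struct used by the provider-based caches elsewhere in this diff, rather than the old `cache.LRUOptions` value. A minimal sketch of the new call shape, with an illustrative size:

```go
package example

import (
	"github.com/mattermost/mattermost/server/v8/platform/services/cache"
)

// exampleCache shows the post-change constructor call: *cache.CacheOptions
// instead of cache.LRUOptions. The size here is illustrative.
var exampleCache = cache.NewLRU(&cache.CacheOptions{
	Size: 10000,
})
```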
@@ -47,7 +47,7 @@ func StoreOverride(override any) Option {
func StoreOverrideWithCache(override store.Store) Option {
return func(ps *PlatformService) error {
ps.newStore = func() (store.Store, error) {
lcl, err := localcachelayer.NewLocalCacheLayer(override, ps.metricsIFace, ps.clusterIFace, ps.cacheProvider)
lcl, err := localcachelayer.NewLocalCacheLayer(override, ps.metricsIFace, ps.clusterIFace, ps.cacheProvider, ps.Log())
if err != nil {
return nil, err
}

@@ -136,21 +136,6 @@ func New(sc ServiceConfig, options ...Option) (*PlatformService, error) {
// Assume the first user account has not been created yet. A call to the DB will later check if this is really the case.
ps.isFirstUserAccount.Store(true)

// Step 1: Cache provider.
// At the moment we only have this implementation
// in the future the cache provider will be built based on the loaded config
ps.cacheProvider = cache.NewProvider()
if err2 := ps.cacheProvider.Connect(); err2 != nil {
return nil, fmt.Errorf("unable to connect to cache provider: %w", err2)
}

// Apply options, some of the options overrides the default config actually.
for _, option := range options {
if err := option(ps); err != nil {
return nil, fmt.Errorf("failed to apply option: %w", err)
}
}

// the config store is not set, we need to create a new one
if ps.configStore == nil {
innerStore, err := config.NewFileStore("config.json", true)

@@ -166,13 +151,46 @@ func New(sc ServiceConfig, options ...Option) (*PlatformService, error) {
ps.configStore = configStore
}

// Step 2: Start logging.
if err := ps.initLogging(); err != nil {
return nil, fmt.Errorf("failed to initialize logging: %w", err)
// Step 1: Cache provider.
cacheConfig := ps.configStore.Get().CacheSettings
var err error
if *cacheConfig.CacheType == model.CacheTypeLRU {
ps.cacheProvider = cache.NewProvider()
} else if *cacheConfig.CacheType == model.CacheTypeRedis {
ps.cacheProvider, err = cache.NewRedisProvider(
&cache.RedisOptions{
RedisAddr: *cacheConfig.RedisAddress,
RedisPassword: *cacheConfig.RedisPassword,
RedisDB: *cacheConfig.RedisDB,
},
)
}
if err != nil {
return nil, fmt.Errorf("unable to create cache provider: %w", err)
}

// The value of res is used later, after the logger is initialized.
// There's a certain order of steps we need to follow in the server startup phase.
res, err := ps.cacheProvider.Connect()
if err != nil {
return nil, fmt.Errorf("unable to connect to cache provider: %w", err)
}

// Apply options, some of the options overrides the default config actually.
for _, option := range options {
if err2 := option(ps); err2 != nil {
return nil, fmt.Errorf("failed to apply option: %w", err2)
}
}

// Step 2: Start logging.
if err2 := ps.initLogging(); err2 != nil {
return nil, fmt.Errorf("failed to initialize logging: %w", err2)
}
ps.Log().Info("Successfully connected to cache backend", mlog.String("backend", *cacheConfig.CacheType), mlog.String("result", res))

// This is called after initLogging() to avoid a race condition.
mlog.Info("Server is initializing...", mlog.String("go_version", runtime.Version()))
ps.Log().Info("Server is initializing...", mlog.String("go_version", runtime.Version()))

// Step 3: Search Engine
searchEngine := searchengine.NewBroker(ps.Config())

@@ -192,6 +210,8 @@ func New(sc ServiceConfig, options ...Option) (*PlatformService, error) {
ps.metricsIFace = metricsInterfaceFn(ps, *ps.configStore.Get().SqlSettings.DriverName, *ps.configStore.Get().SqlSettings.DataSource)
}

ps.cacheProvider.SetMetrics(ps.metricsIFace)

// Step 6: Store.
// Depends on Step 0 (config), 1 (cacheProvider), 3 (search engine), 5 (metrics) and cluster.
if ps.newStore == nil {

@@ -206,7 +226,6 @@ func New(sc ServiceConfig, options ...Option) (*PlatformService, error) {
// Timer layer
// |
// Cache layer
var err error
ps.sqlStore, err = sqlstore.New(ps.Config().SqlSettings, ps.Log(), ps.metricsIFace)
if err != nil {
return nil, err

@@ -227,6 +246,7 @@ func New(sc ServiceConfig, options ...Option) (*PlatformService, error) {
ps.metricsIFace,
ps.clusterIFace,
ps.cacheProvider,
ps.Log(),
)
if err2 != nil {
return nil, fmt.Errorf("cannot create local cache layer: %w", err2)

@@ -267,14 +287,13 @@ func New(sc ServiceConfig, options ...Option) (*PlatformService, error) {
}
}

var err error
ps.Store, err = ps.newStore()
if err != nil {
return nil, fmt.Errorf("cannot create store: %w", err)
}

// Needed before loading license
ps.statusCache, err = ps.cacheProvider.NewCache(&cache.CacheOptions{
ps.statusCache, err = cache.NewProvider().NewCache(&cache.CacheOptions{
Name: "Status",
Size: model.StatusCacheSize,
Striped: true,

@@ -284,7 +303,8 @@ func New(sc ServiceConfig, options ...Option) (*PlatformService, error) {
return nil, fmt.Errorf("unable to create status cache: %w", err)
}

ps.sessionCache, err = ps.cacheProvider.NewCache(&cache.CacheOptions{
ps.sessionCache, err = cache.NewProvider().NewCache(&cache.CacheOptions{
Name: "Session",
Size: model.SessionCacheSize,
Striped: true,
StripedBuckets: maxInt(runtime.NumCPU()-1, 1),

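In both hunks above the status and session caches are now built from a fresh `cache.NewProvider()` rather than `ps.cacheProvider`, i.e. they remain in-memory LRU caches regardless of the configured backend. A sketch of that provider-based, striped construction with an illustrative name and size (the wrapper function is hypothetical):

```go
package example

import (
	"runtime"

	"github.com/mattermost/mattermost/server/v8/platform/services/cache"
)

// newStripedCache mirrors the Status/Session cache construction above:
// an in-memory provider plus a striped cache split into per-CPU buckets.
func newStripedCache() (cache.Cache, error) {
	return cache.NewProvider().NewCache(&cache.CacheOptions{
		Name:           "Example",
		Size:           10000,
		Striped:        true,
		StripedBuckets: runtime.NumCPU(),
	})
}
```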
@@ -118,7 +118,7 @@ func (a *App) deduplicateCreatePost(rctx request.CTX, post *model.Post) (foundPo
}

if nErr != nil {
return nil, model.NewAppError("errorGetPostId", "api.post.error_get_post_id.pending", nil, "", http.StatusInternalServerError)
return nil, model.NewAppError("errorGetPostId", "api.post.error_get_post_id.pending", nil, "", http.StatusInternalServerError).Wrap(nErr)
}

// If another thread saved the cache record, but hasn't yet updated it with the actual post

@@ -305,11 +305,13 @@ func NewServer(options ...Option) (*Server, error) {
model.AppErrorInit(i18n.T)

if s.seenPendingPostIdsCache, err = s.platform.CacheProvider().NewCache(&cache.CacheOptions{
Name: "seen_pending_post_ids",
Size: PendingPostIDsCacheSize,
}); err != nil {
return nil, errors.Wrap(err, "Unable to create pending post ids cache")
}
if s.openGraphDataCache, err = s.platform.CacheProvider().NewCache(&cache.CacheOptions{
Name: "opengraph_data",
Size: openGraphMetadataCacheSize,
}); err != nil {
return nil, errors.Wrap(err, "Unable to create opengraphdata cache")

@@ -5,10 +5,13 @@ package localcachelayer

import (
"bytes"
"strings"

"github.com/mattermost/mattermost/server/public/model"
"github.com/mattermost/mattermost/server/public/shared/mlog"
"github.com/mattermost/mattermost/server/public/shared/request"
"github.com/mattermost/mattermost/server/v8/channels/store"
"github.com/mattermost/mattermost/server/v8/platform/services/cache"
)

type LocalCacheChannelStore struct {

@@ -242,17 +245,31 @@ func (s LocalCacheChannelStore) GetMany(ids []string, allowFromCache bool) (mode
var channelsToQuery []string

if allowFromCache {
for _, id := range ids {
var ch *model.Channel
if err := s.rootStore.doStandardReadCache(s.rootStore.channelByIdCache, id, &ch); err == nil {
foundChannels = append(foundChannels, ch)
var toPass []any
for i := 0; i < len(ids); i++ {
var channel *model.Channel
toPass = append(toPass, &channel)
}

errs := s.rootStore.doMultiReadCache(s.rootStore.roleCache, ids, toPass)
for i, err := range errs {
if err != nil {
if err != cache.ErrKeyNotFound {
s.rootStore.logger.Warn("Error in Channelstore.GetMany: ", mlog.Err(err))
}
channelsToQuery = append(channelsToQuery, ids[i])
} else {
channelsToQuery = append(channelsToQuery, id)
gotChannel := *(toPass[i].(**model.Channel))
if gotChannel != nil {
foundChannels = append(foundChannels, gotChannel)
} else {
s.rootStore.logger.Warn("Found nil channel in GetMany. This is not expected")
}
}
}
}

if channelsToQuery == nil {
if len(channelsToQuery) == 0 {
return foundChannels, nil
}

@@ -326,19 +343,35 @@ func (s LocalCacheChannelStore) getByNames(teamId string, names []string, allowF
if allowFromCache {
var misses []string
visited := make(map[string]struct{})
var newKeys []string
for _, name := range names {
if _, ok := visited[name]; ok {
continue
}
visited[name] = struct{}{}
var cacheItem *model.Channel
newKeys = append(newKeys, teamId+name)
}

if err := s.rootStore.doStandardReadCache(s.rootStore.channelByNameCache, teamId+name, &cacheItem); err == nil {
if includeArchivedChannels || cacheItem.DeleteAt == 0 {
channels = append(channels, cacheItem)
toPass := make([]any, 0, len(newKeys))
for i := 0; i < len(newKeys); i++ {
var channel *model.Channel
toPass = append(toPass, &channel)
}

errs := s.rootStore.doMultiReadCache(s.rootStore.roleCache, newKeys, toPass)
for i, err := range errs {
if err != nil {
if err != cache.ErrKeyNotFound {
s.rootStore.logger.Warn("Error in Channelstore.GetByNames: ", mlog.Err(err))
}
misses = append(misses, strings.TrimPrefix(newKeys[i], teamId))
} else {
misses = append(misses, name)
gotChannel := *(toPass[i].(**model.Channel))
if (gotChannel != nil) && (includeArchivedChannels || gotChannel.DeleteAt == 0) {
channels = append(channels, gotChannel)
} else if gotChannel == nil {
s.rootStore.logger.Warn("Found nil channel in getByNames. This is not expected")
}
}
}
names = misses

@@ -408,6 +441,7 @@ func (s LocalCacheChannelStore) SaveMember(rctx request.CTX, member *model.Chann
if err != nil {
return nil, err
}
// For redis, directly increment member count.
s.InvalidateMemberCount(member.ChannelId)
return member, nil
}

@@ -427,13 +461,24 @@ func (s LocalCacheChannelStore) GetChannelsMemberCount(channelIDs []string) (_ m
counts := make(map[string]int64)
remainingChannels := make([]string, 0)

for _, channelID := range channelIDs {
toPass := make([]any, 0, len(channelIDs))
for i := 0; i < len(channelIDs); i++ {
var cacheItem int64
err := s.rootStore.doStandardReadCache(s.rootStore.channelMemberCountsCache, channelID, &cacheItem)
if err == nil {
counts[channelID] = cacheItem
toPass = append(toPass, &cacheItem)
}

errs := s.rootStore.doMultiReadCache(s.rootStore.reaction.rootStore.channelMemberCountsCache, channelIDs, toPass)
for i, err := range errs {
if err != nil {
if err != cache.ErrKeyNotFound {
s.rootStore.logger.Warn("Error in Channelstore.GetChannelsMemberCount: ", mlog.Err(err))
}
remainingChannels = append(remainingChannels, channelIDs[i])
} else {
remainingChannels = append(remainingChannels, channelID)
gotCount := *(toPass[i].(*int64))
if gotCount != 0 {
counts[channelIDs[i]] = gotCount
}
}
}

@@ -477,6 +522,7 @@ func (s LocalCacheChannelStore) RemoveMember(rctx request.CTX, channelId, userId
if err != nil {
return err
}
// For redis, directly decrement member count
s.InvalidateMemberCount(channelId)
return nil
}

@@ -486,6 +532,7 @@ func (s LocalCacheChannelStore) RemoveMembers(rctx request.CTX, channelId string
if err != nil {
return err
}
// For redis, directly decrement member count
s.InvalidateMemberCount(channelId)
return nil
}

@ -10,6 +10,7 @@ import (
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/mattermost/mattermost/server/public/model"
|
||||
"github.com/mattermost/mattermost/server/public/shared/mlog"
|
||||
"github.com/mattermost/mattermost/server/v8/channels/store/storetest"
|
||||
"github.com/mattermost/mattermost/server/v8/channels/store/storetest/mocks"
|
||||
)
|
||||
@ -20,11 +21,12 @@ func TestChannelStore(t *testing.T) {
|
||||
|
||||
func TestChannelStoreChannelMemberCountsCache(t *testing.T) {
|
||||
countResult := int64(10)
|
||||
logger := mlog.CreateConsoleTestLogger(t)
|
||||
|
||||
t.Run("first call not cached, second cached and returning same data", func(t *testing.T) {
|
||||
mockStore := getMockStore(t)
|
||||
mockCacheProvider := getMockCacheProvider()
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider)
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider, logger)
|
||||
require.NoError(t, err)
|
||||
|
||||
count, err := cachedStore.Channel().GetMemberCount("id", true)
|
||||
@ -40,7 +42,7 @@ func TestChannelStoreChannelMemberCountsCache(t *testing.T) {
|
||||
t.Run("first call not cached, second force not cached", func(t *testing.T) {
|
||||
mockStore := getMockStore(t)
|
||||
mockCacheProvider := getMockCacheProvider()
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider)
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider, logger)
|
||||
require.NoError(t, err)
|
||||
|
||||
cachedStore.Channel().GetMemberCount("id", true)
|
||||
@ -52,7 +54,7 @@ func TestChannelStoreChannelMemberCountsCache(t *testing.T) {
|
||||
t.Run("first call force not cached, second not cached, third cached", func(t *testing.T) {
|
||||
mockStore := getMockStore(t)
|
||||
mockCacheProvider := getMockCacheProvider()
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider)
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider, logger)
|
||||
require.NoError(t, err)
|
||||
|
||||
cachedStore.Channel().GetMemberCount("id", false)
|
||||
@ -66,7 +68,7 @@ func TestChannelStoreChannelMemberCountsCache(t *testing.T) {
|
||||
t.Run("first call with GetMemberCountFromCache not cached, second cached and returning same data", func(t *testing.T) {
|
||||
mockStore := getMockStore(t)
|
||||
mockCacheProvider := getMockCacheProvider()
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider)
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider, logger)
|
||||
require.NoError(t, err)
|
||||
|
||||
count := cachedStore.Channel().GetMemberCountFromCache("id")
|
||||
@ -80,7 +82,7 @@ func TestChannelStoreChannelMemberCountsCache(t *testing.T) {
|
||||
t.Run("first call not cached, clear cache, second call not cached", func(t *testing.T) {
|
||||
mockStore := getMockStore(t)
|
||||
mockCacheProvider := getMockCacheProvider()
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider)
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider, logger)
|
||||
require.NoError(t, err)
|
||||
|
||||
cachedStore.Channel().GetMemberCount("id", true)
|
||||
@ -93,7 +95,7 @@ func TestChannelStoreChannelMemberCountsCache(t *testing.T) {
|
||||
t.Run("first call not cached, invalidate cache, second call not cached", func(t *testing.T) {
|
||||
mockStore := getMockStore(t)
|
||||
mockCacheProvider := getMockCacheProvider()
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider)
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider, logger)
|
||||
require.NoError(t, err)
|
||||
|
||||
cachedStore.Channel().GetMemberCount("id", true)
|
||||
@ -109,11 +111,12 @@ func TestChannelStoreChannelsMemberCountCache(t *testing.T) {
|
||||
"channel1": 10,
|
||||
"channel2": 20,
|
||||
}
|
||||
logger := mlog.CreateConsoleTestLogger(t)
|
||||
|
||||
t.Run("first call not cached, second cached and returning same data", func(t *testing.T) {
|
||||
mockStore := getMockStore(t)
|
||||
mockCacheProvider := getMockCacheProvider()
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider)
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider, logger)
|
||||
require.NoError(t, err)
|
||||
|
||||
channelsCount, err := cachedStore.Channel().GetChannelsMemberCount([]string{"channel1", "channel2"})
|
||||
@ -129,7 +132,7 @@ func TestChannelStoreChannelsMemberCountCache(t *testing.T) {
|
||||
t.Run("first call not cached, invalidate cache, second call not cached", func(t *testing.T) {
|
||||
mockStore := getMockStore(t)
|
||||
mockCacheProvider := getMockCacheProvider()
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider)
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider, logger)
|
||||
require.NoError(t, err)
|
||||
|
||||
cachedStore.Channel().GetChannelsMemberCount([]string{"channel1", "channel2"})
|
||||
@ -143,11 +146,12 @@ func TestChannelStoreChannelsMemberCountCache(t *testing.T) {
|
||||
|
||||
func TestChannelStoreChannelPinnedPostsCountsCache(t *testing.T) {
|
||||
countResult := int64(10)
|
||||
logger := mlog.CreateConsoleTestLogger(t)
|
||||
|
||||
t.Run("first call not cached, second cached and returning same data", func(t *testing.T) {
|
||||
mockStore := getMockStore(t)
|
||||
mockCacheProvider := getMockCacheProvider()
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider)
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider, logger)
|
||||
require.NoError(t, err)
|
||||
|
||||
count, err := cachedStore.Channel().GetPinnedPostCount("id", true)
|
||||
@ -163,7 +167,7 @@ func TestChannelStoreChannelPinnedPostsCountsCache(t *testing.T) {
|
||||
t.Run("first call not cached, second force not cached", func(t *testing.T) {
|
||||
mockStore := getMockStore(t)
|
||||
mockCacheProvider := getMockCacheProvider()
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider)
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider, logger)
|
||||
require.NoError(t, err)
|
||||
|
||||
cachedStore.Channel().GetPinnedPostCount("id", true)
|
||||
@ -175,7 +179,7 @@ func TestChannelStoreChannelPinnedPostsCountsCache(t *testing.T) {
|
||||
t.Run("first call force not cached, second not cached, third cached", func(t *testing.T) {
|
||||
mockStore := getMockStore(t)
|
||||
mockCacheProvider := getMockCacheProvider()
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider)
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider, logger)
|
||||
require.NoError(t, err)
|
||||
|
||||
cachedStore.Channel().GetPinnedPostCount("id", false)
|
||||
@ -189,7 +193,7 @@ func TestChannelStoreChannelPinnedPostsCountsCache(t *testing.T) {
|
||||
t.Run("first call not cached, clear cache, second call not cached", func(t *testing.T) {
|
||||
mockStore := getMockStore(t)
|
||||
mockCacheProvider := getMockCacheProvider()
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider)
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider, logger)
|
||||
require.NoError(t, err)
|
||||
|
||||
cachedStore.Channel().GetPinnedPostCount("id", true)
|
||||
@ -202,7 +206,7 @@ func TestChannelStoreChannelPinnedPostsCountsCache(t *testing.T) {
|
||||
t.Run("first call not cached, invalidate cache, second call not cached", func(t *testing.T) {
|
||||
mockStore := getMockStore(t)
|
||||
mockCacheProvider := getMockCacheProvider()
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider)
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider, logger)
|
||||
require.NoError(t, err)
|
||||
|
||||
cachedStore.Channel().GetPinnedPostCount("id", true)
|
||||
@ -215,11 +219,12 @@ func TestChannelStoreChannelPinnedPostsCountsCache(t *testing.T) {
|
||||
|
||||
func TestChannelStoreGuestCountCache(t *testing.T) {
|
||||
countResult := int64(12)
|
||||
logger := mlog.CreateConsoleTestLogger(t)
|
||||
|
||||
t.Run("first call not cached, second cached and returning same data", func(t *testing.T) {
|
||||
mockStore := getMockStore(t)
|
||||
mockCacheProvider := getMockCacheProvider()
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider)
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider, logger)
|
||||
require.NoError(t, err)
|
||||
|
||||
count, err := cachedStore.Channel().GetGuestCount("id", true)
|
||||
@ -235,7 +240,7 @@ func TestChannelStoreGuestCountCache(t *testing.T) {
|
||||
t.Run("first call not cached, second force not cached", func(t *testing.T) {
|
||||
mockStore := getMockStore(t)
|
||||
mockCacheProvider := getMockCacheProvider()
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider)
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider, logger)
|
||||
require.NoError(t, err)
|
||||
|
||||
cachedStore.Channel().GetGuestCount("id", true)
|
||||
@ -247,7 +252,7 @@ func TestChannelStoreGuestCountCache(t *testing.T) {
|
||||
t.Run("first call force not cached, second not cached, third cached", func(t *testing.T) {
|
||||
mockStore := getMockStore(t)
|
||||
mockCacheProvider := getMockCacheProvider()
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider)
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider, logger)
|
||||
require.NoError(t, err)
|
||||
|
||||
cachedStore.Channel().GetGuestCount("id", false)
|
||||
@ -261,7 +266,7 @@ func TestChannelStoreGuestCountCache(t *testing.T) {
|
||||
t.Run("first call not cached, clear cache, second call not cached", func(t *testing.T) {
|
||||
mockStore := getMockStore(t)
|
||||
mockCacheProvider := getMockCacheProvider()
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider)
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider, logger)
|
||||
require.NoError(t, err)
|
||||
|
||||
cachedStore.Channel().GetGuestCount("id", true)
|
||||
@ -274,7 +279,7 @@ func TestChannelStoreGuestCountCache(t *testing.T) {
|
||||
t.Run("first call not cached, invalidate cache, second call not cached", func(t *testing.T) {
|
||||
mockStore := getMockStore(t)
|
||||
mockCacheProvider := getMockCacheProvider()
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider)
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider, logger)
|
||||
require.NoError(t, err)
|
||||
|
||||
cachedStore.Channel().GetGuestCount("id", true)
|
||||
@ -287,11 +292,13 @@ func TestChannelStoreGuestCountCache(t *testing.T) {
|
||||
|
||||
func TestChannelStoreChannel(t *testing.T) {
|
||||
channelId := "channel1"
|
||||
fakeChannel := model.Channel{Id: channelId}
|
||||
fakeChannel := model.Channel{Id: channelId, Name: "channel1-name"}
|
||||
logger := mlog.CreateConsoleTestLogger(t)
|
||||
|
||||
t.Run("first call by id not cached, second cached and returning same data", func(t *testing.T) {
|
||||
mockStore := getMockStore(t)
|
||||
mockCacheProvider := getMockCacheProvider()
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider)
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider, logger)
|
||||
require.NoError(t, err)
|
||||
|
||||
channel, err := cachedStore.Channel().Get(channelId, true)
|
||||
@ -307,7 +314,7 @@ func TestChannelStoreChannel(t *testing.T) {
|
||||
t.Run("first call not cached, second force no cached", func(t *testing.T) {
|
||||
mockStore := getMockStore(t)
|
||||
mockCacheProvider := getMockCacheProvider()
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider)
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider, logger)
|
||||
require.NoError(t, err)
|
||||
|
||||
cachedStore.Channel().Get(channelId, true)
|
||||
@ -319,7 +326,7 @@ func TestChannelStoreChannel(t *testing.T) {
|
||||
t.Run("first call force no cached, second not cached, third cached", func(t *testing.T) {
|
||||
mockStore := getMockStore(t)
|
||||
mockCacheProvider := getMockCacheProvider()
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider)
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider, logger)
|
||||
require.NoError(t, err)
|
||||
cachedStore.Channel().Get(channelId, false)
|
||||
mockStore.Channel().(*mocks.ChannelStore).AssertNumberOfCalls(t, "Get", 1)
|
||||
@ -332,7 +339,7 @@ func TestChannelStoreChannel(t *testing.T) {
|
||||
t.Run("first call not cached, clear cache, second call not cached", func(t *testing.T) {
|
||||
mockStore := getMockStore(t)
|
||||
mockCacheProvider := getMockCacheProvider()
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider)
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider, logger)
|
||||
require.NoError(t, err)
|
||||
|
||||
cachedStore.Channel().Get(channelId, true)
|
||||
@ -345,7 +352,7 @@ func TestChannelStoreChannel(t *testing.T) {
|
||||
t.Run("first call not cached, invalidate cache, second call not cached", func(t *testing.T) {
|
||||
mockStore := getMockStore(t)
|
||||
mockCacheProvider := getMockCacheProvider()
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider)
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider, logger)
|
||||
require.NoError(t, err)
|
||||
cachedStore.Channel().Get(channelId, true)
|
||||
mockStore.Channel().(*mocks.ChannelStore).AssertNumberOfCalls(t, "Get", 1)
|
||||
@@ -354,3 +361,59 @@ func TestChannelStoreChannel(t *testing.T) {
mockStore.Channel().(*mocks.ChannelStore).AssertNumberOfCalls(t, "Get", 2)
})
}

func TestChannelStoreGetManyCache(t *testing.T) {
logger := mlog.CreateConsoleTestLogger(t)

t.Run("first call not cached, second cached and returning same data", func(t *testing.T) {
mockStore := getMockStore(t)
mockCacheProvider := getMockCacheProvider()
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider, logger)
require.NoError(t, err)

fakeChannel := model.Channel{Id: "channel1", Name: "channel1-name"}
fakeChannel2 := model.Channel{Id: "channel2", Name: "channel2-name"}
channels, err := cachedStore.Channel().GetMany([]string{fakeChannel.Id}, true)
require.NoError(t, err)
assert.ElementsMatch(t, model.ChannelList{&fakeChannel}, channels)
mockStore.Channel().(*mocks.ChannelStore).AssertNumberOfCalls(t, "GetMany", 1)

channels, err = cachedStore.Channel().GetMany([]string{fakeChannel.Id}, true)
require.NoError(t, err)
assert.ElementsMatch(t, model.ChannelList{&fakeChannel}, channels)
mockStore.Channel().(*mocks.ChannelStore).AssertNumberOfCalls(t, "GetMany", 1)

channels, err = cachedStore.Channel().GetMany([]string{fakeChannel.Id, fakeChannel2.Id}, true)
require.NoError(t, err)
assert.ElementsMatch(t, model.ChannelList{&fakeChannel, &fakeChannel2}, channels)
mockStore.Channel().(*mocks.ChannelStore).AssertNumberOfCalls(t, "GetMany", 2)
})
}

func TestChannelStoreGetByNamesCache(t *testing.T) {
logger := mlog.CreateConsoleTestLogger(t)

t.Run("first call not cached, second cached and returning same data", func(t *testing.T) {
mockStore := getMockStore(t)
mockCacheProvider := getMockCacheProvider()
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider, logger)
require.NoError(t, err)

fakeChannel := model.Channel{Id: "channel1", Name: "channel1-name"}
fakeChannel2 := model.Channel{Id: "channel2", Name: "channel2-name"}
channels, err := cachedStore.Channel().GetByNames("team1", []string{fakeChannel.Name}, true)
require.NoError(t, err)
assert.ElementsMatch(t, []*model.Channel{&fakeChannel}, channels)
mockStore.Channel().(*mocks.ChannelStore).AssertNumberOfCalls(t, "GetByNames", 1)

channels, err = cachedStore.Channel().GetByNames("team1", []string{fakeChannel.Name}, true)
require.NoError(t, err)
assert.ElementsMatch(t, []*model.Channel{&fakeChannel}, channels)
mockStore.Channel().(*mocks.ChannelStore).AssertNumberOfCalls(t, "GetByNames", 1)

channels, err = cachedStore.Channel().GetByNames("team1", []string{fakeChannel.Name, fakeChannel2.Name}, true)
require.NoError(t, err)
assert.ElementsMatch(t, []*model.Channel{&fakeChannel, &fakeChannel2}, channels)
mockStore.Channel().(*mocks.ChannelStore).AssertNumberOfCalls(t, "GetByNames", 2)
})
}

@ -10,6 +10,7 @@ import (
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/mattermost/mattermost/server/public/model"
|
||||
"github.com/mattermost/mattermost/server/public/shared/mlog"
|
||||
"github.com/mattermost/mattermost/server/public/shared/request"
|
||||
"github.com/mattermost/mattermost/server/v8/channels/store/storetest"
|
||||
"github.com/mattermost/mattermost/server/v8/channels/store/storetest/mocks"
|
||||
@ -21,6 +22,7 @@ func TestEmojiStore(t *testing.T) {
|
||||
|
||||
func TestEmojiStoreCache(t *testing.T) {
|
||||
rctx := request.TestContext(t)
|
||||
logger := mlog.CreateConsoleTestLogger(t)
|
||||
|
||||
fakeEmoji := model.Emoji{Id: "123", Name: "name123"}
|
||||
fakeEmoji2 := model.Emoji{Id: "321", Name: "name321"}
|
||||
@ -29,7 +31,7 @@ func TestEmojiStoreCache(t *testing.T) {
|
||||
t.Run("first call by id not cached, second cached and returning same data", func(t *testing.T) {
|
||||
mockStore := getMockStore(t)
|
||||
mockCacheProvider := getMockCacheProvider()
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider)
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider, logger)
|
||||
require.NoError(t, err)
|
||||
|
||||
emoji, err := cachedStore.Emoji().Get(rctx, "123", true)
|
||||
@ -45,7 +47,7 @@ func TestEmojiStoreCache(t *testing.T) {
|
||||
t.Run("GetByName: first call by name not cached, second cached and returning same data", func(t *testing.T) {
|
||||
mockStore := getMockStore(t)
|
||||
mockCacheProvider := getMockCacheProvider()
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider)
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider, logger)
|
||||
require.NoError(t, err)
|
||||
|
||||
emoji, err := cachedStore.Emoji().GetByName(rctx, "name123", true)
|
||||
@ -61,7 +63,7 @@ func TestEmojiStoreCache(t *testing.T) {
|
||||
t.Run("GetMultipleByName: first call by name not cached, second cached and returning same data", func(t *testing.T) {
|
||||
mockStore := getMockStore(t)
|
||||
mockCacheProvider := getMockCacheProvider()
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider)
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider, logger)
|
||||
require.NoError(t, err)
|
||||
|
||||
emojis, err := cachedStore.Emoji().GetMultipleByName(rctx, []string{"name123"})
|
||||
@ -79,7 +81,7 @@ func TestEmojiStoreCache(t *testing.T) {
|
||||
t.Run("GetMultipleByName: multiple elements", func(t *testing.T) {
|
||||
mockStore := getMockStore(t)
|
||||
mockCacheProvider := getMockCacheProvider()
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider)
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider, logger)
|
||||
require.NoError(t, err)
|
||||
|
||||
emojis, err := cachedStore.Emoji().GetMultipleByName(rctx, []string{"name123", "name321"})
|
||||
@ -98,7 +100,7 @@ func TestEmojiStoreCache(t *testing.T) {
|
||||
t.Run("first call by id not cached, second force not cached", func(t *testing.T) {
|
||||
mockStore := getMockStore(t)
|
||||
mockCacheProvider := getMockCacheProvider()
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider)
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider, logger)
|
||||
require.NoError(t, err)
|
||||
|
||||
cachedStore.Emoji().Get(rctx, "123", true)
|
||||
@ -110,7 +112,7 @@ func TestEmojiStoreCache(t *testing.T) {
|
||||
t.Run("first call by name not cached, second force not cached", func(t *testing.T) {
|
||||
mockStore := getMockStore(t)
|
||||
mockCacheProvider := getMockCacheProvider()
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider)
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider, logger)
|
||||
require.NoError(t, err)
|
||||
|
||||
cachedStore.Emoji().GetByName(rctx, "name123", true)
|
||||
@ -122,7 +124,7 @@ func TestEmojiStoreCache(t *testing.T) {
|
||||
t.Run("first call by id force not cached, second not cached, third cached", func(t *testing.T) {
|
||||
mockStore := getMockStore(t)
|
||||
mockCacheProvider := getMockCacheProvider()
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider)
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider, logger)
|
||||
require.NoError(t, err)
|
||||
|
||||
cachedStore.Emoji().Get(rctx, "123", false)
|
||||
@ -136,7 +138,7 @@ func TestEmojiStoreCache(t *testing.T) {
|
||||
t.Run("first call by name force not cached, second not cached, third cached", func(t *testing.T) {
|
||||
mockStore := getMockStore(t)
|
||||
mockCacheProvider := getMockCacheProvider()
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider)
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider, logger)
|
||||
require.NoError(t, err)
|
||||
|
||||
cachedStore.Emoji().GetByName(rctx, "name123", false)
|
||||
@ -150,7 +152,7 @@ func TestEmojiStoreCache(t *testing.T) {
|
||||
t.Run("first call by id, second call by name and GetMultipleByName cached", func(t *testing.T) {
|
||||
mockStore := getMockStore(t)
|
||||
mockCacheProvider := getMockCacheProvider()
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider)
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider, logger)
|
||||
require.NoError(t, err)
|
||||
|
||||
cachedStore.Emoji().Get(rctx, "123", true)
|
||||
@ -164,7 +166,7 @@ func TestEmojiStoreCache(t *testing.T) {
|
||||
t.Run("first call by name, second call by id cached", func(t *testing.T) {
|
||||
mockStore := getMockStore(t)
|
||||
mockCacheProvider := getMockCacheProvider()
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider)
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider, logger)
|
||||
require.NoError(t, err)
|
||||
|
||||
cachedStore.Emoji().GetByName(rctx, "name123", true)
|
||||
@ -176,7 +178,7 @@ func TestEmojiStoreCache(t *testing.T) {
|
||||
t.Run("first call by id not cached, invalidate, and then not cached again", func(t *testing.T) {
|
||||
mockStore := getMockStore(t)
|
||||
mockCacheProvider := getMockCacheProvider()
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider)
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider, logger)
|
||||
require.NoError(t, err)
|
||||
|
||||
cachedStore.Emoji().Get(rctx, "123", true)
|
||||
@ -189,7 +191,7 @@ func TestEmojiStoreCache(t *testing.T) {
|
||||
t.Run("call by id, use master", func(t *testing.T) {
|
||||
mockStore := getMockStore(t)
|
||||
mockCacheProvider := getMockCacheProvider()
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider)
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider, logger)
|
||||
require.NoError(t, err)
|
||||
|
||||
cachedStore.Emoji().Get(rctx, "master", true)
|
||||
@ -202,7 +204,7 @@ func TestEmojiStoreCache(t *testing.T) {
|
||||
t.Run("first call by name not cached, invalidate, and then not cached again", func(t *testing.T) {
|
||||
mockStore := getMockStore(t)
|
||||
mockCacheProvider := getMockCacheProvider()
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider)
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider, logger)
|
||||
require.NoError(t, err)
|
||||
|
||||
cachedStore.Emoji().GetByName(rctx, "name123", true)
|
||||
@ -215,7 +217,7 @@ func TestEmojiStoreCache(t *testing.T) {
|
||||
t.Run("call by name, use master", func(t *testing.T) {
|
||||
mockStore := getMockStore(t)
|
||||
mockCacheProvider := getMockCacheProvider()
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider)
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider, logger)
|
||||
require.NoError(t, err)
|
||||
|
||||
cachedStore.Emoji().GetByName(rctx, "master", true)
|
||||
|
@ -10,6 +10,7 @@ import (
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/mattermost/mattermost/server/public/model"
|
||||
"github.com/mattermost/mattermost/server/public/shared/mlog"
|
||||
"github.com/mattermost/mattermost/server/v8/channels/store/storetest"
|
||||
"github.com/mattermost/mattermost/server/v8/channels/store/storetest/mocks"
|
||||
)
|
||||
@ -20,11 +21,12 @@ func TestFileInfoStore(t *testing.T) {
|
||||
|
||||
func TestFileInfoStoreCache(t *testing.T) {
|
||||
fakeFileInfo := model.FileInfo{PostId: "123"}
|
||||
logger := mlog.CreateConsoleTestLogger(t)
|
||||
|
||||
t.Run("first call not cached, second cached and returning same data", func(t *testing.T) {
|
||||
mockStore := getMockStore(t)
|
||||
mockCacheProvider := getMockCacheProvider()
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider)
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider, logger)
|
||||
require.NoError(t, err)
|
||||
|
||||
fileInfos, err := cachedStore.FileInfo().GetForPost("123", true, true, true)
|
||||
@ -39,7 +41,7 @@ func TestFileInfoStoreCache(t *testing.T) {
|
||||
t.Run("first call not cached, second force no cached", func(t *testing.T) {
|
||||
mockStore := getMockStore(t)
|
||||
mockCacheProvider := getMockCacheProvider()
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider)
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider, logger)
|
||||
require.NoError(t, err)
|
||||
|
||||
cachedStore.FileInfo().GetForPost("123", true, true, true)
|
||||
@ -51,7 +53,7 @@ func TestFileInfoStoreCache(t *testing.T) {
|
||||
t.Run("first call not cached, invalidate, and then not cached again", func(t *testing.T) {
|
||||
mockStore := getMockStore(t)
|
||||
mockCacheProvider := getMockCacheProvider()
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider)
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider, logger)
|
||||
require.NoError(t, err)
|
||||
|
||||
cachedStore.FileInfo().GetForPost("123", true, true, true)
|
||||
|
@@ -8,6 +8,7 @@ import (
"time"

"github.com/mattermost/mattermost/server/public/model"
"github.com/mattermost/mattermost/server/public/shared/mlog"
"github.com/mattermost/mattermost/server/v8/channels/store"
"github.com/mattermost/mattermost/server/v8/einterfaces"
"github.com/mattermost/mattermost/server/v8/platform/services/cache"

@@ -74,8 +75,10 @@ var clearCacheMessageData = []byte("")

type LocalCacheStore struct {
store.Store
metrics einterfaces.MetricsInterface
cluster einterfaces.ClusterInterface
cacheType string
metrics einterfaces.MetricsInterface
cluster einterfaces.ClusterInterface
logger mlog.LoggerIFace

reaction LocalCacheReactionStore
reactionCache cache.Cache

@@ -123,11 +126,13 @@ type LocalCacheStore struct {
termsOfServiceCache cache.Cache
}

func NewLocalCacheLayer(baseStore store.Store, metrics einterfaces.MetricsInterface, cluster einterfaces.ClusterInterface, cacheProvider cache.Provider) (localCacheStore LocalCacheStore, err error) {
func NewLocalCacheLayer(baseStore store.Store, metrics einterfaces.MetricsInterface, cluster einterfaces.ClusterInterface, cacheProvider cache.Provider, logger mlog.LoggerIFace) (localCacheStore LocalCacheStore, err error) {
localCacheStore = LocalCacheStore{
Store: baseStore,
cluster: cluster,
metrics: metrics,
Store: baseStore,
cluster: cluster,
metrics: metrics,
cacheType: cacheProvider.Type(),
logger: logger,
}
// Reactions
if localCacheStore.reactionCache, err = cacheProvider.NewCache(&cache.CacheOptions{

@@ -251,9 +256,10 @@ func NewLocalCacheLayer(baseStore store.Store, metrics einterfaces.MetricsInterf
}); err != nil {
return
}

if localCacheStore.channelMembersForUserCache, err = cacheProvider.NewCache(&cache.CacheOptions{
Size: AllChannelMembersForUserCacheSize,
Name: "ChannnelMembersForUser",
Name: "ChannelMembersForUser",
DefaultExpiry: AllChannelMembersForUserCacheDuration,
InvalidateClusterEvent: model.ClusterEventInvalidateCacheForUser,
}); err != nil {

@@ -334,7 +340,7 @@ func NewLocalCacheLayer(baseStore store.Store, metrics einterfaces.MetricsInterf
}); err != nil {
return
}
if localCacheStore.profilesInChannelCache, err = cacheProvider.NewCache(&cache.CacheOptions{
if localCacheStore.profilesInChannelCache, err = cache.NewProvider().NewCache(&cache.CacheOptions{
Size: ProfilesInChannelCacheSize,
Name: "ProfilesInChannel",
DefaultExpiry: ProfilesInChannelCacheSec * time.Second,

@@ -445,7 +451,7 @@ func (s LocalCacheStore) DropAllTables() {

func (s *LocalCacheStore) doInvalidateCacheCluster(cache cache.Cache, key string, props map[string]string) {
cache.Remove(key)
if s.cluster != nil {
if s.cluster != nil && s.cacheType == model.CacheTypeLRU {
msg := &model.ClusterMessage{
Event: cache.GetInvalidateClusterEvent(),
SendType: model.ClusterSendBestEffort,

@@ -476,9 +482,23 @@ func (s *LocalCacheStore) doStandardReadCache(cache cache.Cache, key string, val
return err
}

func (s *LocalCacheStore) doMultiReadCache(cache cache.Cache, keys []string, values []any) []error {
errs := cache.GetMulti(keys, values)
if s.metrics != nil {
for _, err := range errs {
if err == nil {
s.metrics.IncrementMemCacheHitCounter(cache.Name())
continue
}
s.metrics.IncrementMemCacheMissCounter(cache.Name())
}
}
return errs
}

func (s *LocalCacheStore) doClearCacheCluster(cache cache.Cache) {
cache.Purge()
if s.cluster != nil {
if s.cluster != nil && s.cacheType == model.CacheTypeLRU {
msg := &model.ClusterMessage{
Event: cache.GetInvalidateClusterEvent(),
SendType: model.ClusterSendBestEffort,

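The new `doMultiReadCache` helper wraps `cache.GetMulti`, which returns one error per key. A sketch of the calling pattern used by the channel-store methods earlier in this diff: one pointer slot per key, with `cache.ErrKeyNotFound` treated as an ordinary miss. The function and its names are illustrative, not part of the commit.

```go
package example

import (
	"github.com/mattermost/mattermost/server/public/model"
	"github.com/mattermost/mattermost/server/v8/platform/services/cache"
)

// readChannels is a hypothetical caller of cache.GetMulti following the
// pattern in LocalCacheChannelStore.GetMany: hits are collected, misses
// (and unexpected errors) fall through to a database query.
func readChannels(c cache.Cache, ids []string) (found []*model.Channel, missed []string) {
	toPass := make([]any, 0, len(ids))
	for range ids {
		var ch *model.Channel
		toPass = append(toPass, &ch) // GetMulti writes through the **model.Channel
	}
	for i, err := range c.GetMulti(ids, toPass) {
		if err != nil {
			// cache.ErrKeyNotFound is a normal miss; anything else would be logged.
			missed = append(missed, ids[i])
			continue
		}
		if ch := *(toPass[i].(**model.Channel)); ch != nil {
			found = append(found, ch)
		}
	}
	return found, missed
}
```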
@@ -108,7 +108,7 @@ func initStores(logger mlog.LoggerIFace) {
if err != nil {
return err
}
st.Store, err = NewLocalCacheLayer(st.SqlStore, nil, nil, getMockCacheProvider())
st.Store, err = NewLocalCacheLayer(st.SqlStore, nil, nil, getMockCacheProvider(), logger)
if err != nil {
return err
}

@@ -25,7 +25,8 @@ var mainHelper *testlib.MainHelper
func getMockCacheProvider() cache.Provider {
mockCacheProvider := cachemocks.Provider{}
mockCacheProvider.On("NewCache", mock.Anything).
Return(cache.NewLRU(cache.LRUOptions{Size: 128}), nil)
Return(cache.NewLRU(&cache.CacheOptions{Size: 128}), nil)
mockCacheProvider.On("Type").Return("lru")
return &mockCacheProvider
}

@@ -41,11 +42,13 @@ func getMockStore(t *testing.T) *mocks.Store {
mockStore.On("Reaction").Return(&mockReactionsStore)

fakeRole := model.Role{Id: "123", Name: "role-name"}
fakeRole2 := model.Role{Id: "456", Name: "role-name2"}
mockRolesStore := mocks.RoleStore{}
mockRolesStore.On("Save", &fakeRole).Return(&model.Role{}, nil)
mockRolesStore.On("Delete", "123").Return(&fakeRole, nil)
mockRolesStore.On("GetByName", context.Background(), "role-name").Return(&fakeRole, nil)
mockRolesStore.On("GetByNames", []string{"role-name"}).Return([]*model.Role{&fakeRole}, nil)
mockRolesStore.On("GetByNames", []string{"role-name2"}).Return([]*model.Role{&fakeRole2}, nil)
mockRolesStore.On("PermanentDeleteAll").Return(nil)
mockStore.On("Role").Return(&mockRolesStore)

@@ -90,15 +93,20 @@ func getMockStore(t *testing.T) *mocks.Store {
mockCount := int64(10)
mockGuestCount := int64(12)
channelId := "channel1"
fakeChannelId := model.Channel{Id: channelId}
fakeChannel1 := model.Channel{Id: channelId, Name: "channel1-name"}
fakeChannel2 := model.Channel{Id: "channel2", Name: "channel2-name"}
mockChannelStore := mocks.ChannelStore{}
mockChannelStore.On("ClearCaches").Return()
mockChannelStore.On("GetMemberCount", "id", true).Return(mockCount, nil)
mockChannelStore.On("GetMemberCount", "id", false).Return(mockCount, nil)
mockChannelStore.On("GetGuestCount", "id", true).Return(mockGuestCount, nil)
mockChannelStore.On("GetGuestCount", "id", false).Return(mockGuestCount, nil)
mockChannelStore.On("Get", channelId, true).Return(&fakeChannelId, nil)
mockChannelStore.On("Get", channelId, false).Return(&fakeChannelId, nil)
mockChannelStore.On("Get", channelId, true).Return(&fakeChannel1, nil)
mockChannelStore.On("Get", channelId, false).Return(&fakeChannel1, nil)
mockChannelStore.On("GetMany", []string{channelId}, true).Return(model.ChannelList{&fakeChannel1}, nil)
mockChannelStore.On("GetMany", []string{fakeChannel2.Id}, true).Return(model.ChannelList{&fakeChannel2}, nil)
mockChannelStore.On("GetByNames", "team1", []string{fakeChannel1.Name}, true).Return([]*model.Channel{&fakeChannel1}, nil)
mockChannelStore.On("GetByNames", "team1", []string{fakeChannel2.Name}, true).Return([]*model.Channel{&fakeChannel2}, nil)
mockStore.On("Channel").Return(&mockChannelStore)

mockChannelsMemberCount := map[string]int64{

@ -11,6 +11,7 @@ import (
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/mattermost/mattermost/server/public/model"
|
||||
"github.com/mattermost/mattermost/server/public/shared/mlog"
|
||||
"github.com/mattermost/mattermost/server/v8/channels/store/storetest"
|
||||
"github.com/mattermost/mattermost/server/v8/channels/store/storetest/mocks"
|
||||
)
|
||||
@ -27,11 +28,12 @@ func TestPostStoreLastPostTimeCache(t *testing.T) {
|
||||
Time: fakeLastTime,
|
||||
SkipFetchThreads: false,
|
||||
}
|
||||
logger := mlog.CreateConsoleTestLogger(t)
|
||||
|
||||
t.Run("GetEtag: first call not cached, second cached and returning same data", func(t *testing.T) {
|
||||
mockStore := getMockStore(t)
|
||||
mockCacheProvider := getMockCacheProvider()
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider)
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider, logger)
|
||||
require.NoError(t, err)
|
||||
|
||||
expectedResult := fmt.Sprintf("%v.%v", model.CurrentVersion, fakeLastTime)
|
||||
@ -48,7 +50,7 @@ func TestPostStoreLastPostTimeCache(t *testing.T) {
|
||||
t.Run("GetEtag: first call not cached, second force no cached", func(t *testing.T) {
|
||||
mockStore := getMockStore(t)
|
||||
mockCacheProvider := getMockCacheProvider()
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider)
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider, logger)
|
||||
require.NoError(t, err)
|
||||
|
||||
cachedStore.Post().GetEtag(channelId, true, false)
|
||||
@ -60,7 +62,7 @@ func TestPostStoreLastPostTimeCache(t *testing.T) {
|
||||
t.Run("GetEtag: first call not cached, invalidate, and then not cached again", func(t *testing.T) {
|
||||
mockStore := getMockStore(t)
|
||||
mockCacheProvider := getMockCacheProvider()
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider)
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider, logger)
|
||||
require.NoError(t, err)
|
||||
|
||||
cachedStore.Post().GetEtag(channelId, true, false)
|
||||
@ -73,7 +75,7 @@ func TestPostStoreLastPostTimeCache(t *testing.T) {
|
||||
t.Run("GetEtag: first call not cached, clear caches, and then not cached again", func(t *testing.T) {
|
||||
mockStore := getMockStore(t)
|
||||
mockCacheProvider := getMockCacheProvider()
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider)
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider, logger)
|
||||
require.NoError(t, err)
|
||||
|
||||
cachedStore.Post().GetEtag(channelId, true, false)
|
||||
@ -86,7 +88,7 @@ func TestPostStoreLastPostTimeCache(t *testing.T) {
|
||||
t.Run("GetPostsSince: first call not cached, second cached and returning same data", func(t *testing.T) {
|
||||
mockStore := getMockStore(t)
|
||||
mockCacheProvider := getMockCacheProvider()
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider)
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider, logger)
|
||||
require.NoError(t, err)
|
||||
|
||||
expectedResult := model.NewPostList()
|
||||
@ -105,7 +107,7 @@ func TestPostStoreLastPostTimeCache(t *testing.T) {
|
||||
t.Run("GetPostsSince: first call not cached, second force no cached", func(t *testing.T) {
|
||||
mockStore := getMockStore(t)
|
||||
mockCacheProvider := getMockCacheProvider()
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider)
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider, logger)
|
||||
require.NoError(t, err)
|
||||
|
||||
cachedStore.Post().GetPostsSince(fakeOptions, true, map[string]bool{})
|
||||
@ -117,7 +119,7 @@ func TestPostStoreLastPostTimeCache(t *testing.T) {
|
||||
t.Run("GetPostsSince: first call not cached, invalidate, and then not cached again", func(t *testing.T) {
|
||||
mockStore := getMockStore(t)
|
||||
mockCacheProvider := getMockCacheProvider()
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider)
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider, logger)
|
||||
require.NoError(t, err)
|
||||
|
||||
cachedStore.Post().GetPostsSince(fakeOptions, true, map[string]bool{})
|
||||
@ -130,7 +132,7 @@ func TestPostStoreLastPostTimeCache(t *testing.T) {
|
||||
t.Run("GetPostsSince: first call not cached, clear caches, and then not cached again", func(t *testing.T) {
|
||||
mockStore := getMockStore(t)
|
||||
mockCacheProvider := getMockCacheProvider()
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider)
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider, logger)
|
||||
require.NoError(t, err)
|
||||
|
||||
cachedStore.Post().GetPostsSince(fakeOptions, true, map[string]bool{})
|
||||
@ -144,11 +146,12 @@ func TestPostStoreLastPostTimeCache(t *testing.T) {
|
||||
func TestPostStoreCache(t *testing.T) {
|
||||
fakePosts := &model.PostList{}
|
||||
fakeOptions := model.GetPostsOptions{ChannelId: "123", PerPage: 30}
|
||||
logger := mlog.CreateConsoleTestLogger(t)
|
||||
|
||||
t.Run("first call not cached, second cached and returning same data", func(t *testing.T) {
|
||||
mockStore := getMockStore(t)
|
||||
mockCacheProvider := getMockCacheProvider()
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider)
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider, logger)
|
||||
require.NoError(t, err)
|
||||
|
||||
gotPosts, err := cachedStore.Post().GetPosts(fakeOptions, true, map[string]bool{})
|
||||
@ -163,7 +166,7 @@ func TestPostStoreCache(t *testing.T) {
|
||||
t.Run("first call not cached, second force not cached", func(t *testing.T) {
|
||||
mockStore := getMockStore(t)
|
||||
mockCacheProvider := getMockCacheProvider()
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider)
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider, logger)
|
||||
require.NoError(t, err)
|
||||
|
||||
gotPosts, err := cachedStore.Post().GetPosts(fakeOptions, true, map[string]bool{})
|
||||
@ -178,7 +181,7 @@ func TestPostStoreCache(t *testing.T) {
|
||||
t.Run("first call not cached, invalidate, and then not cached again", func(t *testing.T) {
|
||||
mockStore := getMockStore(t)
|
||||
mockCacheProvider := getMockCacheProvider()
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider)
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider, logger)
|
||||
require.NoError(t, err)
|
||||
|
||||
gotPosts, err := cachedStore.Post().GetPosts(fakeOptions, true, map[string]bool{})
|
||||
|
@ -10,6 +10,7 @@ import (
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/mattermost/mattermost/server/public/model"
|
||||
"github.com/mattermost/mattermost/server/public/shared/mlog"
|
||||
"github.com/mattermost/mattermost/server/v8/channels/store/storetest"
|
||||
"github.com/mattermost/mattermost/server/v8/channels/store/storetest/mocks"
|
||||
)
|
||||
@ -20,11 +21,12 @@ func TestReactionStore(t *testing.T) {
|
||||
|
||||
func TestReactionStoreCache(t *testing.T) {
|
||||
fakeReaction := model.Reaction{PostId: "123"}
|
||||
logger := mlog.CreateConsoleTestLogger(t)
|
||||
|
||||
t.Run("first call not cached, second cached and returning same data", func(t *testing.T) {
|
||||
mockStore := getMockStore(t)
|
||||
mockCacheProvider := getMockCacheProvider()
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider)
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider, logger)
|
||||
require.NoError(t, err)
|
||||
|
||||
reaction, err := cachedStore.Reaction().GetForPost("123", true)
|
||||
@ -39,7 +41,7 @@ func TestReactionStoreCache(t *testing.T) {
|
||||
t.Run("first call not cached, second force not cached", func(t *testing.T) {
|
||||
mockStore := getMockStore(t)
|
||||
mockCacheProvider := getMockCacheProvider()
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider)
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider, logger)
|
||||
require.NoError(t, err)
|
||||
|
||||
cachedStore.Reaction().GetForPost("123", true)
|
||||
@ -51,7 +53,7 @@ func TestReactionStoreCache(t *testing.T) {
|
||||
t.Run("first call not cached, save, and then not cached again", func(t *testing.T) {
|
||||
mockStore := getMockStore(t)
|
||||
mockCacheProvider := getMockCacheProvider()
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider)
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider, logger)
|
||||
require.NoError(t, err)
|
||||
|
||||
cachedStore.Reaction().GetForPost("123", true)
|
||||
@ -64,7 +66,7 @@ func TestReactionStoreCache(t *testing.T) {
|
||||
t.Run("first call not cached, delete, and then not cached again", func(t *testing.T) {
|
||||
mockStore := getMockStore(t)
|
||||
mockCacheProvider := getMockCacheProvider()
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider)
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider, logger)
|
||||
require.NoError(t, err)
|
||||
|
||||
cachedStore.Reaction().GetForPost("123", true)
|
||||
|
@ -10,7 +10,9 @@ import (
"strings"

"github.com/mattermost/mattermost/server/public/model"
"github.com/mattermost/mattermost/server/public/shared/mlog"
"github.com/mattermost/mattermost/server/v8/channels/store"
"github.com/mattermost/mattermost/server/v8/platform/services/cache"
)

type LocalCacheRoleStore struct {
@ -60,15 +62,32 @@ func (s LocalCacheRoleStore) GetByNames(names []string) ([]*model.Role, error) {
var foundRoles []*model.Role
var rolesToQuery []string

for _, roleName := range names {
toPass := make([]any, 0, len(names))
for i := 0; i < len(names); i++ {
var role *model.Role
if err := s.rootStore.doStandardReadCache(s.rootStore.roleCache, roleName, &role); err == nil {
foundRoles = append(foundRoles, role)
toPass = append(toPass, &role)
}
errs := s.rootStore.doMultiReadCache(s.rootStore.roleCache, names, toPass)
for i, err := range errs {
if err != nil {
if err != cache.ErrKeyNotFound {
s.rootStore.logger.Warn("Error in Rolestore.GetByNames: ", mlog.Err(err))
}
rolesToQuery = append(rolesToQuery, names[i])
} else {
rolesToQuery = append(rolesToQuery, roleName)
gotRole := *(toPass[i].(**model.Role))
if gotRole != nil {
foundRoles = append(foundRoles, gotRole)
} else {
s.rootStore.logger.Warn("Found nil role in GetByNames. This is not expected")
}
}
}

if len(rolesToQuery) == 0 {
return foundRoles, nil
}

roles, err := s.RoleStore.GetByNames(rolesToQuery)
if err != nil {
return nil, err
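Note: the GetByNames hunk above replaces the per-name doStandardReadCache loop with a single doMultiReadCache call: one `**model.Role` destination is pre-allocated per name, the per-key errors are walked once, and only genuine misses (plus unexpected read errors, which are logged) fall back to the database query. The sketch below restates that hit/miss partition against the new `Cache.GetMulti` contract introduced in this commit; `Item`, `fetchItems` and `loadFromDB` are illustrative stand-ins, not Mattermost identifiers.

```go
package cachesketch

import (
	"errors"
	"log"
)

// ErrKeyNotFound stands in for cache.ErrKeyNotFound from the real package.
var ErrKeyNotFound = errors.New("key not found")

// Cache is the slice of the cache.Cache interface this sketch needs.
type Cache interface {
	GetMulti(keys []string, values []any) []error
}

type Item struct{ Name string }

// fetchItems batches the cache lookups, then queries the backing store only
// for the keys that were genuinely missing, mirroring GetByNames above.
func fetchItems(c Cache, keys []string, loadFromDB func([]string) ([]*Item, error)) ([]*Item, error) {
	found := make([]*Item, 0, len(keys))

	// One **Item destination per key; GetMulti decodes hits in place.
	toPass := make([]any, 0, len(keys))
	for range keys {
		var it *Item
		toPass = append(toPass, &it)
	}

	var misses []string
	for i, err := range c.GetMulti(keys, toPass) {
		if err != nil {
			if !errors.Is(err, ErrKeyNotFound) {
				log.Printf("cache read error for %q: %v", keys[i], err)
			}
			misses = append(misses, keys[i])
			continue
		}
		if it := *(toPass[i].(**Item)); it != nil {
			found = append(found, it)
		}
	}

	if len(misses) == 0 {
		return found, nil
	}
	fromDB, err := loadFromDB(misses)
	if err != nil {
		return nil, err
	}
	return append(found, fromDB...), nil
}
```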
|
@ -11,6 +11,7 @@ import (
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/mattermost/mattermost/server/public/model"
|
||||
"github.com/mattermost/mattermost/server/public/shared/mlog"
|
||||
"github.com/mattermost/mattermost/server/v8/channels/store/storetest"
|
||||
"github.com/mattermost/mattermost/server/v8/channels/store/storetest/mocks"
|
||||
)
|
||||
@ -21,11 +22,12 @@ func TestRoleStore(t *testing.T) {
|
||||
|
||||
func TestRoleStoreCache(t *testing.T) {
|
||||
fakeRole := model.Role{Id: "123", Name: "role-name"}
|
||||
logger := mlog.CreateConsoleTestLogger(t)
|
||||
|
||||
t.Run("first call not cached, second cached and returning same data", func(t *testing.T) {
|
||||
mockStore := getMockStore(t)
|
||||
mockCacheProvider := getMockCacheProvider()
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider)
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider, logger)
|
||||
require.NoError(t, err)
|
||||
|
||||
role, err := cachedStore.Role().GetByName(context.Background(), "role-name")
|
||||
@ -41,7 +43,7 @@ func TestRoleStoreCache(t *testing.T) {
|
||||
t.Run("first call not cached, save, and then not cached again", func(t *testing.T) {
|
||||
mockStore := getMockStore(t)
|
||||
mockCacheProvider := getMockCacheProvider()
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider)
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider, logger)
|
||||
require.NoError(t, err)
|
||||
|
||||
cachedStore.Role().GetByName(context.Background(), "role-name")
|
||||
@ -54,7 +56,7 @@ func TestRoleStoreCache(t *testing.T) {
|
||||
t.Run("first call not cached, delete, and then not cached again", func(t *testing.T) {
|
||||
mockStore := getMockStore(t)
|
||||
mockCacheProvider := getMockCacheProvider()
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider)
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider, logger)
|
||||
require.NoError(t, err)
|
||||
|
||||
cachedStore.Role().GetByName(context.Background(), "role-name")
|
||||
@ -67,7 +69,7 @@ func TestRoleStoreCache(t *testing.T) {
|
||||
t.Run("first call not cached, permanent delete all, and then not cached again", func(t *testing.T) {
|
||||
mockStore := getMockStore(t)
|
||||
mockCacheProvider := getMockCacheProvider()
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider)
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider, logger)
|
||||
require.NoError(t, err)
|
||||
|
||||
cachedStore.Role().GetByName(context.Background(), "role-name")
|
||||
@ -77,3 +79,31 @@ func TestRoleStoreCache(t *testing.T) {
|
||||
mockStore.Role().(*mocks.RoleStore).AssertNumberOfCalls(t, "GetByName", 2)
|
||||
})
|
||||
}
|
||||
|
||||
func TestRoleStoreGetByNamesCache(t *testing.T) {
|
||||
fakeRole := model.Role{Id: "123", Name: "role-name"}
|
||||
fakeRole2 := model.Role{Id: "456", Name: "role-name2"}
|
||||
logger := mlog.CreateConsoleTestLogger(t)
|
||||
|
||||
t.Run("first call not cached, second cached and returning same data", func(t *testing.T) {
|
||||
mockStore := getMockStore(t)
|
||||
mockCacheProvider := getMockCacheProvider()
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider, logger)
|
||||
require.NoError(t, err)
|
||||
|
||||
roles, err := cachedStore.Role().GetByNames([]string{"role-name"})
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, roles[0], &fakeRole)
|
||||
mockStore.Role().(*mocks.RoleStore).AssertNumberOfCalls(t, "GetByNames", 1)
|
||||
|
||||
roles, err = cachedStore.Role().GetByNames([]string{"role-name"})
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, roles[0], &fakeRole)
|
||||
mockStore.Role().(*mocks.RoleStore).AssertNumberOfCalls(t, "GetByNames", 1)
|
||||
|
||||
roles, err = cachedStore.Role().GetByNames([]string{"role-name", "role-name2"})
|
||||
require.NoError(t, err)
|
||||
assert.ElementsMatch(t, []*model.Role{&fakeRole, &fakeRole2}, roles)
|
||||
mockStore.Role().(*mocks.RoleStore).AssertNumberOfCalls(t, "GetByNames", 2)
|
||||
})
|
||||
}
|
||||
|
@ -10,6 +10,7 @@ import (
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/mattermost/mattermost/server/public/model"
|
||||
"github.com/mattermost/mattermost/server/public/shared/mlog"
|
||||
"github.com/mattermost/mattermost/server/v8/channels/store/storetest"
|
||||
"github.com/mattermost/mattermost/server/v8/channels/store/storetest/mocks"
|
||||
)
|
||||
@ -20,11 +21,12 @@ func TestSchemeStore(t *testing.T) {
|
||||
|
||||
func TestSchemeStoreCache(t *testing.T) {
|
||||
fakeScheme := model.Scheme{Id: "123", Name: "scheme-name"}
|
||||
logger := mlog.CreateConsoleTestLogger(t)
|
||||
|
||||
t.Run("first call not cached, second cached and returning same data", func(t *testing.T) {
|
||||
mockStore := getMockStore(t)
|
||||
mockCacheProvider := getMockCacheProvider()
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider)
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider, logger)
|
||||
require.NoError(t, err)
|
||||
|
||||
scheme, err := cachedStore.Scheme().Get("123")
|
||||
@ -40,7 +42,7 @@ func TestSchemeStoreCache(t *testing.T) {
|
||||
t.Run("first call not cached, save, and then not cached again", func(t *testing.T) {
|
||||
mockStore := getMockStore(t)
|
||||
mockCacheProvider := getMockCacheProvider()
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider)
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider, logger)
|
||||
require.NoError(t, err)
|
||||
|
||||
cachedStore.Scheme().Get("123")
|
||||
@ -53,7 +55,7 @@ func TestSchemeStoreCache(t *testing.T) {
|
||||
t.Run("first call not cached, delete, and then not cached again", func(t *testing.T) {
|
||||
mockStore := getMockStore(t)
|
||||
mockCacheProvider := getMockCacheProvider()
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider)
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider, logger)
|
||||
require.NoError(t, err)
|
||||
|
||||
cachedStore.Scheme().Get("123")
|
||||
@ -66,7 +68,7 @@ func TestSchemeStoreCache(t *testing.T) {
|
||||
t.Run("first call not cached, permanent delete all, and then not cached again", func(t *testing.T) {
|
||||
mockStore := getMockStore(t)
|
||||
mockCacheProvider := getMockCacheProvider()
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider)
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider, logger)
|
||||
require.NoError(t, err)
|
||||
|
||||
cachedStore.Scheme().Get("123")
|
||||
|
@ -9,6 +9,7 @@ import (
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/mattermost/mattermost/server/public/shared/mlog"
|
||||
"github.com/mattermost/mattermost/server/v8/channels/store/storetest"
|
||||
"github.com/mattermost/mattermost/server/v8/channels/store/storetest/mocks"
|
||||
)
|
||||
@ -20,11 +21,12 @@ func TestTeamStore(t *testing.T) {
|
||||
func TestTeamStoreCache(t *testing.T) {
|
||||
fakeUserId := "123"
|
||||
fakeUserTeamIds := []string{"1", "2", "3"}
|
||||
logger := mlog.CreateConsoleTestLogger(t)
|
||||
|
||||
t.Run("first call not cached, second cached and returning same data", func(t *testing.T) {
|
||||
mockStore := getMockStore(t)
|
||||
mockCacheProvider := getMockCacheProvider()
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider)
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider, logger)
|
||||
require.NoError(t, err)
|
||||
|
||||
gotUserTeamIds, err := cachedStore.Team().GetUserTeamIds(fakeUserId, true)
|
||||
@ -41,7 +43,7 @@ func TestTeamStoreCache(t *testing.T) {
|
||||
t.Run("first call not cached, second force not cached", func(t *testing.T) {
|
||||
mockStore := getMockStore(t)
|
||||
mockCacheProvider := getMockCacheProvider()
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider)
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider, logger)
|
||||
require.NoError(t, err)
|
||||
|
||||
gotUserTeamIds, err := cachedStore.Team().GetUserTeamIds(fakeUserId, true)
|
||||
@ -58,7 +60,7 @@ func TestTeamStoreCache(t *testing.T) {
|
||||
t.Run("first call not cached, invalidate, and then not cached again", func(t *testing.T) {
|
||||
mockStore := getMockStore(t)
|
||||
mockCacheProvider := getMockCacheProvider()
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider)
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider, logger)
|
||||
require.NoError(t, err)
|
||||
|
||||
gotUserTeamIds, err := cachedStore.Team().GetUserTeamIds(fakeUserId, true)
|
||||
|
@ -10,6 +10,7 @@ import (
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/mattermost/mattermost/server/public/model"
|
||||
"github.com/mattermost/mattermost/server/public/shared/mlog"
|
||||
"github.com/mattermost/mattermost/server/v8/channels/store/storetest"
|
||||
"github.com/mattermost/mattermost/server/v8/channels/store/storetest/mocks"
|
||||
)
|
||||
@ -20,11 +21,12 @@ func TestTermsOfServiceStore(t *testing.T) {
|
||||
|
||||
func TestTermsOfServiceStoreTermsOfServiceCache(t *testing.T) {
|
||||
fakeTermsOfService := model.TermsOfService{Id: "123", CreateAt: 11111, UserId: "321", Text: "Terms of service test"}
|
||||
logger := mlog.CreateConsoleTestLogger(t)
|
||||
|
||||
t.Run("first call by latest not cached, second cached and returning same data", func(t *testing.T) {
|
||||
mockStore := getMockStore(t)
|
||||
mockCacheProvider := getMockCacheProvider()
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider)
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider, logger)
|
||||
require.NoError(t, err)
|
||||
|
||||
termsOfService, err := cachedStore.TermsOfService().GetLatest(true)
|
||||
@ -40,7 +42,7 @@ func TestTermsOfServiceStoreTermsOfServiceCache(t *testing.T) {
|
||||
t.Run("first call by id not cached, second cached and returning same data", func(t *testing.T) {
|
||||
mockStore := getMockStore(t)
|
||||
mockCacheProvider := getMockCacheProvider()
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider)
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider, logger)
|
||||
require.NoError(t, err)
|
||||
|
||||
termsOfService, err := cachedStore.TermsOfService().Get("123", true)
|
||||
@ -56,7 +58,7 @@ func TestTermsOfServiceStoreTermsOfServiceCache(t *testing.T) {
|
||||
t.Run("first call by id not cached, second force no cached", func(t *testing.T) {
|
||||
mockStore := getMockStore(t)
|
||||
mockCacheProvider := getMockCacheProvider()
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider)
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider, logger)
|
||||
require.NoError(t, err)
|
||||
|
||||
cachedStore.TermsOfService().Get("123", true)
|
||||
@ -68,7 +70,7 @@ func TestTermsOfServiceStoreTermsOfServiceCache(t *testing.T) {
|
||||
t.Run("first call latest not cached, second force no cached", func(t *testing.T) {
|
||||
mockStore := getMockStore(t)
|
||||
mockCacheProvider := getMockCacheProvider()
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider)
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider, logger)
|
||||
require.NoError(t, err)
|
||||
|
||||
cachedStore.TermsOfService().GetLatest(true)
|
||||
@ -80,7 +82,7 @@ func TestTermsOfServiceStoreTermsOfServiceCache(t *testing.T) {
|
||||
t.Run("first call by id force no cached, second not cached, third cached", func(t *testing.T) {
|
||||
mockStore := getMockStore(t)
|
||||
mockCacheProvider := getMockCacheProvider()
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider)
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider, logger)
|
||||
require.NoError(t, err)
|
||||
|
||||
cachedStore.TermsOfService().Get("123", false)
|
||||
@ -94,7 +96,7 @@ func TestTermsOfServiceStoreTermsOfServiceCache(t *testing.T) {
|
||||
t.Run("first call latest force no cached, second not cached, third cached", func(t *testing.T) {
|
||||
mockStore := getMockStore(t)
|
||||
mockCacheProvider := getMockCacheProvider()
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider)
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider, logger)
|
||||
require.NoError(t, err)
|
||||
|
||||
cachedStore.TermsOfService().GetLatest(false)
|
||||
@ -108,7 +110,7 @@ func TestTermsOfServiceStoreTermsOfServiceCache(t *testing.T) {
|
||||
t.Run("first call latest, second call by id cached", func(t *testing.T) {
|
||||
mockStore := getMockStore(t)
|
||||
mockCacheProvider := getMockCacheProvider()
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider)
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider, logger)
|
||||
require.NoError(t, err)
|
||||
|
||||
cachedStore.TermsOfService().GetLatest(true)
|
||||
@ -120,7 +122,7 @@ func TestTermsOfServiceStoreTermsOfServiceCache(t *testing.T) {
|
||||
t.Run("first call by id not cached, save, and then not cached again", func(t *testing.T) {
|
||||
mockStore := getMockStore(t)
|
||||
mockCacheProvider := getMockCacheProvider()
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider)
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider, logger)
|
||||
require.NoError(t, err)
|
||||
|
||||
cachedStore.TermsOfService().Get("123", false)
|
||||
@ -133,7 +135,7 @@ func TestTermsOfServiceStoreTermsOfServiceCache(t *testing.T) {
|
||||
t.Run("first get latest not cached, save new, then get latest, returning different data", func(t *testing.T) {
|
||||
mockStore := getMockStore(t)
|
||||
mockCacheProvider := getMockCacheProvider()
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider)
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider, logger)
|
||||
require.NoError(t, err)
|
||||
|
||||
cachedStore.TermsOfService().GetLatest(true)
|
||||
|
@ -10,8 +10,10 @@ import (
"sync"

"github.com/mattermost/mattermost/server/public/model"
"github.com/mattermost/mattermost/server/public/shared/mlog"
"github.com/mattermost/mattermost/server/v8/channels/store"
"github.com/mattermost/mattermost/server/v8/channels/store/sqlstore"
"github.com/mattermost/mattermost/server/v8/platform/services/cache"
)

type LocalCacheUserStore struct {
@ -74,9 +76,11 @@ func (s *LocalCacheUserStore) InvalidateProfileCacheForUser(userId string) {
}

func (s *LocalCacheUserStore) InvalidateProfilesInChannelCacheByUser(userId string) {
// TODO: use scan here
keys, err := s.rootStore.profilesInChannelCache.Keys()
if err == nil {
for _, key := range keys {
// TODO: use MGET here on batches of keys
var userMap map[string]*model.User
if err = s.rootStore.profilesInChannelCache.Get(key, &userMap); err == nil {
if _, userInCache := userMap[userId]; userInCache {
@ -154,22 +158,33 @@ func (s *LocalCacheUserStore) GetProfileByIds(ctx context.Context, userIds []str
remainingUserIds := make([]string, 0)

fromMaster := false
for _, userId := range userIds {
var cacheItem *model.User
if err := s.rootStore.doStandardReadCache(s.rootStore.userProfileByIdsCache, userId, &cacheItem); err == nil {
if options.Since == 0 || cacheItem.UpdateAt > options.Since {
users = append(users, cacheItem)
toPass := make([]any, 0, len(userIds))
for i := 0; i < len(userIds); i++ {
var user *model.User
toPass = append(toPass, &user)
}
errs := s.rootStore.doMultiReadCache(s.rootStore.userProfileByIdsCache, userIds, toPass)
for i, err := range errs {
if err != nil {
if err != cache.ErrKeyNotFound {
s.rootStore.logger.Warn("Error in UserStore.GetProfileByIds: ", mlog.Err(err))
}
} else {
// If it was invalidated, then we need to query master.
s.userProfileByIdsMut.Lock()
if s.userProfileByIdsInvalidations[userId] {
if s.userProfileByIdsInvalidations[userIds[i]] {
fromMaster = true
// And then remove the key from the map.
delete(s.userProfileByIdsInvalidations, userId)
delete(s.userProfileByIdsInvalidations, userIds[i])
}
s.userProfileByIdsMut.Unlock()
remainingUserIds = append(remainingUserIds, userId)
remainingUserIds = append(remainingUserIds, userIds[i])
} else {
gotUser := *(toPass[i].(**model.User))
if (gotUser != nil) && (options.Since == 0 || gotUser.UpdateAt > options.Since) {
users = append(users, gotUser)
} else if gotUser == nil {
s.rootStore.logger.Warn("Found nil user in GetProfileByIds. This is not expected")
}
}
}

@ -229,21 +244,34 @@ func (s *LocalCacheUserStore) GetMany(ctx context.Context, ids []string) ([]*mod
uniqIDs := dedup(ids)

fromMaster := false
for _, id := range uniqIDs {
var cachedUser *model.User
if err := s.rootStore.doStandardReadCache(s.rootStore.userProfileByIdsCache, id, &cachedUser); err == nil {
cachedUsers = append(cachedUsers, cachedUser)
} else {
toPass := make([]any, 0, len(uniqIDs))
for i := 0; i < len(uniqIDs); i++ {
var user *model.User
toPass = append(toPass, &user)
}

errs := s.rootStore.doMultiReadCache(s.rootStore.userProfileByIdsCache, uniqIDs, toPass)
for i, err := range errs {
if err != nil {
if err != cache.ErrKeyNotFound {
s.rootStore.logger.Warn("Error in UserStore.GetMany: ", mlog.Err(err))
}
// If it was invalidated, then we need to query master.
s.userProfileByIdsMut.Lock()
if s.userProfileByIdsInvalidations[id] {
if s.userProfileByIdsInvalidations[uniqIDs[i]] {
fromMaster = true
// And then remove the key from the map.
delete(s.userProfileByIdsInvalidations, id)
delete(s.userProfileByIdsInvalidations, uniqIDs[i])
}
s.userProfileByIdsMut.Unlock()

notCachedUserIds = append(notCachedUserIds, id)
notCachedUserIds = append(notCachedUserIds, uniqIDs[i])
} else {
gotUser := *(toPass[i].(**model.User))
if gotUser != nil {
cachedUsers = append(cachedUsers, gotUser)
} else {
s.rootStore.logger.Warn("Found nil user in GetMany. This is not expected")
}
}
}
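Note: both user-store hunks follow the same destination convention as the role store: each key gets a `**model.User` slot, the batch read decodes hits in place, and the caller later tells a real hit from an unfilled slot by dereferencing and checking for nil (hence the "Found nil user" warnings). A stripped-down illustration of that convention, using a placeholder `User` type rather than `model.User`:

```go
package cachesketch

type User struct{ Id string }

// destinationsFor builds one **User slot per key; a multi-read decodes each
// hit into its slot, leaving misses (and failed decodes) as nil pointers.
func destinationsFor(keys []string) []any {
	toPass := make([]any, 0, len(keys))
	for range keys {
		var u *User
		toPass = append(toPass, &u)
	}
	return toPass
}

// decodedAt mirrors gotUser := *(toPass[i].(**User)) from the hunks above.
func decodedAt(toPass []any, i int) (*User, bool) {
	u := *(toPass[i].(**User))
	return u, u != nil
}
```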
|
||||
|
@ -12,6 +12,7 @@ import (
|
||||
|
||||
"github.com/mattermost/mattermost/server/public/model"
|
||||
"github.com/mattermost/mattermost/server/public/plugin/plugintest/mock"
|
||||
"github.com/mattermost/mattermost/server/public/shared/mlog"
|
||||
"github.com/mattermost/mattermost/server/v8/channels/store"
|
||||
"github.com/mattermost/mattermost/server/v8/channels/store/storetest"
|
||||
"github.com/mattermost/mattermost/server/v8/channels/store/storetest/mocks"
|
||||
@ -28,11 +29,12 @@ func TestUserStoreCache(t *testing.T) {
|
||||
AuthData: model.NewPointer("authData"),
|
||||
AuthService: "authService",
|
||||
}}
|
||||
logger := mlog.CreateConsoleTestLogger(t)
|
||||
|
||||
t.Run("first call not cached, second cached and returning same data", func(t *testing.T) {
|
||||
mockStore := getMockStore(t)
|
||||
mockCacheProvider := getMockCacheProvider()
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider)
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider, logger)
|
||||
require.NoError(t, err)
|
||||
|
||||
gotUser, err := cachedStore.User().GetProfileByIds(context.Background(), fakeUserIds, &store.UserGetByIdsOpts{}, true)
|
||||
@ -47,7 +49,7 @@ func TestUserStoreCache(t *testing.T) {
|
||||
t.Run("first call not cached, second force not cached", func(t *testing.T) {
|
||||
mockStore := getMockStore(t)
|
||||
mockCacheProvider := getMockCacheProvider()
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider)
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider, logger)
|
||||
require.NoError(t, err)
|
||||
|
||||
gotUser, err := cachedStore.User().GetProfileByIds(context.Background(), fakeUserIds, &store.UserGetByIdsOpts{}, true)
|
||||
@ -62,7 +64,7 @@ func TestUserStoreCache(t *testing.T) {
|
||||
t.Run("first call not cached, invalidate, and then not cached again", func(t *testing.T) {
|
||||
mockStore := getMockStore(t)
|
||||
mockCacheProvider := getMockCacheProvider()
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider)
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider, logger)
|
||||
require.NoError(t, err)
|
||||
|
||||
gotUser, err := cachedStore.User().GetProfileByIds(context.Background(), fakeUserIds, &store.UserGetByIdsOpts{}, true)
|
||||
@ -78,7 +80,7 @@ func TestUserStoreCache(t *testing.T) {
|
||||
t.Run("should always return a copy of the stored data", func(t *testing.T) {
|
||||
mockStore := getMockStore(t)
|
||||
mockCacheProvider := getMockCacheProvider()
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider)
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider, logger)
|
||||
require.NoError(t, err)
|
||||
|
||||
storedUsers, err := mockStore.User().GetProfileByIds(context.Background(), fakeUserIds, &store.UserGetByIdsOpts{}, false)
|
||||
@ -119,10 +121,11 @@ func TestUserStoreCache(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestUserStoreGetAllProfiles(t *testing.T) {
|
||||
logger := mlog.CreateConsoleTestLogger(t)
|
||||
t.Run("first call not cached, second cached and returning same data", func(t *testing.T) {
|
||||
mockStore := getMockStore(t)
|
||||
mockCacheProvider := getMockCacheProvider()
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider)
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider, logger)
|
||||
require.NoError(t, err)
|
||||
|
||||
users, err := cachedStore.User().GetAllProfiles(&model.UserGetOptions{Page: 0, PerPage: 100})
|
||||
@ -141,7 +144,7 @@ func TestUserStoreGetAllProfiles(t *testing.T) {
|
||||
t.Run("different page sizes aren't cached", func(t *testing.T) {
|
||||
mockStore := getMockStore(t)
|
||||
mockCacheProvider := getMockCacheProvider()
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider)
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider, logger)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, _ = cachedStore.User().GetAllProfiles(&model.UserGetOptions{Page: 0, PerPage: 100})
|
||||
@ -161,11 +164,12 @@ func TestUserStoreProfilesInChannelCache(t *testing.T) {
|
||||
fakeMap := map[string]*model.User{
|
||||
fakeUserId: {Id: "456"},
|
||||
}
|
||||
logger := mlog.CreateConsoleTestLogger(t)
|
||||
|
||||
t.Run("first call not cached, second cached and returning same data", func(t *testing.T) {
|
||||
mockStore := getMockStore(t)
|
||||
mockCacheProvider := getMockCacheProvider()
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider)
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider, logger)
|
||||
require.NoError(t, err)
|
||||
|
||||
gotMap, err := cachedStore.User().GetAllProfilesInChannel(context.Background(), fakeChannelId, true)
|
||||
@ -180,7 +184,7 @@ func TestUserStoreProfilesInChannelCache(t *testing.T) {
|
||||
t.Run("first call not cached, second force not cached", func(t *testing.T) {
|
||||
mockStore := getMockStore(t)
|
||||
mockCacheProvider := getMockCacheProvider()
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider)
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider, logger)
|
||||
require.NoError(t, err)
|
||||
|
||||
gotMap, err := cachedStore.User().GetAllProfilesInChannel(context.Background(), fakeChannelId, true)
|
||||
@ -195,7 +199,7 @@ func TestUserStoreProfilesInChannelCache(t *testing.T) {
|
||||
t.Run("first call not cached, invalidate by channel, and then not cached again", func(t *testing.T) {
|
||||
mockStore := getMockStore(t)
|
||||
mockCacheProvider := getMockCacheProvider()
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider)
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider, logger)
|
||||
require.NoError(t, err)
|
||||
|
||||
gotMap, err := cachedStore.User().GetAllProfilesInChannel(context.Background(), fakeChannelId, true)
|
||||
@ -212,7 +216,7 @@ func TestUserStoreProfilesInChannelCache(t *testing.T) {
|
||||
t.Run("first call not cached, invalidate by user, and then not cached again", func(t *testing.T) {
|
||||
mockStore := getMockStore(t)
|
||||
mockCacheProvider := getMockCacheProvider()
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider)
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider, logger)
|
||||
require.NoError(t, err)
|
||||
|
||||
gotMap, err := cachedStore.User().GetAllProfilesInChannel(context.Background(), fakeChannelId, true)
|
||||
@ -234,10 +238,11 @@ func TestUserStoreGetCache(t *testing.T) {
|
||||
AuthData: model.NewPointer("authData"),
|
||||
AuthService: "authService",
|
||||
}
|
||||
logger := mlog.CreateConsoleTestLogger(t)
|
||||
t.Run("first call not cached, second cached and returning same data", func(t *testing.T) {
|
||||
mockStore := getMockStore(t)
|
||||
mockCacheProvider := getMockCacheProvider()
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider)
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider, logger)
|
||||
require.NoError(t, err)
|
||||
|
||||
gotUser, err := cachedStore.User().Get(context.Background(), fakeUserId)
|
||||
@ -252,7 +257,7 @@ func TestUserStoreGetCache(t *testing.T) {
|
||||
t.Run("first call not cached, invalidate, and then not cached again", func(t *testing.T) {
|
||||
mockStore := getMockStore(t)
|
||||
mockCacheProvider := getMockCacheProvider()
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider)
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider, logger)
|
||||
require.NoError(t, err)
|
||||
|
||||
gotUser, err := cachedStore.User().Get(context.Background(), fakeUserId)
|
||||
@ -269,7 +274,7 @@ func TestUserStoreGetCache(t *testing.T) {
|
||||
t.Run("should always return a copy of the stored data", func(t *testing.T) {
|
||||
mockStore := getMockStore(t)
|
||||
mockCacheProvider := getMockCacheProvider()
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider)
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider, logger)
|
||||
require.NoError(t, err)
|
||||
|
||||
storedUser, err := mockStore.User().Get(context.Background(), fakeUserId)
|
||||
@ -309,10 +314,11 @@ func TestUserStoreGetManyCache(t *testing.T) {
|
||||
AuthData: model.NewPointer("authData"),
|
||||
AuthService: "authService",
|
||||
}
|
||||
logger := mlog.CreateConsoleTestLogger(t)
|
||||
t.Run("first call not cached, second cached and returning same data", func(t *testing.T) {
|
||||
mockStore := getMockStore(t)
|
||||
mockCacheProvider := getMockCacheProvider()
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider)
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider, logger)
|
||||
require.NoError(t, err)
|
||||
|
||||
gotUsers, err := cachedStore.User().GetMany(context.Background(), []string{fakeUser.Id, otherFakeUser.Id})
|
||||
@ -330,7 +336,7 @@ func TestUserStoreGetManyCache(t *testing.T) {
|
||||
t.Run("first call not cached, invalidate one user, and then check that one is cached and one is fetched from db", func(t *testing.T) {
|
||||
mockStore := getMockStore(t)
|
||||
mockCacheProvider := getMockCacheProvider()
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider)
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider, logger)
|
||||
require.NoError(t, err)
|
||||
|
||||
gotUsers, err := cachedStore.User().GetMany(context.Background(), []string{fakeUser.Id, otherFakeUser.Id})
|
||||
|
@ -10,6 +10,7 @@ import (
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/mattermost/mattermost/server/public/model"
|
||||
"github.com/mattermost/mattermost/server/public/shared/mlog"
|
||||
"github.com/mattermost/mattermost/server/v8/channels/store/storetest"
|
||||
"github.com/mattermost/mattermost/server/v8/channels/store/storetest/mocks"
|
||||
)
|
||||
@ -20,11 +21,12 @@ func TestWebhookStore(t *testing.T) {
|
||||
|
||||
func TestWebhookStoreCache(t *testing.T) {
|
||||
fakeWebhook := model.IncomingWebhook{Id: "123"}
|
||||
logger := mlog.CreateConsoleTestLogger(t)
|
||||
|
||||
t.Run("first call not cached, second cached and returning same data", func(t *testing.T) {
|
||||
mockStore := getMockStore(t)
|
||||
mockCacheProvider := getMockCacheProvider()
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider)
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider, logger)
|
||||
require.NoError(t, err)
|
||||
|
||||
incomingWebhook, err := cachedStore.Webhook().GetIncoming("123", true)
|
||||
@ -40,7 +42,7 @@ func TestWebhookStoreCache(t *testing.T) {
|
||||
t.Run("first call not cached, second force not cached", func(t *testing.T) {
|
||||
mockStore := getMockStore(t)
|
||||
mockCacheProvider := getMockCacheProvider()
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider)
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider, logger)
|
||||
require.NoError(t, err)
|
||||
|
||||
cachedStore.Webhook().GetIncoming("123", true)
|
||||
@ -52,7 +54,7 @@ func TestWebhookStoreCache(t *testing.T) {
|
||||
t.Run("first call not cached, invalidate, and then not cached again", func(t *testing.T) {
|
||||
mockStore := getMockStore(t)
|
||||
mockCacheProvider := getMockCacheProvider()
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider)
|
||||
cachedStore, err := NewLocalCacheLayer(mockStore, nil, nil, mockCacheProvider, logger)
|
||||
require.NoError(t, err)
|
||||
|
||||
cachedStore.Webhook().GetIncoming("123", true)
|
||||
|
@ -101,7 +101,7 @@ func setupTestHelper(tb testing.TB, includeCacheLayer bool, options []app.Option
|
||||
if includeCacheLayer {
|
||||
// Adds the cache layer to the test store
|
||||
var st localcachelayer.LocalCacheStore
|
||||
st, err = localcachelayer.NewLocalCacheLayer(s.Store(), s.GetMetrics(), s.Platform().Cluster(), s.Platform().CacheProvider())
|
||||
st, err = localcachelayer.NewLocalCacheLayer(s.Store(), s.GetMetrics(), s.Platform().Cluster(), s.Platform().CacheProvider(), testLogger)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
@ -62,6 +62,7 @@ type MetricsInterface interface {
ObserveFilesSearchDuration(elapsed float64)
ObserveStoreMethodDuration(method, success string, elapsed float64)
ObserveAPIEndpointDuration(endpoint, method, statusCode, originClient, pageLoadContext string, elapsed float64)
ObserveRedisEndpointDuration(cacheName, operation string, elapsed float64)
IncrementPostIndexCounter()
IncrementFileIndexCounter()
IncrementUserIndexCounter()

@ -403,6 +403,11 @@ func (_m *MetricsInterface) ObservePostsSearchDuration(elapsed float64) {
_m.Called(elapsed)
}

// ObserveRedisEndpointDuration provides a mock function with given fields: cacheName, operation, elapsed
func (_m *MetricsInterface) ObserveRedisEndpointDuration(cacheName string, operation string, elapsed float64) {
_m.Called(cacheName, operation, elapsed)
}

// ObserveRemoteClusterClockSkew provides a mock function with given fields: remoteID, skew
func (_m *MetricsInterface) ObserveRemoteClusterClockSkew(remoteID string, skew float64) {
_m.Called(remoteID, skew)

@ -156,6 +156,7 @@ type MetricsInterfaceImpl struct {
SearchFileSearchesDuration prometheus.Histogram
StoreTimesHistograms *prometheus.HistogramVec
APITimesHistograms *prometheus.HistogramVec
RedisTimesHistograms *prometheus.HistogramVec
SearchPostIndexCounter prometheus.Counter
SearchFileIndexCounter prometheus.Counter
SearchUserIndexCounter prometheus.Counter
@ -764,6 +765,18 @@ func New(ps *platform.PlatformService, driver, dataSource string) *MetricsInterf
)
m.Registry.MustRegister(m.APITimesHistograms)

m.RedisTimesHistograms = prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Namespace: MetricsNamespace,
Subsystem: MetricsSubsystemDB,
Name: "cache_time",
Help: "Time to execute the cache handler",
ConstLabels: additionalLabels,
},
[]string{"cache_name", "operation"},
)
m.Registry.MustRegister(m.RedisTimesHistograms)

m.SearchPostIndexCounter = prometheus.NewCounter(prometheus.CounterOpts{
Namespace: MetricsNamespace,
Subsystem: MetricsSubsystemSearch,
@ -1499,6 +1512,13 @@ func (mi *MetricsInterfaceImpl) ObserveAPIEndpointDuration(handler, method, stat
mi.APITimesHistograms.With(prometheus.Labels{"handler": handler, "method": method, "status_code": statusCode, "origin_client": originClient, "page_load_context": pageLoadContext}).Observe(elapsed)
}

func (mi *MetricsInterfaceImpl) ObserveRedisEndpointDuration(cacheName, operation string, elapsed float64) {
mi.RedisTimesHistograms.With(prometheus.Labels{
"cache_name": cacheName,
"operation": operation,
}).Observe(elapsed)
}

func (mi *MetricsInterfaceImpl) IncrementClusterEventType(eventType model.ClusterEvent) {
if event, ok := mi.ClusterEventMap[eventType]; ok {
event.Inc()
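Note: the new ObserveRedisEndpointDuration hook and the `cache_name`/`operation` histogram registered above give the Redis-backed cache (not shown in this excerpt) a place to report per-call latency. A hedged sketch of how a backend could wrap each call with that hook; `MetricsRecorder` and `timeOp` are illustrative names, only the observer's signature comes from this diff:

```go
package cachesketch

import "time"

// MetricsRecorder is the slice of einterfaces.MetricsInterface this sketch needs.
type MetricsRecorder interface {
	ObserveRedisEndpointDuration(cacheName, operation string, elapsed float64)
}

// timeOp runs one cache operation and reports its wall-clock duration under
// the cache_name/operation labels of the new histogram. Elapsed is passed as
// seconds here, matching how the surrounding store/API timers are usually fed,
// though the unit is not pinned down in this excerpt.
func timeOp(m MetricsRecorder, cacheName, operation string, fn func() error) error {
	start := time.Now()
	err := fn()
	if m != nil { // metrics are optional; tests pass nil for them elsewhere
		m.ObserveRedisEndpointDuration(cacheName, operation, time.Since(start).Seconds())
	}
	return err
}
```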
|
@ -55,6 +55,7 @@ require (
|
||||
github.com/pkg/errors v0.9.1
|
||||
github.com/prometheus/client_golang v1.19.1
|
||||
github.com/prometheus/client_model v0.6.1
|
||||
github.com/redis/rueidis v1.0.41
|
||||
github.com/reflog/dateconstraints v0.2.1
|
||||
github.com/rs/cors v1.11.0
|
||||
github.com/rudderlabs/analytics-go v3.3.3+incompatible
|
||||
@ -72,7 +73,7 @@ require (
|
||||
github.com/vmihailenco/msgpack/v5 v5.4.1
|
||||
github.com/wiggin77/merror v1.0.5
|
||||
github.com/xtgo/uuid v0.0.0-20140804021211-a0b114877d4c
|
||||
github.com/yuin/goldmark v1.7.4
|
||||
github.com/yuin/goldmark v1.4.13
|
||||
golang.org/x/crypto v0.25.0
|
||||
golang.org/x/image v0.18.0
|
||||
golang.org/x/net v0.27.0
|
||||
|
@ -436,6 +436,8 @@ github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DV
|
||||
github.com/olekukonko/tablewriter v0.0.0-20180506121414-d4647c9c7a84/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
|
||||
github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
|
||||
github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=
|
||||
github.com/onsi/gomega v1.31.1 h1:KYppCUK+bUgAZwHOu7EXVBKyQA6ILvOESHkn/tgoqvo=
|
||||
github.com/onsi/gomega v1.31.1/go.mod h1:y40C95dwAD1Nz36SsEnxvfFe8FFfNxzI5eJ0EYGyAy0=
|
||||
github.com/oov/psd v0.0.0-20220121172623-5db5eafcecbb h1:JF9kOhBBk4WPF7luXFu5yR+WgaFm9L/KiHJHhU9vDwA=
|
||||
github.com/oov/psd v0.0.0-20220121172623-5db5eafcecbb/go.mod h1:GHI1bnmAcbp96z6LNfBJvtrjxhaXGkbsk967utPlvL8=
|
||||
github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs=
|
||||
@ -493,6 +495,8 @@ github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0leargg
|
||||
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
|
||||
github.com/redis/go-redis/v9 v9.6.0 h1:NLck+Rab3AOTHw21CGRpvQpgTrAU4sgdCswqGtlhGRA=
|
||||
github.com/redis/go-redis/v9 v9.6.0/go.mod h1:hdY0cQFCN4fnSYT6TkisLufl/4W5UIXyv0b/CLO2V2M=
|
||||
github.com/redis/rueidis v1.0.41 h1:Ls5Dto11Tknh8CFbaMTSBD0fgMlTqJWq8/Df4LPaeQ0=
|
||||
github.com/redis/rueidis v1.0.41/go.mod h1:bnbkk4+CkXZgDPEbUtSos/o55i4RhFYYesJ4DS2zmq0=
|
||||
github.com/reflog/dateconstraints v0.2.1 h1:Hz1n2Q1vEm0Rj5gciDQcCN1iPBwfFjxUJy32NknGP/s=
|
||||
github.com/reflog/dateconstraints v0.2.1/go.mod h1:Ax8AxTBcJc3E/oVS2hd2j7RDM/5MDtuPwuR7lIHtPLo=
|
||||
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE=
|
||||
@ -642,9 +646,8 @@ github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8/go.mod h1:HUYIGzjTL3rfEspMx
|
||||
github.com/xtgo/uuid v0.0.0-20140804021211-a0b114877d4c h1:3lbZUMbMiGUW/LMkfsEABsc5zNT9+b1CvsJx47JzJ8g=
|
||||
github.com/xtgo/uuid v0.0.0-20140804021211-a0b114877d4c/go.mod h1:UrdRz5enIKZ63MEE3IF9l2/ebyx59GyGgPi+tICQdmM=
|
||||
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
|
||||
github.com/yuin/goldmark v1.4.13 h1:fVcFKWvrslecOb/tg+Cc05dkeYx540o0FuFt3nUVDoE=
|
||||
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
|
||||
github.com/yuin/goldmark v1.7.4 h1:BDXOHExt+A7gwPCJgPIIq7ENvceR7we7rOS9TNoLZeg=
|
||||
github.com/yuin/goldmark v1.7.4/go.mod h1:uzxRWxtg69N339t3louHJ7+O03ezfj6PlliRlaOzY1E=
|
||||
go.etcd.io/bbolt v1.3.10 h1:+BqfJTcCzTItrop8mq/lbzL8wSGtj94UO/3U31shqG0=
|
||||
go.etcd.io/bbolt v1.3.10/go.mod h1:bK3UQLPJZly7IlNmV7uVHJDxfe5aK9Ll93e/74Y9oEQ=
|
||||
go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA=
|
||||
|
@ -8530,6 +8530,10 @@
|
||||
"id": "model.config.is_valid.bleve_search.filename.app_error",
|
||||
"translation": "Bleve IndexingDir setting must be set when Bleve EnableIndexing is set to true"
|
||||
},
|
||||
{
|
||||
"id": "model.config.is_valid.cache_type.app_error",
|
||||
"translation": "Cache type must be either lru or redis."
|
||||
},
|
||||
{
|
||||
"id": "model.config.is_valid.cluster_email_batching.app_error",
|
||||
"translation": "Unable to enable email batching when clustering is enabled."
|
||||
@ -8642,6 +8646,10 @@
|
||||
"id": "model.config.is_valid.email_security.app_error",
|
||||
"translation": "Invalid connection security for email settings. Must be '', 'TLS', or 'STARTTLS'."
|
||||
},
|
||||
{
|
||||
"id": "model.config.is_valid.empty_redis_address.app_error",
|
||||
"translation": "RedisAddress must be specified for redis cache type."
|
||||
},
|
||||
{
|
||||
"id": "model.config.is_valid.encrypt_sql.app_error",
|
||||
"translation": "Invalid at rest encrypt key for SQL settings. Must be 32 chars or more."
|
||||
@ -8682,6 +8690,10 @@
|
||||
"id": "model.config.is_valid.import.retention_days_too_low.app_error",
|
||||
"translation": "Invalid value for RetentionDays. Value is too low."
|
||||
},
|
||||
{
|
||||
"id": "model.config.is_valid.invalid_redis_db.app_error",
|
||||
"translation": "Redis DB must have a value greater or equal to zero."
|
||||
},
|
||||
{
|
||||
"id": "model.config.is_valid.ldap_basedn",
|
||||
"translation": "AD/LDAP field \"BaseDN\" is required."
|
||||
|
server/platform/services/cache/cache.go (vendored)
@ -34,6 +34,8 @@ type Cache interface {
// Return ErrKeyNotFound if the key is missing from the cache
Get(key string, value any) error

GetMulti(keys []string, values []any) []error

// Remove deletes the value for a given key.
Remove(key string) error
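Note: GetMulti keeps Get's decode-into-destination contract but returns one error per key, index-aligned with the input slice, so a caller can treat ErrKeyNotFound entries as misses without failing the whole batch. A minimal usage sketch, written as a hypothetical test against this package's LRU implementation and assuming, as the package's own tests do, that Set/Get round-trip a plain string:

```go
package cache_test

import (
	"testing"

	"github.com/stretchr/testify/require"

	"github.com/mattermost/mattermost/server/v8/platform/services/cache"
)

func TestGetMultiSketch(t *testing.T) {
	c := cache.NewLRU(&cache.CacheOptions{Size: 16})
	require.NoError(t, c.Set("present", "hello"))

	var hit, miss string
	errs := c.GetMulti([]string{"present", "absent"}, []any{&hit, &miss})

	require.NoError(t, errs[0]) // hit: the destination was filled in
	require.Equal(t, "hello", hit)
	require.Equal(t, cache.ErrKeyNotFound, errs[1]) // miss for the unknown key
}
```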
server/platform/services/cache/lru.go (vendored)
@ -27,17 +27,6 @@ type LRU struct {
invalidateClusterEvent model.ClusterEvent
}

// LRUOptions contains options for initializing LRU cache
type LRUOptions struct {
Name string
Size int
DefaultExpiry time.Duration
InvalidateClusterEvent model.ClusterEvent
// StripedBuckets is used only by LRUStriped and shouldn't be greater than the number
// of CPUs available on the machine running this cache.
StripedBuckets int
}

// entry is used to hold a value in the evictList.
type entry struct {
key string
@ -47,7 +36,7 @@ type entry struct {
}

// NewLRU creates an LRU of the given size.
func NewLRU(opts LRUOptions) Cache {
func NewLRU(opts *CacheOptions) Cache {
return &LRU{
name: opts.Name,
size: opts.Size,
@ -92,6 +81,15 @@ func (l *LRU) Get(key string, value any) error {
return l.get(key, value)
}

func (l *LRU) GetMulti(keys []string, values []any) []error {
errs := make([]error, 0, len(values))
for i, key := range keys {
errs = append(errs, l.get(key, values[i]))
}

return errs
}

// Remove deletes the value for a key.
func (l *LRU) Remove(key string) error {
l.lock.Lock()
server/platform/services/cache/lru_striped.go (vendored)
@ -82,6 +82,14 @@ func (L LRUStriped) Get(key string, value any) error {
return L.keyBucket(key).Get(key, value)
}

func (L LRUStriped) GetMulti(keys []string, values []any) []error {
errs := make([]error, 0, len(values))
for i, key := range keys {
errs = append(errs, L.keyBucket(key).Get(key, values[i]))
}
return errs
}

// Remove does the same as LRU.Remove
func (L LRUStriped) Remove(key string) error {
return L.keyBucket(key).Remove(key)
@ -119,12 +127,12 @@ func (L LRUStriped) Name() string {
return L.name
}

// NewLRUStriped creates a striped LRU cache using the special LRUOptions.StripedBuckets value.
// See LRUStriped and LRUOptions for more details.
// NewLRUStriped creates a striped LRU cache using the special CacheOptions.StripedBuckets value.
// See LRUStriped and CacheOptions for more details.
//
// Not that in order to prevent false eviction, this LRU cache adds 10% (computation is rounded up) of the
// requested size to the total cache size.
func NewLRUStriped(opts LRUOptions) (Cache, error) {
func NewLRUStriped(opts *CacheOptions) (Cache, error) {
if opts.StripedBuckets == 0 {
return nil, fmt.Errorf("number of buckets is mandatory")
}
|
@ -19,7 +19,7 @@ const (
|
||||
)
|
||||
|
||||
func BenchmarkLRUStriped(b *testing.B) {
|
||||
opts := cache.LRUOptions{
|
||||
opts := cache.CacheOptions{
|
||||
Name: "",
|
||||
Size: 128,
|
||||
DefaultExpiry: 0,
|
||||
@ -27,7 +27,7 @@ func BenchmarkLRUStriped(b *testing.B) {
|
||||
StripedBuckets: runtime.NumCPU() - 1,
|
||||
}
|
||||
|
||||
cache, err := cache.NewLRUStriped(opts)
|
||||
cache, err := cache.NewLRUStriped(&opts)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
@ -27,7 +27,7 @@ func makeLRUPredictableTestData(num int) [][2]string {
|
||||
}
|
||||
|
||||
func TestNewLRUStriped(t *testing.T) {
|
||||
scache, err := NewLRUStriped(LRUOptions{StripedBuckets: 3, Size: 20})
|
||||
scache, err := NewLRUStriped(&CacheOptions{StripedBuckets: 3, Size: 20})
|
||||
require.NoError(t, err)
|
||||
|
||||
cache := scache.(LRUStriped)
|
||||
@ -41,7 +41,7 @@ func TestNewLRUStriped(t *testing.T) {
|
||||
func TestLRUStripedKeyDistribution(t *testing.T) {
|
||||
dataset := makeLRUPredictableTestData(100)
|
||||
|
||||
scache, err := NewLRUStriped(LRUOptions{StripedBuckets: 4, Size: len(dataset)})
|
||||
scache, err := NewLRUStriped(&CacheOptions{StripedBuckets: 4, Size: len(dataset)})
|
||||
require.NoError(t, err)
|
||||
cache := scache.(LRUStriped)
|
||||
for _, kv := range dataset {
|
||||
@ -66,7 +66,7 @@ func TestLRUStripedKeyDistribution(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestLRUStriped_Size(t *testing.T) {
|
||||
scache, err := NewLRUStriped(LRUOptions{StripedBuckets: 2, Size: 128})
|
||||
scache, err := NewLRUStriped(&CacheOptions{StripedBuckets: 2, Size: 128})
|
||||
require.NoError(t, err)
|
||||
cache := scache.(LRUStriped)
|
||||
acc := 0
|
||||
@ -77,7 +77,7 @@ func TestLRUStriped_Size(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestLRUStriped_HashKey(t *testing.T) {
|
||||
scache, err := NewLRUStriped(LRUOptions{StripedBuckets: 2, Size: 128})
|
||||
scache, err := NewLRUStriped(&CacheOptions{StripedBuckets: 2, Size: 128})
|
||||
require.NoError(t, err)
|
||||
cache := scache.(LRUStriped)
|
||||
first := cache.hashkeyMapHash("key")
|
||||
@ -87,7 +87,7 @@ func TestLRUStriped_HashKey(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestLRUStriped_Get(t *testing.T) {
|
||||
cache, err := NewLRUStriped(LRUOptions{StripedBuckets: 4, Size: 128})
|
||||
cache, err := NewLRUStriped(&CacheOptions{StripedBuckets: 4, Size: 128})
|
||||
require.NoError(t, err)
|
||||
var out string
|
||||
require.Equal(t, ErrKeyNotFound, cache.Get("key", &out))
|
||||
|
server/platform/services/cache/lru_test.go (vendored)
@ -16,7 +16,7 @@ import (
|
||||
)
|
||||
|
||||
func TestLRU(t *testing.T) {
|
||||
l := NewLRU(LRUOptions{
|
||||
l := NewLRU(&CacheOptions{
|
||||
Size: 128,
|
||||
DefaultExpiry: 0,
|
||||
InvalidateClusterEvent: "",
|
||||
@ -84,7 +84,7 @@ func TestLRU(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestLRUExpire(t *testing.T) {
|
||||
l := NewLRU(LRUOptions{
|
||||
l := NewLRU(&CacheOptions{
|
||||
Size: 128,
|
||||
DefaultExpiry: 1 * time.Second,
|
||||
InvalidateClusterEvent: "",
|
||||
@ -106,7 +106,7 @@ func TestLRUExpire(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestLRUMarshalUnMarshal(t *testing.T) {
|
||||
l := NewLRU(LRUOptions{
|
||||
l := NewLRU(&CacheOptions{
|
||||
Size: 1,
|
||||
DefaultExpiry: 0,
|
||||
InvalidateClusterEvent: "",
|
||||
@ -295,7 +295,7 @@ func BenchmarkLRU(b *testing.B) {
|
||||
|
||||
b.Run("simple=new", func(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
l2 := NewLRU(LRUOptions{
|
||||
l2 := NewLRU(&CacheOptions{
|
||||
Size: 1,
|
||||
DefaultExpiry: 0,
|
||||
InvalidateClusterEvent: "",
|
||||
@ -345,7 +345,7 @@ func BenchmarkLRU(b *testing.B) {
|
||||
|
||||
b.Run("complex=new", func(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
l2 := NewLRU(LRUOptions{
|
||||
l2 := NewLRU(&CacheOptions{
|
||||
Size: 1,
|
||||
DefaultExpiry: 0,
|
||||
InvalidateClusterEvent: "",
|
||||
@ -428,7 +428,7 @@ func BenchmarkLRU(b *testing.B) {
|
||||
|
||||
b.Run("User=new", func(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
l2 := NewLRU(LRUOptions{
|
||||
l2 := NewLRU(&CacheOptions{
|
||||
Size: 1,
|
||||
DefaultExpiry: 0,
|
||||
InvalidateClusterEvent: "",
|
||||
@ -461,7 +461,7 @@ func BenchmarkLRU(b *testing.B) {
|
||||
|
||||
b.Run("UserMap=new", func(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
l2 := NewLRU(LRUOptions{
|
||||
l2 := NewLRU(&CacheOptions{
|
||||
Size: 1,
|
||||
DefaultExpiry: 0,
|
||||
InvalidateClusterEvent: "",
|
||||
@ -540,7 +540,7 @@ func BenchmarkLRU(b *testing.B) {
|
||||
|
||||
b.Run("Post=new", func(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
l2 := NewLRU(LRUOptions{
|
||||
l2 := NewLRU(&CacheOptions{
|
||||
Size: 1,
|
||||
DefaultExpiry: 0,
|
||||
InvalidateClusterEvent: "",
|
||||
@ -564,7 +564,7 @@ func BenchmarkLRU(b *testing.B) {
|
||||
|
||||
b.Run("Status=new", func(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
l2 := NewLRU(LRUOptions{
|
||||
l2 := NewLRU(&CacheOptions{
|
||||
Size: 1,
|
||||
DefaultExpiry: 0,
|
||||
InvalidateClusterEvent: "",
|
||||
@ -600,7 +600,7 @@ func BenchmarkLRU(b *testing.B) {
|
||||
|
||||
b.Run("Session=new", func(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
l2 := NewLRU(LRUOptions{
|
||||
l2 := NewLRU(&CacheOptions{
|
||||
Size: 1,
|
||||
DefaultExpiry: 0,
|
||||
InvalidateClusterEvent: "",
|
||||
@ -616,7 +616,7 @@ func BenchmarkLRU(b *testing.B) {
|
||||
}
|
||||
|
||||
func TestLRURace(t *testing.T) {
|
||||
l2 := NewLRU(LRUOptions{
|
||||
l2 := NewLRU(&CacheOptions{
|
||||
Size: 1,
|
||||
DefaultExpiry: 0,
|
||||
InvalidateClusterEvent: "",
|
||||
|
server/platform/services/cache/mocks/Cache.go (vendored)
@ -1,10 +1,13 @@
|
||||
// Code generated by mockery v1.0.0. DO NOT EDIT.
|
||||
// Code generated by mockery v2.42.2. DO NOT EDIT.
|
||||
|
||||
// Regenerate this file using `make cache-mocks`.
|
||||
|
||||
package mocks
|
||||
|
||||
import (
|
||||
time "time"
|
||||
|
||||
model "github.com/mattermost/mattermost/server/public/model"
|
||||
mock "github.com/stretchr/testify/mock"
|
||||
)
|
||||
|
||||
@ -14,11 +17,15 @@ type Cache struct {
|
||||
}
|
||||
|
||||
// Get provides a mock function with given fields: key, value
|
||||
func (_m *Cache) Get(key string, value any) error {
|
||||
func (_m *Cache) Get(key string, value interface{}) error {
|
||||
ret := _m.Called(key, value)
|
||||
|
||||
if len(ret) == 0 {
|
||||
panic("no return value specified for Get")
|
||||
}
|
||||
|
||||
var r0 error
|
||||
if rf, ok := ret.Get(0).(func(string, any) error); ok {
|
||||
if rf, ok := ret.Get(0).(func(string, interface{}) error); ok {
|
||||
r0 = rf(key, value)
|
||||
} else {
|
||||
r0 = ret.Error(0)
|
||||
@ -28,14 +35,38 @@ func (_m *Cache) Get(key string, value any) error {
|
||||
}
|
||||
|
||||
// GetInvalidateClusterEvent provides a mock function with given fields:
|
||||
func (_m *Cache) GetInvalidateClusterEvent() string {
|
||||
func (_m *Cache) GetInvalidateClusterEvent() model.ClusterEvent {
|
||||
ret := _m.Called()
|
||||
|
||||
var r0 string
|
||||
if rf, ok := ret.Get(0).(func() string); ok {
|
||||
if len(ret) == 0 {
|
||||
panic("no return value specified for GetInvalidateClusterEvent")
|
||||
}
|
||||
|
||||
var r0 model.ClusterEvent
|
||||
if rf, ok := ret.Get(0).(func() model.ClusterEvent); ok {
|
||||
r0 = rf()
|
||||
} else {
|
||||
r0 = ret.Get(0).(string)
|
||||
r0 = ret.Get(0).(model.ClusterEvent)
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// GetMulti provides a mock function with given fields: keys, values
|
||||
func (_m *Cache) GetMulti(keys []string, values []interface{}) []error {
|
||||
ret := _m.Called(keys, values)
|
||||
|
||||
if len(ret) == 0 {
|
||||
panic("no return value specified for GetMulti")
|
||||
}
|
||||
|
||||
var r0 []error
|
||||
if rf, ok := ret.Get(0).(func([]string, []interface{}) []error); ok {
|
||||
r0 = rf(keys, values)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).([]error)
|
||||
}
|
||||
}
|
||||
|
||||
return r0
|
||||
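Note: the regenerated mock only records the call, so a test that wants GetMulti to behave like a cache has to fill the destinations itself via testify's Run hook before returning the per-key errors. A hypothetical wiring example (key names and returned errors are made up):

```go
package cachesketch_test

import (
	"testing"

	"github.com/stretchr/testify/mock"

	"github.com/mattermost/mattermost/server/v8/platform/services/cache"
	"github.com/mattermost/mattermost/server/v8/platform/services/cache/mocks"
)

func TestMockGetMultiWiring(t *testing.T) {
	c := &mocks.Cache{}
	c.On("GetMulti", []string{"hit", "miss"}, mock.Anything).
		Run(func(args mock.Arguments) {
			// Decode the "hit" into the first destination, as a real cache would.
			dests := args.Get(1).([]any)
			*(dests[0].(*string)) = "cached-value"
		}).
		Return([]error{nil, cache.ErrKeyNotFound})

	var hit, miss string
	errs := c.GetMulti([]string{"hit", "miss"}, []any{&hit, &miss})

	if errs[1] != cache.ErrKeyNotFound || hit != "cached-value" {
		t.Fatalf("unexpected mock behaviour: %v %q %q", errs, hit, miss)
	}
	c.AssertExpectations(t)
}
```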
@ -45,7 +76,15 @@ func (_m *Cache) GetInvalidateClusterEvent() string {
|
||||
func (_m *Cache) Keys() ([]string, error) {
|
||||
ret := _m.Called()
|
||||
|
||||
if len(ret) == 0 {
|
||||
panic("no return value specified for Keys")
|
||||
}
|
||||
|
||||
var r0 []string
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(0).(func() ([]string, error)); ok {
|
||||
return rf()
|
||||
}
|
||||
if rf, ok := ret.Get(0).(func() []string); ok {
|
||||
r0 = rf()
|
||||
} else {
|
||||
@ -54,28 +93,6 @@ func (_m *Cache) Keys() ([]string, error) {
|
||||
}
|
||||
}
|
||||
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func() error); ok {
|
||||
r1 = rf()
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// Len provides a mock function with given fields:
|
||||
func (_m *Cache) Len() (int, error) {
|
||||
ret := _m.Called()
|
||||
|
||||
var r0 int
|
||||
if rf, ok := ret.Get(0).(func() int); ok {
|
||||
r0 = rf()
|
||||
} else {
|
||||
r0 = ret.Get(0).(int)
|
||||
}
|
||||
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func() error); ok {
|
||||
r1 = rf()
|
||||
} else {
|
||||
@ -89,6 +106,10 @@ func (_m *Cache) Len() (int, error) {
|
||||
func (_m *Cache) Name() string {
|
||||
ret := _m.Called()
|
||||
|
||||
if len(ret) == 0 {
|
||||
panic("no return value specified for Name")
|
||||
}
|
||||
|
||||
var r0 string
|
||||
if rf, ok := ret.Get(0).(func() string); ok {
|
||||
r0 = rf()
|
||||
@ -103,6 +124,10 @@ func (_m *Cache) Name() string {
|
||||
func (_m *Cache) Purge() error {
|
||||
ret := _m.Called()
|
||||
|
||||
if len(ret) == 0 {
|
||||
panic("no return value specified for Purge")
|
||||
}
|
||||
|
||||
var r0 error
|
||||
if rf, ok := ret.Get(0).(func() error); ok {
|
||||
r0 = rf()
|
||||
@ -117,6 +142,10 @@ func (_m *Cache) Purge() error {
|
||||
func (_m *Cache) Remove(key string) error {
|
||||
ret := _m.Called(key)
|
||||
|
||||
if len(ret) == 0 {
|
||||
panic("no return value specified for Remove")
|
||||
}
|
||||
|
||||
var r0 error
|
||||
if rf, ok := ret.Get(0).(func(string) error); ok {
|
||||
r0 = rf(key)
|
||||
@ -128,11 +157,15 @@ func (_m *Cache) Remove(key string) error {
|
||||
}
|
||||
|
||||
// Set provides a mock function with given fields: key, value
|
||||
func (_m *Cache) Set(key string, value any) error {
|
||||
func (_m *Cache) Set(key string, value interface{}) error {
|
||||
ret := _m.Called(key, value)
|
||||
|
||||
if len(ret) == 0 {
|
||||
panic("no return value specified for Set")
|
||||
}
|
||||
|
||||
var r0 error
|
||||
if rf, ok := ret.Get(0).(func(string, any) error); ok {
|
||||
if rf, ok := ret.Get(0).(func(string, interface{}) error); ok {
|
||||
r0 = rf(key, value)
|
||||
} else {
|
||||
r0 = ret.Error(0)
|
||||
@ -142,11 +175,15 @@ func (_m *Cache) Set(key string, value any) error {
|
||||
}
|
||||
|
||||
// SetWithDefaultExpiry provides a mock function with given fields: key, value
|
||||
func (_m *Cache) SetWithDefaultExpiry(key string, value any) error {
|
||||
func (_m *Cache) SetWithDefaultExpiry(key string, value interface{}) error {
|
||||
ret := _m.Called(key, value)
|
||||
|
||||
if len(ret) == 0 {
|
||||
panic("no return value specified for SetWithDefaultExpiry")
|
||||
}
|
||||
|
||||
var r0 error
|
||||
if rf, ok := ret.Get(0).(func(string, any) error); ok {
|
||||
if rf, ok := ret.Get(0).(func(string, interface{}) error); ok {
|
||||
r0 = rf(key, value)
|
||||
} else {
|
||||
r0 = ret.Error(0)
|
||||
@ -156,11 +193,15 @@ func (_m *Cache) SetWithDefaultExpiry(key string, value any) error {
|
||||
}
|
||||
|
||||
// SetWithExpiry provides a mock function with given fields: key, value, ttl
|
||||
func (_m *Cache) SetWithExpiry(key string, value any, ttl time.Duration) error {
|
||||
func (_m *Cache) SetWithExpiry(key string, value interface{}, ttl time.Duration) error {
|
||||
ret := _m.Called(key, value, ttl)
|
||||
|
||||
if len(ret) == 0 {
|
||||
panic("no return value specified for SetWithExpiry")
|
||||
}
|
||||
|
||||
var r0 error
|
||||
if rf, ok := ret.Get(0).(func(string, any, time.Duration) error); ok {
|
||||
if rf, ok := ret.Get(0).(func(string, interface{}, time.Duration) error); ok {
|
||||
r0 = rf(key, value, ttl)
|
||||
} else {
|
||||
r0 = ret.Error(0)
|
||||
@ -168,3 +209,17 @@ func (_m *Cache) SetWithExpiry(key string, value any, ttl time.Duration) error {
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// NewCache creates a new instance of Cache. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
|
||||
// The first argument is typically a *testing.T value.
|
||||
func NewCache(t interface {
|
||||
mock.TestingT
|
||||
Cleanup(func())
|
||||
}) *Cache {
|
||||
mock := &Cache{}
|
||||
mock.Mock.Test(t)
|
||||
|
||||
t.Cleanup(func() { mock.AssertExpectations(t) })
|
||||
|
||||
return mock
|
||||
}
|
||||
|
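Not part of the diff: a small sketch of how a test might consume the regenerated mock, using the mockery v2 constructor shown above together with testify's expectation API. The package name, test name, and error value are illustrative assumptions:

```go
package example_test // illustrative only; names are not from the commit

import (
	"errors"
	"testing"

	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"

	cachemocks "github.com/mattermost/mattermost/server/v8/platform/services/cache/mocks"
)

func TestConsumerOfCache(t *testing.T) {
	// NewCache wires the mock to t and registers a cleanup hook that asserts
	// all expectations, so no explicit AssertExpectations call is needed.
	c := cachemocks.NewCache(t)

	// Simulate a cache miss for "user1".
	c.On("Get", "user1", mock.Anything).Return(errors.New("key not found")).Once()

	err := c.Get("user1", nil)
	require.Error(t, err)
}
```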
87 server/platform/services/cache/mocks/Provider.go (vendored)
@ -1,11 +1,14 @@
// Code generated by mockery v1.0.0. DO NOT EDIT.
// Code generated by mockery v2.42.2. DO NOT EDIT.

// Regenerate this file using `make cache-mocks`.

package mocks

import (
	mock "github.com/stretchr/testify/mock"

	einterfaces "github.com/mattermost/mattermost/server/v8/einterfaces"
	cache "github.com/mattermost/mattermost/server/v8/platform/services/cache"

	mock "github.com/stretchr/testify/mock"
)

// Provider is an autogenerated mock type for the Provider type
@ -17,6 +20,10 @@ type Provider struct {
func (_m *Provider) Close() error {
	ret := _m.Called()

	if len(ret) == 0 {
		panic("no return value specified for Close")
	}

	var r0 error
	if rf, ok := ret.Get(0).(func() error); ok {
		r0 = rf()
@ -28,24 +35,46 @@ func (_m *Provider) Close() error {
}

// Connect provides a mock function with given fields:
func (_m *Provider) Connect() error {
func (_m *Provider) Connect() (string, error) {
	ret := _m.Called()

	var r0 error
	if rf, ok := ret.Get(0).(func() error); ok {
		r0 = rf()
	} else {
		r0 = ret.Error(0)
	if len(ret) == 0 {
		panic("no return value specified for Connect")
	}

	return r0
	var r0 string
	var r1 error
	if rf, ok := ret.Get(0).(func() (string, error)); ok {
		return rf()
	}
	if rf, ok := ret.Get(0).(func() string); ok {
		r0 = rf()
	} else {
		r0 = ret.Get(0).(string)
	}

	if rf, ok := ret.Get(1).(func() error); ok {
		r1 = rf()
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// NewCache provides a mock function with given fields: opts
func (_m *Provider) NewCache(opts *cache.CacheOptions) (cache.Cache, error) {
	ret := _m.Called(opts)

	if len(ret) == 0 {
		panic("no return value specified for NewCache")
	}

	var r0 cache.Cache
	var r1 error
	if rf, ok := ret.Get(0).(func(*cache.CacheOptions) (cache.Cache, error)); ok {
		return rf(opts)
	}
	if rf, ok := ret.Get(0).(func(*cache.CacheOptions) cache.Cache); ok {
		r0 = rf(opts)
	} else {
@ -54,7 +83,6 @@ func (_m *Provider) NewCache(opts *cache.CacheOptions) (cache.Cache, error) {
		}
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(*cache.CacheOptions) error); ok {
		r1 = rf(opts)
	} else {
@ -63,3 +91,40 @@ func (_m *Provider) NewCache(opts *cache.CacheOptions) (cache.Cache, error) {

	return r0, r1
}

// SetMetrics provides a mock function with given fields: metrics
func (_m *Provider) SetMetrics(metrics einterfaces.MetricsInterface) {
	_m.Called(metrics)
}

// Type provides a mock function with given fields:
func (_m *Provider) Type() string {
	ret := _m.Called()

	if len(ret) == 0 {
		panic("no return value specified for Type")
	}

	var r0 string
	if rf, ok := ret.Get(0).(func() string); ok {
		r0 = rf()
	} else {
		r0 = ret.Get(0).(string)
	}

	return r0
}

// NewProvider creates a new instance of Provider. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
// The first argument is typically a *testing.T value.
func NewProvider(t interface {
	mock.TestingT
	Cleanup(func())
}) *Provider {
	mock := &Provider{}
	mock.Mock.Test(t)

	t.Cleanup(func() { mock.AssertExpectations(t) })

	return mock
}
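Not part of the diff: a sketch of stubbing the new two-value Connect signature on the Provider mock; the package and test names are illustrative assumptions:

```go
package example_test // illustrative only; names are not from the commit

import (
	"testing"

	"github.com/stretchr/testify/require"

	cachemocks "github.com/mattermost/mattermost/server/v8/platform/services/cache/mocks"
)

func TestProviderConnectStatus(t *testing.T) {
	p := cachemocks.NewProvider(t)

	// Connect now returns (string, error); the string carries the status reply
	// from the backend ("OK" for the in-memory provider, the PING reply for Redis).
	p.On("Connect").Return("OK", nil).Once()

	status, err := p.Connect()
	require.NoError(t, err)
	require.Equal(t, "OK", status)
}
```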
98 server/platform/services/cache/provider.go (vendored)
@ -4,9 +4,13 @@
package cache

import (
	"context"
	"fmt"
	"time"

	"github.com/mattermost/mattermost/server/public/model"
	"github.com/mattermost/mattermost/server/v8/einterfaces"
	"github.com/redis/rueidis"
)

// CacheOptions contains options for initializing a cache
@ -16,7 +20,9 @@ type CacheOptions struct {
	Name string
	InvalidateClusterEvent model.ClusterEvent
	Striped bool
	StripedBuckets int
	// StripedBuckets is used only by LRUStriped and shouldn't be greater than the number
	// of CPUs available on the machine running this cache.
	StripedBuckets int
}

// Provider is a provider for Cache
@ -24,9 +30,14 @@ type Provider interface {
	// NewCache creates a new cache with given options.
	NewCache(opts *CacheOptions) (Cache, error)
	// Connect opens a new connection to the cache using specific provider parameters.
	Connect() error
	// The returned string contains the status of the response from the cache backend.
	Connect() (string, error)
	// SetMetrics
	SetMetrics(metrics einterfaces.MetricsInterface)
	// Close releases any resources used by the cache provider.
	Close() error
	// Type returns what type of cache it generates.
	Type() string
}

type cacheProvider struct {
@ -40,28 +51,81 @@ func NewProvider() Provider {
// NewCache creates a new cache with given opts
func (c *cacheProvider) NewCache(opts *CacheOptions) (Cache, error) {
	if opts.Striped {
		return NewLRUStriped(LRUOptions{
			Name: opts.Name,
			Size: opts.Size,
			DefaultExpiry: opts.DefaultExpiry,
			InvalidateClusterEvent: opts.InvalidateClusterEvent,
			StripedBuckets: opts.StripedBuckets,
		})
		return NewLRUStriped(opts)
	}
	return NewLRU(LRUOptions{
		Name: opts.Name,
		Size: opts.Size,
		DefaultExpiry: opts.DefaultExpiry,
		InvalidateClusterEvent: opts.InvalidateClusterEvent,
	}), nil
	return NewLRU(opts), nil
}

// Connect opens a new connection to the cache using specific provider parameters.
func (c *cacheProvider) Connect() error {
	return nil
func (c *cacheProvider) Connect() (string, error) {
	return "OK", nil
}

func (c *cacheProvider) SetMetrics(metrics einterfaces.MetricsInterface) {
}

// Close releases any resources used by the cache provider.
func (c *cacheProvider) Close() error {
	return nil
}

func (c *cacheProvider) Type() string {
	return model.CacheTypeLRU
}

type redisProvider struct {
	client rueidis.Client
	metrics einterfaces.MetricsInterface
}

type RedisOptions struct {
	RedisAddr string
	RedisPassword string
	RedisDB int
}

// NewProvider creates a new CacheProvider
func NewRedisProvider(opts *RedisOptions) (Provider, error) {
	client, err := rueidis.NewClient(rueidis.ClientOption{
		InitAddress: []string{opts.RedisAddr},
		Password: opts.RedisPassword,
		SelectDB: opts.RedisDB,
		ForceSingleClient: true,
		CacheSizeEachConn: 16 * (1 << 20), // 16MiB local cache size
		//TODO: look into MaxFlushDelay
	})
	if err != nil {
		return nil, err
	}
	return &redisProvider{client: client}, nil
}

// NewCache creates a new cache with given opts
func (r *redisProvider) NewCache(opts *CacheOptions) (Cache, error) {
	rr, err := NewRedis(opts, r.client)
	rr.metrics = r.metrics
	return rr, err
}

// Connect opens a new connection to the cache using specific provider parameters.
func (r *redisProvider) Connect() (string, error) {
	res, err := r.client.Do(context.Background(), r.client.B().Ping().Build()).ToString()
	if err != nil {
		return "", fmt.Errorf("unable to establish connection with redis: %v", err)
	}
	return res, nil
}

func (r *redisProvider) SetMetrics(metrics einterfaces.MetricsInterface) {
	r.metrics = metrics
}

func (r *redisProvider) Type() string {
	return model.CacheTypeRedis
}

// Close releases any resources used by the cache provider.
func (r *redisProvider) Close() error {
	r.client.Close()
	return nil
}
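Not part of the diff: a rough sketch of how a caller might glue the two providers together based on the new CacheSettings, assuming it has a loaded *model.Config. The package placement, helper names, and cache size/expiry values are illustrative assumptions; only the provider and options APIs come from the hunks above:

```go
package platform // hypothetical placement; helper names are illustrative

import (
	"time"

	"github.com/mattermost/mattermost/server/public/model"
	"github.com/mattermost/mattermost/server/v8/platform/services/cache"
)

// newCacheProvider picks a provider based on the new CacheSettings.
func newCacheProvider(cfg *model.Config) (cache.Provider, error) {
	if *cfg.CacheSettings.CacheType == model.CacheTypeRedis {
		return cache.NewRedisProvider(&cache.RedisOptions{
			RedisAddr:     *cfg.CacheSettings.RedisAddress,
			RedisPassword: *cfg.CacheSettings.RedisPassword,
			RedisDB:       *cfg.CacheSettings.RedisDB,
		})
	}
	// Default: the in-memory LRU / striped-LRU provider.
	return cache.NewProvider(), nil
}

// buildSessionCache shows the common call sequence: Connect, then NewCache.
func buildSessionCache(p cache.Provider) (cache.Cache, error) {
	if _, err := p.Connect(); err != nil {
		return nil, err
	}
	return p.NewCache(&cache.CacheOptions{
		Name:          "Session", // a name is mandatory for the Redis backend
		Size:          1000,
		DefaultExpiry: 30 * time.Minute,
	})
}
```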
@ -161,7 +161,7 @@ func TestNewCache_Striped(t *testing.T) {
func TestConnectClose(t *testing.T) {
	p := NewProvider()

	err := p.Connect()
	_, err := p.Connect()
	require.NoError(t, err)

	err = p.Close()
287 server/platform/services/cache/redis.go (vendored, new file)
@ -0,0 +1,287 @@
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.

package cache

import (
	"context"
	"errors"
	"fmt"
	"time"

	"github.com/mattermost/mattermost/server/public/model"
	"github.com/mattermost/mattermost/server/v8/einterfaces"

	"github.com/redis/rueidis"
	"github.com/tinylib/msgp/msgp"
	"github.com/vmihailenco/msgpack/v5"
)

const clientSideTTL = 5 * time.Minute

type Redis struct {
	name string
	client rueidis.Client
	defaultExpiry time.Duration
	metrics einterfaces.MetricsInterface
}

func NewRedis(opts *CacheOptions, client rueidis.Client) (*Redis, error) {
	if opts.Name == "" {
		return nil, errors.New("no name specified for cache")
	}
	return &Redis{
		name: opts.Name,
		defaultExpiry: opts.DefaultExpiry,
		client: client,
	}, nil
}

func (r *Redis) Purge() error {
	// TODO: move to scan
	keys, err := r.Keys()
	if err != nil {
		return err
	}
	return r.client.Do(context.Background(),
		r.client.B().Del().
			Key(keys...).
			Build(),
	).Error()
}

func (r *Redis) Set(key string, value any) error {
	return r.SetWithExpiry(key, value, 0)
}

// SetWithDefaultExpiry adds the given key and value to the store with the default expiry. If
// the key already exists, it will overwrite the previous value
func (r *Redis) SetWithDefaultExpiry(key string, value any) error {
	return r.SetWithExpiry(key, value, r.defaultExpiry)
}

// SetWithExpiry adds the given key and value to the cache with the given expiry. If the key
// already exists, it will overwrite the previous value
func (r *Redis) SetWithExpiry(key string, value any, ttl time.Duration) error {
	now := time.Now()
	defer func() {
		if r.metrics != nil {
			elapsed := time.Since(now).Seconds()
			r.metrics.ObserveRedisEndpointDuration(r.name, "Set", elapsed)
		}
	}()
	var buf []byte
	var err error
	// We use a fast path for hot structs.
	if msgpVal, ok := value.(msgp.Marshaler); ok {
		buf, err = msgpVal.MarshalMsg(nil)
	} else {
		// Slow path for other structs.
		buf, err = msgpack.Marshal(value)
	}
	if err != nil {
		return err
	}

	return r.client.Do(context.Background(),
		r.client.B().Set().
			Key(r.name+":"+key).
			Value(rueidis.BinaryString(buf)).
			Ex(ttl).
			Build(),
	).Error()
}

// Get the content stored in the cache for the given key, and decode it into the value interface.
// Return ErrKeyNotFound if the key is missing from the cache
func (r *Redis) Get(key string, value any) error {
	now := time.Now()
	defer func() {
		if r.metrics != nil {
			elapsed := time.Since(now).Seconds()
			r.metrics.ObserveRedisEndpointDuration(r.name, "Get", elapsed)
		}
	}()
	val, err := r.client.DoCache(context.Background(),
		r.client.B().Get().
			Key(r.name+":"+key).
			Cache(),
		clientSideTTL,
	).AsBytes()
	if err != nil {
		if rueidis.IsRedisNil(err) {
			return ErrKeyNotFound
		}
		return err
	}

	// We use a fast path for hot structs.
	if msgpVal, ok := value.(msgp.Unmarshaler); ok {
		_, err := msgpVal.UnmarshalMsg(val)
		return err
	}

	// This is ugly and makes the cache package aware of the model package.
	// But this is due to 2 things.
	// 1. The msgp package works on methods on structs rather than functions.
	// 2. Our cache interface passes pointers to empty pointers, and not pointers
	// to values. This is mainly how all our model structs are passed around.
	// It might be technically possible to use values _just_ for hot structs
	// like these and then return a pointer while returning from the cache function,
	// but it will make the codebase inconsistent, and has some edge-cases to take care of.
	switch v := value.(type) {
	case **model.User:
		var u model.User
		_, err := u.UnmarshalMsg(val)
		*v = &u
		return err
	case *map[string]*model.User:
		var u model.UserMap
		_, err := u.UnmarshalMsg(val)
		*v = u
		return err
	}

	// Slow path for other structs.
	return msgpack.Unmarshal(val, value)
}

func (r *Redis) GetMulti(keys []string, values []any) []error {
	now := time.Now()
	defer func() {
		if r.metrics != nil {
			elapsed := time.Since(now).Seconds()
			r.metrics.ObserveRedisEndpointDuration(r.name, "GetMulti", elapsed)
		}
	}()

	errs := make([]error, len(keys))
	newKeys := make([]string, len(keys))
	for i := range keys {
		newKeys[i] = r.name + ":" + keys[i]
	}
	vals, err := r.client.DoCache(context.Background(),
		r.client.B().Mget().
			Key(newKeys...).
			Cache(),
		clientSideTTL,
	).ToArray()
	if err != nil {
		for i := range errs {
			errs[i] = err
		}
		return errs
	}

	if len(vals) != len(keys) {
		for i := range errs {
			errs[i] = fmt.Errorf("length of returned vals %d, does not match length of keys %d", len(vals), len(keys))
		}
		return errs
	}

	for i, val := range vals {
		if val.IsNil() {
			errs[i] = ErrKeyNotFound
			continue
		}

		buf, err := val.AsBytes()
		if err != nil {
			errs[i] = err
			continue
		}

		// We use a fast path for hot structs.
		if msgpVal, ok := values[i].(msgp.Unmarshaler); ok {
			_, err := msgpVal.UnmarshalMsg(buf)
			errs[i] = err
			continue
		}

		switch v := values[i].(type) {
		case **model.User:
			var u model.User
			_, err := u.UnmarshalMsg(buf)
			*v = &u
			errs[i] = err
			continue
		case *map[string]*model.User:
			var u model.UserMap
			_, err := u.UnmarshalMsg(buf)
			*v = u
			errs[i] = err
			continue
		}

		// Slow path for other structs.
		errs[i] = msgpack.Unmarshal(buf, values[i])
	}

	return errs
}

// Remove deletes the value for a given key.
func (r *Redis) Remove(key string) error {
	now := time.Now()
	defer func() {
		if r.metrics != nil {
			elapsed := time.Since(now).Seconds()
			r.metrics.ObserveRedisEndpointDuration(r.name, "Del", elapsed)
		}
	}()

	return r.client.Do(context.Background(),
		r.client.B().Del().
			Key(r.name+":"+key).
			Build(),
	).Error()
}

// Keys returns a slice of the keys in the cache.
func (r *Redis) Keys() ([]string, error) {
	now := time.Now()
	defer func() {
		if r.metrics != nil {
			elapsed := time.Since(now).Seconds()
			r.metrics.ObserveRedisEndpointDuration(r.name, "Keys", elapsed)
		}
	}()

	// TODO: migrate to a function that works on a batch of keys.
	return r.client.Do(context.Background(),
		r.client.B().Keys().
			Pattern(r.name+":*").
			Build(),
	).AsStrSlice()
}

// Len returns the number of items in the cache.
func (r *Redis) Len() (int, error) {
	now := time.Now()
	defer func() {
		if r.metrics != nil {
			elapsed := time.Since(now).Seconds()
			r.metrics.ObserveRedisEndpointDuration(r.name, "Len", elapsed)
		}
	}()
	// TODO: migrate to scan
	keys, err := r.client.Do(context.Background(),
		r.client.B().Keys().
			Pattern(r.name+":*").
			Build(),
	).AsStrSlice()
	if err != nil {
		return 0, err
	}
	return len(keys), nil
}

// GetInvalidateClusterEvent returns the cluster event configured when this cache was created.
func (r *Redis) GetInvalidateClusterEvent() model.ClusterEvent {
	return model.ClusterEventNone
}

func (r *Redis) Name() string {
	return r.name
}
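Not part of the diff: a standalone sketch of a round trip against the Redis-backed cache, assuming a Redis server at localhost:6379; the cache name, key, and expiry values are illustrative assumptions:

```go
package main // standalone sketch; assumes a local Redis at localhost:6379

import (
	"fmt"
	"time"

	"github.com/mattermost/mattermost/server/v8/platform/services/cache"
)

func main() {
	provider, err := cache.NewRedisProvider(&cache.RedisOptions{
		RedisAddr: "localhost:6379",
		RedisDB:   0,
	})
	if err != nil {
		panic(err)
	}
	defer provider.Close()

	if _, err := provider.Connect(); err != nil {
		panic(err)
	}

	// The Name is mandatory: keys are stored as "<name>:<key>", which is what
	// Keys, Len and Purge rely on for their pattern matches.
	c, err := provider.NewCache(&cache.CacheOptions{
		Name:          "example",
		DefaultExpiry: 10 * time.Minute,
	})
	if err != nil {
		panic(err)
	}

	if err := c.SetWithDefaultExpiry("greeting", "hello"); err != nil {
		panic(err)
	}

	var got string
	if err := c.Get("greeting", &got); err != nil {
		panic(err)
	}
	fmt.Println(got) // decoded via the msgpack slow path
}
```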
@ -6,6 +6,7 @@ package model
type ClusterEvent string

const (
	ClusterEventNone ClusterEvent = "none"
	ClusterEventPublish ClusterEvent = "publish"
	ClusterEventUpdateStatus ClusterEvent = "update_status"
	ClusterEventInvalidateAllCaches ClusterEvent = "inv_all_caches"
@ -98,6 +98,9 @@ const (
	EmailSMTPDefaultServer = "localhost"
	EmailSMTPDefaultPort = "10025"

	CacheTypeLRU = "lru"
	CacheTypeRedis = "redis"

	SitenameMaxLength = 30

	ServiceSettingsDefaultSiteURL = "http://localhost:8065"
@ -929,6 +932,47 @@ func (s *ServiceSettings) SetDefaults(isUpdate bool) {
	}
}

type CacheSettings struct {
	CacheType *string `access:",write_restrictable,cloud_restrictable"`
	RedisAddress *string `access:",write_restrictable,cloud_restrictable"` // telemetry: none
	RedisPassword *string `access:",write_restrictable,cloud_restrictable"` // telemetry: none
	RedisDB *int `access:",write_restrictable,cloud_restrictable"` // telemetry: none
}

func (s *CacheSettings) SetDefaults() {
	if s.CacheType == nil {
		s.CacheType = NewString(CacheTypeLRU)
	}

	if s.RedisAddress == nil {
		s.RedisAddress = NewString("")
	}

	if s.RedisPassword == nil {
		s.RedisPassword = NewString("")
	}

	if s.RedisDB == nil {
		s.RedisDB = NewInt(-1)
	}
}

func (s *CacheSettings) isValid() *AppError {
	if *s.CacheType != CacheTypeLRU && *s.CacheType != CacheTypeRedis {
		return NewAppError("Config.IsValid", "model.config.is_valid.cache_type.app_error", nil, "", http.StatusBadRequest)
	}

	if *s.CacheType == CacheTypeRedis && *s.RedisAddress == "" {
		return NewAppError("Config.IsValid", "model.config.is_valid.empty_redis_address.app_error", nil, "", http.StatusBadRequest)
	}

	if *s.CacheType == CacheTypeRedis && *s.RedisDB < 0 {
		return NewAppError("Config.IsValid", "model.config.is_valid.invalid_redis_db.app_error", nil, "", http.StatusBadRequest)
	}

	return nil
}

type ClusterSettings struct {
	Enable *bool `access:"environment_high_availability,write_restrictable"`
	ClusterName *string `access:"environment_high_availability,write_restrictable,cloud_restrictable"` // telemetry: none
@ -3496,6 +3540,7 @@ type Config struct {
	LocalizationSettings LocalizationSettings
	SamlSettings SamlSettings
	NativeAppSettings NativeAppSettings
	CacheSettings CacheSettings
	ClusterSettings ClusterSettings
	MetricsSettings MetricsSettings
	ExperimentalSettings ExperimentalSettings
@ -3603,6 +3648,7 @@ func (o *Config) SetDefaults() {
	o.SupportSettings.SetDefaults()
	o.AnnouncementSettings.SetDefaults()
	o.ThemeSettings.SetDefaults()
	o.CacheSettings.SetDefaults()
	o.ClusterSettings.SetDefaults()
	o.PluginSettings.SetDefaults(o.LogSettings)
	o.AnalyticsSettings.SetDefaults()
@ -3640,6 +3686,10 @@ func (o *Config) IsValid() *AppError {
		return NewAppError("Config.IsValid", "model.config.is_valid.cluster_email_batching.app_error", nil, "", http.StatusBadRequest)
	}

	if appErr := o.CacheSettings.isValid(); appErr != nil {
		return appErr
	}

	if *o.ServiceSettings.SiteURL == "" && *o.ServiceSettings.AllowCookiesForSubdomains {
		return NewAppError("Config.IsValid", "model.config.is_valid.allow_cookies_for_subdomains.app_error", nil, "", http.StatusBadRequest)
	}
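Not part of the diff: a minimal sketch of enabling the Redis cache through the new CacheSettings and letting the validation rules above check it; the address and DB values are illustrative assumptions:

```go
package main // illustrative only; not part of the commit

import (
	"fmt"

	"github.com/mattermost/mattermost/server/public/model"
)

func main() {
	var cfg model.Config
	cfg.SetDefaults() // CacheSettings defaults: CacheType "lru", empty address, RedisDB -1

	// Switching to Redis: isValid requires a non-empty RedisAddress and RedisDB >= 0.
	cfg.CacheSettings.CacheType = model.NewString(model.CacheTypeRedis)
	cfg.CacheSettings.RedisAddress = model.NewString("localhost:6379")
	cfg.CacheSettings.RedisPassword = model.NewString("")
	cfg.CacheSettings.RedisDB = model.NewInt(0)

	if appErr := cfg.IsValid(); appErr != nil {
		fmt.Println("invalid config:", appErr.Error())
		return
	}
	fmt.Println("cache type:", *cfg.CacheSettings.CacheType)
}
```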