# frozen_string_literal: true
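
# CachedCounting batches counter increments so hot paths do not write to the
# database directly: increments are queued in memory, flushed to a shared Redis
# hash by a background thread, and periodically written to the database through
# the including class's `write_cache!`.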
module CachedCounting
  extend ActiveSupport::Concern
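
  # Reads and deletes a field from a Redis hash atomically, returning the value
  # that was stored, so increments landing between the read and the delete are
  # not lost while counts are being flushed to the database.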
  LUA_HGET_DEL = DiscourseRedis::EvalHelper.new <<~LUA
    local result = redis.call("HGET", KEYS[1], KEYS[2])
    redis.call("HDEL", KEYS[1], KEYS[2])

    return result
  LUA
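
  # Pending increments accumulate in this in-process queue; the background
  # thread drains it into Redis every SLEEP_SECONDS and pushes the Redis counts
  # to the database roughly every SLEEP_SECONDS * FLUSH_DB_ITERATIONS.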
  QUEUE = Queue.new
  SLEEP_SECONDS = 1
  FLUSH_DB_ITERATIONS = 60
  MUTEX = Mutex.new
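
  # Stops background processing: disables the loop, then wakes and joins the
  # flusher thread if it is alive.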
  def self.disable
    @enabled = false
    if @thread && @thread.alive?
      @thread.wakeup
      @thread.join
    end
  end

  def self.enabled?
    @enabled != false
  end

  def self.enable
    @enabled = true
  end

  def self.reset
    @last_ensure_thread = nil
    clear_queue!
    clear_flush_to_db_lock!
  end
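
  # Minimum interval between attempts to (re)start the background flusher thread.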
  ENSURE_THREAD_COOLDOWN_SECONDS = 5
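
  # Lazily starts the flusher thread if it is not already running; guarded by
  # MUTEX and rate limited by ENSURE_THREAD_COOLDOWN_SECONDS so hot paths do
  # not repeatedly pay the cost of checking for and spawning threads.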
  def self.ensure_thread!
    return if !enabled?

    MUTEX.synchronize do
      now = Process.clock_gettime(Process::CLOCK_MONOTONIC)
      delta = @last_ensure_thread && (now - @last_ensure_thread)

      if delta && delta < ENSURE_THREAD_COOLDOWN_SECONDS
        # creating threads can be very expensive and bog down a process
        return
      end

      @last_ensure_thread = now

      @thread = nil if !@thread&.alive?
      @thread ||= Thread.new { thread_loop }
    end
  end
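
  # Main loop of the background thread: every SLEEP_SECONDS it drains the
  # in-process queue into Redis, and every FLUSH_DB_ITERATIONS iterations (or
  # when a flush is requested) it writes the accumulated counts to the
  # database. Read-only Redis/Postgres errors are swallowed rather than logged.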
  def self.thread_loop
    iterations = 0
    while true
      break if !enabled?

      sleep SLEEP_SECONDS
      flush_in_memory
      if (iterations >= FLUSH_DB_ITERATIONS) || @flush
        iterations = 0
        flush_to_db
        @flush = false
      end
      iterations += 1
    end
  rescue => ex
    if Redis::CommandError === ex && ex.message =~ /READONLY/
      # do not warn for Redis readonly mode
    elsif PG::ReadOnlySqlTransaction === ex
      # do not warn for PG readonly mode
    else
      Discourse.warn_exception(ex, message: "Unexpected error while processing cached counts")
    end
  end
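
  # Requests an immediate database flush and blocks until the background thread
  # (which must already be running) has completed it.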
  def self.flush
    @flush = true
    @thread.wakeup
    sleep 0.001 while @flush
  end
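
  # Counts are staged in this Redis hash outside the site namespace, so keys
  # from every multisite database share one hash and one flusher.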
  COUNTER_REDIS_HASH = "CounterCacheHash"
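
  # Drains the in-process queue and aggregates the increments into the shared
  # Redis hash, keyed by "class,db,YYYYMMDD,key".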
  def self.flush_in_memory
    counts = nil
    while QUEUE.length > 0
      # only 1 consumer, no need to avoid blocking
      key, klass, db, time = QUEUE.deq
      _redis_key = "#{klass},#{db},#{time.strftime("%Y%m%d")},#{key}"
      counts ||= Hash.new(0)
      counts[_redis_key] += 1
    end

    if counts
      counts.each do |redis_key, count|
        Discourse.redis.without_namespace.hincrby(COUNTER_REDIS_HASH, redis_key, count)
      end
    end
  end
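
  # The database flush is rate limited across all processes: the cooldown key
  # is set with SET NX EX, so at most one flush runs per cooldown window.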
  DB_FLUSH_COOLDOWN_SECONDS = 60
  DB_COOLDOWN_KEY = "cached_counting_cooldown"
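
  # Moves the staged counts from Redis into the database: under a distributed
  # mutex, each hash field is atomically read and deleted, then handed to the
  # owning class's `write_cache!` on the correct multisite connection.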
  def self.flush_to_db
    redis = Discourse.redis.without_namespace
    DistributedMutex.synchronize("flush_counters_to_db", redis: redis, validity: 5.minutes) do
      if allowed_to_flush_to_db?
        redis
          .hkeys(COUNTER_REDIS_HASH)
          .each do |key|
            val = LUA_HGET_DEL.eval(redis, [COUNTER_REDIS_HASH, key]).to_i

            # unlikely (protected by mutex), but protect just in case
            # could be a race condition in test
            if val > 0
              klass_name, db, date, local_key = key.split(",", 4)
              date = Date.strptime(date, "%Y%m%d")
              klass = Module.const_get(klass_name)

              RailsMultisite::ConnectionManagement.with_connection(db) do
                klass.write_cache!(local_key, val, date)
              end
            end
          end
      end
    end
  end

  def self.clear_flush_to_db_lock!
    Discourse.redis.without_namespace.del(DB_COOLDOWN_KEY)
  end

  def self.flush_to_db_lock_ttl
    Discourse.redis.without_namespace.ttl(DB_COOLDOWN_KEY)
  end
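
  # Returns a truthy value only when the cooldown key was not already present;
  # SET with NX and EX doubles as the lock and its expiry.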
  def self.allowed_to_flush_to_db?
    Discourse.redis.without_namespace.set(
      DB_COOLDOWN_KEY,
      "1",
      ex: DB_FLUSH_COOLDOWN_SECONDS,
      nx: true,
    )
  end
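
  # Enqueues a single increment together with the current multisite database
  # and the current UTC time, which later determine the Redis key and the date
  # passed to `write_cache!`.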
  def self.queue(key, klass)
    QUEUE.push([key, klass, RailsMultisite::ConnectionManagement.current_db, Time.now.utc])
  end

  def self.clear_queue!
    QUEUE.clear
    redis = Discourse.redis.without_namespace
    redis.del(COUNTER_REDIS_HASH)
  end
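
  # API for including classes: call `perform_increment!` on hot paths and
  # implement `write_cache!` to persist the aggregated counts. A minimal
  # sketch (hypothetical model, not part of Discourse):
  #
  #   class HypotheticalDailyCounter < ActiveRecord::Base
  #     include CachedCounting
  #
  #     def self.write_cache!(key, count, date)
  #       # persist `count` increments for `key` on `date`
  #     end
  #   end
  #
  #   HypotheticalDailyCounter.perform_increment!("some-key")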
  class_methods do
    def perform_increment!(key)
      CachedCounting.ensure_thread!
      CachedCounting.queue(key, self)
    end

    def write_cache!(key, count, date)
      raise NotImplementedError
    end
  end
end