2014-02-12 22:32:58 -06:00
|
|
|
require_dependency "export/exporter"
|
|
|
|
require_dependency "import/importer"
|
|
|
|
|
|
|
|
module BackupRestore

  # Raised when a backup/restore operation is requested while another one
  # is already running (tracked via a redis flag — see running_key).
  class OperationRunningError < RuntimeError; end

  # File names produced inside a backup archive.
  DUMP_FILE = "dump.sql".freeze
  METADATA_FILE = "meta.json".freeze
  # MessageBus channel the exporter/importer publish their log lines to.
  LOGS_CHANNEL = "/admin/backups/logs".freeze

  # Starts a backup in a detached forked process.
  #
  # @param user_id [Integer] id of the user who triggered the backup
  # @param publish_to_message_bus [Boolean] when true, progress is streamed on LOGS_CHANNEL
  # @return [true]
  def self.backup!(user_id, publish_to_message_bus = false)
    exporter = Export::Exporter.new(user_id, publish_to_message_bus)
    start! exporter
  end

  # Starts a restore of the given backup file in a detached forked process.
  #
  # @param user_id [Integer] id of the user who triggered the restore
  # @param filename [String] name of the backup archive to restore
  # @param publish_to_message_bus [Boolean] when true, progress is streamed on LOGS_CHANNEL
  # @return [true]
  def self.restore!(user_id, filename, publish_to_message_bus = false)
    importer = Import::Importer.new(user_id, filename, publish_to_message_bus)
    start! importer
  end

  # Moves the tables saved in the "backup" schema back into "public",
  # undoing the last restore. No-op when there is nothing to roll back.
  #
  # @raise [BackupRestore::OperationRunningError] if a backup/restore is in progress
  def self.rollback!
    raise BackupRestore::OperationRunningError if BackupRestore.is_operation_running?
    if can_rollback?
      move_tables_between_schemas("backup", "public")
      # reset connections/caches the same way a freshly forked worker would
      after_fork
    end
  end

  # Asks the currently running operation to stop (it polls should_shutdown?).
  # @return [true]
  def self.cancel!
    set_shutdown_signal!
    true
  end

  # Flags an operation as running and remembers where its log stream starts.
  def self.mark_as_running!
    # TODO: for more safety, it should acquire a lock
    # and raise an exception if already running!
    $redis.set(running_key, "1")
    save_start_logs_message_id
  end

  # @return [Boolean] whether a backup/restore operation is currently running
  def self.is_operation_running?
    !!$redis.get(running_key)
  end

  # Clears the "operation running" flag.
  def self.mark_as_not_running!
    $redis.del(running_key)
  end

  # @return [Boolean] whether a cancellation has been requested via cancel!
  def self.should_shutdown?
    !!$redis.get(shutdown_signal_key)
  end

  # @return [Boolean] true when the "backup" schema still holds tables to restore
  def self.can_rollback?
    backup_tables_count > 0
  end

  # Summary of the current state, consumed by the admin UI.
  # @return [Hash] with :is_operation_running and :can_rollback booleans
  def self.operations_status
    {
      is_operation_running: is_operation_running?,
      can_rollback: can_rollback?,
    }
  end

  # All log messages published on LOGS_CHANNEL since the current/last
  # operation started (see save_start_logs_message_id).
  # @return [Array] the message payloads
  def self.logs
    id = start_logs_message_id
    MessageBus.backlog(LOGS_CHANNEL, id).map(&:data)
  end

  # @return [Integer] the current database migration version
  def self.current_version
    ActiveRecord::Migrator.current_version
  end

  # Executes the SQL that moves every table from the "source" schema into
  # the "destination" schema (dropping clashing destination tables).
  def self.move_tables_between_schemas(source, destination)
    User.exec_sql(move_tables_between_schemas_sql(source, destination))
  end

  # Builds the PL/pgSQL DO-block that performs the schema move.
  # NOTE: source/destination are interpolated into SQL — callers must only
  # pass trusted, hard-coded schema names ("backup"/"public").
  #
  # @return [String] the SQL statement
  def self.move_tables_between_schemas_sql(source, destination)
    # TODO: Postgres 9.3 has "CREATE SCHEMA schema IF NOT EXISTS;"
    <<-SQL
      DO $$DECLARE row record;
      BEGIN
        -- create "destination" schema if it does not exist already
        -- NOTE: DROP & CREATE SCHEMA is easier, but we don't want to drop the public schema
        -- otherwise extensions (like hstore & pg_trgm) won't work anymore
        IF NOT EXISTS(SELECT 1 FROM pg_namespace WHERE nspname = '#{destination}')
        THEN
          CREATE SCHEMA #{destination};
        END IF;
        -- move all "source" tables to "destination" schema
        FOR row IN SELECT tablename FROM pg_tables WHERE schemaname = '#{source}'
        LOOP
          EXECUTE 'DROP TABLE IF EXISTS #{destination}.' || quote_ident(row.tablename) || ' CASCADE;';
          EXECUTE 'ALTER TABLE #{source}.' || quote_ident(row.tablename) || ' SET SCHEMA #{destination};';
        END LOOP;
      END$$;
    SQL
  end

  # Lightweight value holder for the bits of the DB config pg_dump/psql need.
  DatabaseConfiguration = Struct.new(:host, :username, :password, :database)

  # Extracts host/username/password/database from the active Rails database
  # configuration (connection pool spec in production, database.yml otherwise).
  #
  # @return [DatabaseConfiguration]
  def self.database_configuration
    config = Rails.env.production? ? ActiveRecord::Base.connection_pool.spec.config : Rails.configuration.database_configuration[Rails.env]
    config = config.with_indifferent_access

    DatabaseConfiguration.new(
      config["host"],
      # fall back to the system user, then "postgres", when no username is configured
      config["username"] || ENV["USER"] || "postgres",
      config["password"],
      config["database"]
    )
  end

  private

  # NOTE(review): `private` has no effect on methods defined with `def self.`;
  # everything below is still publicly callable. Left as-is to avoid changing
  # visibility for existing callers.

  # Redis key flagging that an operation is in progress.
  def self.running_key
    "backup_restore_operation_is_running"
  end

  # Redis key flagging that the running operation should stop.
  def self.shutdown_signal_key
    "backup_restore_operation_should_shutdown"
  end

  def self.set_shutdown_signal!
    $redis.set(shutdown_signal_key, "1")
  end

  def self.clear_shutdown_signal!
    $redis.del(shutdown_signal_key)
  end

  # Remembers the last MessageBus id on LOGS_CHANNEL so `logs` can replay
  # only the messages published after the operation started.
  def self.save_start_logs_message_id
    id = MessageBus.last_id(LOGS_CHANNEL)
    $redis.set(start_logs_message_id_key, id)
  end

  # @return [Integer] the MessageBus id recorded by save_start_logs_message_id
  def self.start_logs_message_id
    $redis.get(start_logs_message_id_key).to_i
  end

  # Redis key holding the log-stream starting point.
  def self.start_logs_message_id_key
    "start_logs_message_id"
  end

  # Forks a detached child process that runs the given exporter/importer.
  # The child re-establishes its connections (after_fork), runs the job,
  # logs any failure, always clears the shutdown signal and always exits
  # with exit!(0) so it never runs the parent's at_exit handlers.
  #
  # @param runner [#run] an Export::Exporter or Import::Importer
  # @return [true]
  def self.start!(runner)
    child = fork do
      begin
        after_fork
        runner.run
      rescue Exception => e
        # deliberately rescues Exception (not just StandardError): this is a
        # forked worker whose only job is to log ANY failure before exiting
        puts "--------------------------------------------"
        puts "---------------- EXCEPTION -----------------"
        puts e.message
        puts e.backtrace.join("\n")
        puts "--------------------------------------------"
      ensure
        begin
          clear_shutdown_signal!
        rescue Exception => e
          puts "============================================"
          puts "================ EXCEPTION ================="
          puts e.message
          puts e.backtrace.join("\n")
          puts "============================================"
        ensure
          exit!(0)
        end
      end
    end

    Process.detach(child)

    true
  end

  # Re-establishes every connection/cache a forked child inherited from its
  # parent and cannot safely share.
  def self.after_fork
    # reconnect to redis
    $redis.client.reconnect
    # reconnect the rails cache (uses redis)
    Rails.cache.reconnect
    # tells the message bus we've forked
    MessageBus.after_fork
    # /!\ HACK /!\ force sidekiq to create a new connection to redis
    Sidekiq.instance_variable_set(:@redis, nil)
  end

  # @return [Integer] number of tables currently stored in the "backup" schema
  def self.backup_tables_count
    User.exec_sql("SELECT COUNT(*) AS count FROM information_schema.tables WHERE table_schema = 'backup'")[0]['count'].to_i
  end

end
|