ipa-replica-manage: use server_del when removing domain level 1 replica

`ipa-replica-manage del` now calls `server_del` behind the scenes when removal
of a replica from the managed topology is requested. The existing removal
options are mapped to the corresponding `server_del` options to maintain
backwards compatibility with earlier versions.
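
For illustration, the option mapping introduced here boils down to the sketch
below (the SimpleNamespace object is only a stand-in for the tool's parsed
command-line options; the real code uses the parsed option values directly):

    from types import SimpleNamespace

    # stand-in for the parsed ipa-replica-manage options (--cleanup, --force)
    options = SimpleNamespace(cleanup=False, force=False)

    server_del_options = dict(
        force=options.cleanup,                     # --cleanup -> server_del's force
        ignore_topology_disconnect=options.force,  # --force -> tolerate a disconnected topology
        ignore_last_of_role=options.force          # --force -> tolerate removing the last server of a role
    )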

https://fedorahosted.org/freeipa/ticket/5588

Reviewed-By: Martin Basti <mbasti@redhat.com>
Martin Babinsky authored on 2016-06-08 18:34:37 +02:00; committed by Martin Basti
parent 081941a5b9
commit 47decc9b84
2 changed files with 22 additions and 241 deletions

ipa-replica-manage

@@ -26,7 +26,6 @@ import os
 import re
 import ldap
 import socket
-import time
 import traceback
 from six.moves.urllib.parse import urlparse
@@ -920,139 +919,17 @@ def del_master_managed(realm, hostname, options):
         print("Can't remove itself: %s" % (options.host))
         sys.exit(1)
 
-    try:
-        api.Command.server_show(hostname_u)
-    except errors.NotFound:
-        if not options.cleanup:
-            print("{hostname} is not listed among IPA masters.".format(
-                hostname=hostname))
-            print("Please specify an actual server or add the --cleanup "
-                  "option to force clean up.")
-            sys.exit(1)
+    server_del_options = dict(
+        force=options.cleanup,
+        ignore_topology_disconnect=options.force,
+        ignore_last_of_role=options.force
+    )
 
-    # 1. Connect to the local server
     try:
-        thisrepl = replication.ReplicationManager(realm, options.host,
-                                                  options.dirman_passwd)
+        replication.run_server_del_as_cli(
+            api, hostname_u, **server_del_options)
     except Exception as e:
-        print("Failed to connect to server %s: %s" % (options.host, e))
-        sys.exit(1)
-
-    # 2. Get all masters
-    masters = api.Command.server_find(
-        '', sizelimit=0, no_members=False)['result']
-
-    # 3. Check topology connectivity in all suffixes
-    topo_errors = replication.check_last_link_managed(api, hostname, masters)
-    any_topo_error = any(topo_errors[t][0] or topo_errors[t][1]
-                         for t in topo_errors)
-    if any_topo_error:
-        if not options.force:
-            sys.exit("Aborted")
-        else:
-            print("Forcing removal of %s" % hostname)
-
-    # 4. Check that we are not leaving the installation without CA and/or DNS
-    # And pick new CA master.
-    ensure_last_services(api.Backend.ldap2, hostname, masters, options)
-
-    # 5. Remove master entry. Topology plugin will remove replication agreements.
-    try:
-        api.Command.server_del(hostname_u)
-    except errors.NotFound:
-        print("Server entry already deleted: %s" % (hostname))
-
-    # 6. Cleanup
-    try:
-        thisrepl.replica_cleanup(hostname, realm, force=True)
-    except Exception as e:
-        print("Failed to cleanup %s entries: %s" % (hostname, e))
-        print("You may need to manually remove them from the tree")
-
-    # 7. Clean RUV for the deleted master
-    # Wait for topology plugin to delete segments
-    check_deleted_segments(hostname_u, masters, topo_errors, options.host)
-    # Clean RUV is handled by the topolgy plugin
-
-    # 8. And clean up the removed replica DNS entries if any.
-    cleanup_server_dns_entries(realm, hostname, thisrepl.suffix, options)
-
-
-def check_deleted_segments(hostname, masters, topo_errors, starting_host):
-
-    def wait_for_segment_removal(hostname, master_cns, suffix_name,
-                                 topo_errors):
-        i = 0
-        while True:
-            left = api.Command.topologysegment_find(
-                suffix_name, iparepltoposegmentleftnode=hostname, sizelimit=0
-            )['result']
-            right = api.Command.topologysegment_find(
-                suffix_name, iparepltoposegmentrightnode=hostname, sizelimit=0
-            )['result']
-
-            # Relax check if topology was or is disconnected. Disconnected
-            # topology can contain segments with already deleted servers.
-            # Check only if segments of servers, which can contact this server,
-            # and the deleted server were removed.
-            # This code should handle a case where there was a topology with
-            # a central node(B): A <-> B <-> C, where A is current server.
-            # After removal of B, topology will be disconnected and removal of
-            # segment B <-> C won't be replicated back to server A, therefore
-            # presence of the segment has to be ignored.
-            if topo_errors[0] or topo_errors[1]:
-                # use errors after deletion because we don't care if some
-                # server can't contact the deleted one
-                cant_contact_me = [e[0] for e in topo_errors[1]
-                                   if starting_host in e[2]]
-                can_contact_me = set(master_cns) - set(cant_contact_me)
-                left = [s for s in left if s['iparepltoposegmentrightnode'][0]
-                        in can_contact_me]
-                right = [s for s in right if s['iparepltoposegmentleftnode'][0]
-                         in can_contact_me]
-
-            if not left and not right:
-                print("Agreements deleted")
-                return
-            time.sleep(2)
-            if i == 2:  # taking too long, something is wrong, report
-                print("Waiting for removal of replication agreements")
-            if i > 90:
-                print("Taking too long, skipping")
-                print("Following segments were not deleted:")
-                for s in left:
-                    print(" %s" % s['cn'][0])
-                for s in right:
-                    print(" %s" % s['cn'][0])
-                return
-            i += 1
-
-    if not replication.check_hostname_in_masters(hostname, masters):
-        print("{0} not in masters, skipping agreement deletion check".format(
-            hostname))
-        return
-
-    suffix_to_masters = replication.map_masters_to_suffixes(masters)
-
-    for suffix_name in suffix_to_masters:
-        suffix_member_cns = [
-            m['cn'][0] for m in suffix_to_masters[suffix_name]
-        ]
-
-        if hostname not in suffix_member_cns:
-            # If the server was already deleted, we can expect that all
-            # removals had been done in previous run and dangling segments
-            # were not deleted.
-            print("Skipping replication agreement deletion check for "
-                  "suffix '{0}'".format(suffix_name))
-            continue
-
-        print("Checking for deleted segments in suffix '{0}'".format(
-            suffix_name))
-
-        wait_for_segment_removal(hostname, suffix_member_cns, suffix_name,
-                                 topo_errors[suffix_name])
+        sys.exit(e)
 
 
 def del_master_direct(realm, hostname, options):

ipaserver/install/replication.py

@@ -29,9 +29,8 @@ from random import randint
 import ldap
 
 from ipalib import api, errors
+from ipalib.cli import textui
 from ipalib.constants import CACERT
-from ipaserver.topology import (
-    create_topology_graph, get_topology_connection_errors)
 from ipapython.ipa_log_manager import root_logger
 from ipapython import ipautil, ipaldap
 from ipapython.dn import DN
@@ -1768,116 +1767,21 @@ class CAReplicationManager(ReplicationManager):
             raise RuntimeError("Failed to start replication")
 
 
-def map_masters_to_suffixes(masters):
-    masters_to_suffix = {}
-
-    for master in masters:
-        try:
-            managed_suffixes = master['iparepltopomanagedsuffix_topologysuffix']
-        except KeyError:
-            print("IPA master {0} does not manage any suffix")
-            continue
-
-        for suffix_name in managed_suffixes:
-            try:
-                masters_to_suffix[suffix_name].append(master)
-            except KeyError:
-                masters_to_suffix[suffix_name] = [master]
-
-    return masters_to_suffix
-
-
-def check_hostname_in_masters(hostname, masters):
-    master_cns = {m['cn'][0] for m in masters}
-    return hostname in master_cns
-
-
-def get_orphaned_suffixes(masters):
+def run_server_del_as_cli(api_instance, hostname, **options):
     """
-    :param masters: result of server_find command
-    :return a set consisting of suffix names which are not managed by any
-    master
+    run server_del API command and print the result to stdout/stderr using
+    textui backend.
+
+    :params api_instance: API instance
+    :params hostname: server FQDN
+    :params options: options for server_del command
     """
-    all_suffixes = api.Command.topologysuffix_find(
-        sizelimit=0)['result']
-    all_suffix_names = set(s['cn'][0] for s in all_suffixes)
-    managed_suffixes = set(map_masters_to_suffixes(masters))
+    server_del_cmd = api_instance.Command.server_del
 
-    return all_suffix_names ^ managed_suffixes
+    if 'version' not in options:
+        options['version'] = api_instance.env.api_version
+
+    result = server_del_cmd(hostname, **options)
 
-
-def check_last_link_managed(api, hostname, masters):
-    """
-    Check if 'hostname' is safe to delete.
-
-    :returns: a dictionary of topology errors across all suffixes in the form
-        {<suffix name>: (<original errors>,
-                         <errors after removing the node>)}
-    """
-    suffix_to_masters = map_masters_to_suffixes(masters)
-    topo_errors_by_suffix = {}
-
-    # sanity check for orphaned suffixes
-    orphaned_suffixes = get_orphaned_suffixes(masters)
-    if orphaned_suffixes:
-        print("The following suffixes are not managed by any IPA master:")
-        print(" {0}".format(
-            ', '.join(sorted(orphaned_suffixes))
-            )
-        )
-
-    for suffix_name in suffix_to_masters:
-        print("Checking connectivity in topology suffix '{0}'".format(
-            suffix_name))
-        if not check_hostname_in_masters(hostname,
-                                         suffix_to_masters[suffix_name]):
-            print(
-                "'{0}' is not a part of topology suffix '{1}'".format(
-                    hostname, suffix_name
-                )
-            )
-            print("Not checking connectivity")
-            continue
-
-        segments = api.Command.topologysegment_find(
-            suffix_name, sizelimit=0).get('result')
-        graph = create_topology_graph(suffix_to_masters[suffix_name], segments)
-
-        # check topology before removal
-        orig_errors = get_topology_connection_errors(graph)
-        if orig_errors:
-            print("Current topology in suffix '{0}' is disconnected:".format(
-                suffix_name))
-            print("Changes are not replicated to all servers and data are "
-                  "probably inconsistent.")
-            print("You need to add segments to reconnect the topology.")
-            print_connect_errors(orig_errors)
-
-        # after removal
-        try:
-            graph.remove_vertex(hostname)
-        except ValueError:
-            pass  # ignore already deleted master, continue to clean
-
-        new_errors = get_topology_connection_errors(graph)
-        if new_errors:
-            print("WARNING: Removal of '{0}' will lead to disconnected "
-                  "topology in suffix '{1}'".format(hostname, suffix_name))
-            print("Changes will not be replicated to all servers and data will"
-                  " become inconsistent.")
-            print("You need to add segments to prevent disconnection of the "
-                  "topology.")
-            print("Errors in topology after removal:")
-            print_connect_errors(new_errors)
-
-        topo_errors_by_suffix[suffix_name] = (orig_errors, new_errors)
-
-    return topo_errors_by_suffix
-
-
-def print_connect_errors(errors):
-    for error in errors:
-        print("Topology does not allow server %s to replicate with servers:"
-              % error[0])
-        for srv in error[2]:
-            print(" %s" % srv)
+    textui_backend = textui(api_instance)
+    server_del_cmd.output_for_cli(textui_backend, result, hostname, **options)
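
For reference, a minimal way to drive the new helper outside of
ipa-replica-manage could look like the following sketch (the hostname is an
example value, and it is assumed that the server-side `api` object has already
been bootstrapped, finalized and connected with sufficient privileges):

    from ipalib import api
    from ipaserver.install import replication

    # assumption: api.bootstrap()/api.finalize() have run and the LDAP
    # backend is connected as a principal allowed to delete a server entry
    replication.run_server_del_as_cli(
        api, u'replica1.example.com',
        force=False,                       # like server_del --force
        ignore_topology_disconnect=False,
        ignore_last_of_role=False)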