diff --git a/fence/agents/compute/fence_compute.py b/fence/agents/compute/fence_compute.py
index 476109e5..d7f357a2 100644
--- a/fence/agents/compute/fence_compute.py
+++ b/fence/agents/compute/fence_compute.py
@@ -1,459 +1,459 @@
#!@PYTHON@ -tt

import sys
import time
import atexit
import logging
import inspect
import requests.exceptions

sys.path.append("@FENCEAGENTSLIBDIR@")
from fencing import *
from fencing import fail_usage, is_executable, run_command, run_delay

override_status = ""

EVACUABLE_TAG = "evacuable"
TRUE_TAGS = ['true']

def get_power_status(connection, options):

	if len(override_status):
		logging.debug("Pretending we're " + override_status)
		return override_status

	status = "unknown"
	logging.debug("get action: " + options["--action"])

	if connection:
		try:
			services = connection.services.list(host=options["--plug"], binary="nova-compute")

			for service in services:
				logging.debug("Status of %s on %s is %s, %s" %
					      (service.binary, options["--plug"], service.state, service.status))

				if service.state == "up" and service.status == "enabled":
					# Up and operational
					status = "on"

				elif service.state == "down" and service.status == "disabled":
					# Down and fenced
					status = "off"

				elif service.state == "down":
					# Down and requires fencing
					status = "failed"

				elif service.state == "up":
					# Up and requires unfencing
					status = "running"

				else:
					logging.warning("Unknown status detected from nova for %s: %s, %s" %
							(options["--plug"], service.state, service.status))
					status = "%s %s" % (service.state, service.status)
				break
		except requests.exceptions.ConnectionError as err:
			logging.warning("Nova connection failed: " + str(err))

	logging.debug("Final status of %s is %s" % (options["--plug"], status))
	return status

def get_power_status_simple(connection, options):
	status = get_power_status(connection, options)
	if status in [ "off" ]:
		return status
	return "on"

def set_attrd_status(host, status, options):
	logging.debug("Setting fencing status for %s to %s" % (host, status))
	run_command(options, "attrd_updater -p -n evacuate -Q -N %s -U %s" % (host, status))

def get_attrd_status(host, options):
	(status, pipe_stdout, pipe_stderr) = run_command(options, "attrd_updater -p -n evacuate -Q -N %s" % (host))
	fields = pipe_stdout.split('"')
	if len(fields) > 6:
		return fields[5]
	logging.debug("Got %s: o:%s e:%s n:%d" % (status, pipe_stdout, pipe_stderr, len(fields)))
	return ""
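# A hedged sketch of the attrd_updater output that get_attrd_status() parses;
# the exact format depends on the Pacemaker version, but it is assumed to
# look like:
#
#   name="evacuate" host="compute-0.example.com" value="yes"
#
# Splitting on '"' then puts the attribute value at index 5, which is why the
# function returns fields[5] when enough fields are present.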
def set_power_status_on(connection, options):
	global override_status

	# Wait for any evacuations to complete
	while True:
		current = get_attrd_status(options["--plug"], options)
		if current in ["no", ""]:
			logging.info("Evacuation complete for: %s '%s'" % (options["--plug"], current))
			break
		else:
			logging.info("Waiting for %s to complete evacuations: %s" % (options["--plug"], current))
			time.sleep(2)

	status = get_power_status(connection, options)
	# Should we do it for 'failed' too?
	if status in [ "off", "running", "failed" ]:
		try:
			# Forcing the host back up
			logging.info("Forcing nova-compute back up on " + options["--plug"])
			connection.services.force_down(options["--plug"], "nova-compute", force_down=False)
			logging.info("Forced nova-compute back up on " + options["--plug"])
		except Exception as e:
			# In theory, if force_down=False fails, it fails for the exact
			# same possible reasons as force_down=True below,
			# e.g. either an incompatible version or an old client.
			# Since this merely forces the value back to its default,
			# there is no real harm in assuming the host is still okay
			# even if the command failed.
			logging.warning("Exception from attempt to force "
					"host back up via nova API: "
					"%s: %s" % (e.__class__.__name__, e))
		# Forcing the service back up in case it was disabled
		logging.info("Enabling nova-compute on " + options["--plug"])
		connection.services.enable(options["--plug"], 'nova-compute')
		# Pretend we're 'on' so that the fencing library doesn't loop forever waiting for the node to boot
		override_status = "on"
	elif status not in ["on"]:
		# Not safe to unfence, don't waste time looping to see if the status changes to "on"
		options["--power-timeout"] = "0"

def set_power_status_off(connection, options):
	status = get_power_status(connection, options)
	if status in [ "off" ]:
		return

	connection.services.disable(options["--plug"], 'nova-compute')
	try:
		# Until 2.53
		connection.services.force_down(
			options["--plug"], "nova-compute", force_down=True)
	except Exception as e:
		# Something went wrong when we tried to force the host down.
		# That could come from either an incompatible API version,
		# e.g. UnsupportedVersion or VersionNotFoundForAPIMethod,
		# or because novaclient is old and doesn't include force_down yet,
		# e.g. AttributeError.
		# In that case, fall back to waiting for Nova to catch the right state.
		logging.error("Exception from attempt to force host down via nova API: "
			      "%s: %s" % (e.__class__.__name__, e))
		# need to wait for nova to update its internal status or we
		# cannot call host-evacuate
		while get_power_status(connection, options) not in ["off"]:
			# Loop forever if need be.
			#
			# Some callers (such as Pacemaker) will have a timer
			# running and kill us if necessary
			logging.debug("Waiting for nova to update its internal state for %s" % options["--plug"])
			time.sleep(1)

	set_attrd_status(options["--plug"], "yes", options)

def set_power_status(connection, options):
	global override_status

	override_status = ""
	logging.debug("set action: " + options["--action"])

	if not connection:
		return

	if options["--action"] in ["off", "reboot"]:
		set_power_status_off(connection, options)
	else:
		set_power_status_on(connection, options)
	logging.debug("set action passed: " + options["--action"])
	sys.exit(0)

def fix_domain(connection, options):
	domains = {}
	last_domain = None

	if connection:
		# Find it in nova
		services = connection.services.list(binary="nova-compute")
		for service in services:
			shorthost = service.host.split('.')[0]

			if shorthost == service.host:
				# Nova is not using FQDN
				calculated = ""
			else:
				# Compute nodes are named as FQDN, strip off the hostname
				calculated = service.host.replace(shorthost + ".", "")

			if calculated == last_domain:
				# Avoid complaining for each compute node with the same name
				# One hopes they don't appear interleaved as A.com B.com A.com B.com
				logging.debug("Calculated the same domain from: %s" % service.host)
				continue

			domains[calculated] = service.host
			last_domain = calculated

			if "--domain" in options and options["--domain"] != calculated:
				# Warn in case nova isn't available at some point
				logging.warning("Supplied domain '%s' does not match the one calculated from: %s"
						% (options["--domain"], service.host))

	if len(domains) == 0 and "--domain" not in options:
		logging.error("Could not calculate the domain names used by compute nodes in nova")

	elif len(domains) == 1 and "--domain" not in options:
		options["--domain"] = last_domain

	elif len(domains) == 1 and options["--domain"] != last_domain:
		logging.error("Overriding supplied domain '%s' as it does not match the one calculated from: %s"
			      % (options["--domain"], domains[last_domain]))
		options["--domain"] = last_domain

	elif len(domains) > 1:
		logging.error("The supplied domain '%s' did not match any used inside nova: %s"
			      % (options["--domain"], repr(domains)))
		sys.exit(1)

	return last_domain
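# Worked example (hypothetical hostnames): if nova reports compute nodes as
# "compute-0.example.com" and "compute-1.example.com", fix_domain() calculates
# the domain "example.com"; a cluster that addresses the node as plain
# "compute-0" then has that domain re-appended by fix_plug_name() below.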
def fix_plug_name(connection, options):
	if options["--action"] == "list":
		return

	if "--plug" not in options:
		return

	calculated = fix_domain(connection, options)
	if calculated is None or "--domain" not in options:
		# Nothing supplied and nova not available... what to do... nothing
		return

	short_plug = options["--plug"].split('.')[0]
	logging.debug("Checking target '%s' against calculated domain '%s'" % (options["--plug"], calculated))

	if options["--domain"] == "":
		# Ensure any domain is stripped off since nova isn't using FQDN
		options["--plug"] = short_plug
	elif options["--plug"].endswith(options["--domain"]):
		# Plug already uses the domain, don't re-add
		return
	else:
		# Add the domain to the plug
		options["--plug"] = short_plug + "." + options["--domain"]

def get_plugs_list(connection, options):
	result = {}

	if connection:
		services = connection.services.list(binary="nova-compute")
		for service in services:
			longhost = service.host
			shorthost = longhost.split('.')[0]
			result[longhost] = ("", None)
			result[shorthost] = ("", None)
	return result

def create_nova_connection(options):
	nova = None

	try:
		from novaclient import client
		from novaclient.exceptions import NotAcceptable
	except ImportError:
		fail_usage("Nova not found or not accessible")

	versions = [ "2.11", "2" ]
	for version in versions:
		clientargs = inspect.getargspec(client.Client).varargs

		# Some versions of Openstack prior to Ocata only
		# supported positional arguments for username,
		# password, and tenant.
		#
		# Versions since Ocata only support named arguments.
		#
		# So we need to use introspection to figure out how to
		# create a Nova client.
		#
		# Happy days
		#
		if clientargs:
			# OSP < 11
			# ArgSpec(args=['version', 'username', 'password', 'project_id', 'auth_url'],
			#	  varargs=None,
			#	  keywords='kwargs', defaults=(None, None, None, None))
			nova = client.Client(version,
					     options["--username"],
					     options["--password"],
					     options["--tenant-name"],
					     options["--auth-url"],
					     insecure=options["--insecure"],
					     region_name=options["--region-name"],
					     endpoint_type=options["--endpoint-type"],
					     http_log_debug="--verbose" in options)
		else:
			# OSP >= 11
			# ArgSpec(args=['version'], varargs='args', keywords='kwargs', defaults=None)
			nova = client.Client(version,
					     username=options["--username"],
					     password=options["--password"],
					     tenant_name=options["--tenant-name"],
					     auth_url=options["--auth-url"],
					     insecure=options["--insecure"],
					     region_name=options["--region-name"],
					     endpoint_type=options["--endpoint-type"],
					     http_log_debug="--verbose" in options)

		try:
			nova.hypervisors.list()
			return nova

		except NotAcceptable as e:
			logging.warning(e)

		except Exception as e:
			logging.warning("Nova connection failed. %s: %s" % (e.__class__.__name__, e))

	logging.warning("Couldn't obtain a supported connection to nova, tried: %s\n" % repr(versions))
	return None
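# Note on the version list above: "2.11" is assumed here to be the compute API
# microversion that introduced the services force_down call, with plain "2" as
# the fallback for older clouds; create_nova_connection() simply tries each in
# turn and keeps the first client that can successfully list hypervisors.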
%s: %s" % (e.__class__.__name__, e)) logging.warning("Couldn't obtain a supported connection to nova, tried: %s\n" % repr(versions)) return None def define_new_opts(): - all_opt["endpoint-type"] = { + all_opt["endpoint_type"] = { "getopt" : "e:", "longopt" : "endpoint-type", "help" : "-e, --endpoint-type=[endpoint] Nova Endpoint type (publicURL, internalURL, adminURL)", "required" : "0", "shortdesc" : "Nova Endpoint type", "default" : "internalURL", "order": 1, } - all_opt["tenant-name"] = { + all_opt["tenant_name"] = { "getopt" : "t:", "longopt" : "tenant-name", "help" : "-t, --tenant-name=[tenant] Keystone Admin Tenant", "required" : "0", "shortdesc" : "Keystone Admin Tenant", "default" : "", "order": 1, } - all_opt["auth-url"] = { + all_opt["auth_url"] = { "getopt" : "k:", "longopt" : "auth-url", "help" : "-k, --auth-url=[url] Keystone Admin Auth URL", "required" : "0", "shortdesc" : "Keystone Admin Auth URL", "default" : "", "order": 1, } - all_opt["region-name"] = { + all_opt["region_name"] = { "getopt" : "", "longopt" : "region-name", "help" : "--region-name=[region] Region Name", "required" : "0", "shortdesc" : "Region Name", "default" : "", "order": 1, } all_opt["insecure"] = { "getopt" : "", "longopt" : "insecure", "help" : "--insecure Explicitly allow agent to perform \"insecure\" TLS (https) requests", "required" : "0", "shortdesc" : "Allow Insecure TLS Requests", "default" : "False", "order": 2, } all_opt["domain"] = { "getopt" : "d:", "longopt" : "domain", "help" : "-d, --domain=[string] DNS domain in which hosts live, useful when the cluster uses short names and nova uses FQDN", "required" : "0", "shortdesc" : "DNS domain in which hosts live", "order": 5, } - all_opt["record-only"] = { + all_opt["record_only"] = { "getopt" : "r:", "longopt" : "record-only", "help" : "--record-only Record the target as needing evacuation but as yet do not intiate it", "required" : "0", "shortdesc" : "Only record the target as needing evacuation", "default" : "False", "order": 5, } - all_opt["instance-filtering"] = { + all_opt["instance_filtering"] = { "getopt" : "", "longopt" : "instance-filtering", "help" : "--instance-filtering Allow instances created from images and flavors with evacuable=true to be evacuated (or all if no images/flavors have been tagged)", "required" : "0", "shortdesc" : "Allow instances to be evacuated", "default" : "True", "order": 5, } - all_opt["no-shared-storage"] = { + all_opt["no_shared_storage"] = { "getopt" : "", "longopt" : "no-shared-storage", "help" : "--no-shared-storage Disable functionality for shared storage", "required" : "0", "shortdesc" : "Disable functionality for dealing with shared storage", "default" : "False", "order": 5, } def set_multi_power_fn(connection, options, set_power_fn, get_power_fn, retry_attempts=1): for _ in range(retry_attempts): set_power_fn(connection, options) time.sleep(int(options["--power-wait"])) for _ in range(int(options["--power-timeout"])): if get_power_fn(connection, options) != options["--action"]: time.sleep(1) else: return True return False def main(): global override_status atexit.register(atexit_handler) - device_opt = ["login", "passwd", "tenant-name", "auth-url", "fabric_fencing", - "no_login", "no_password", "port", "domain", "no-shared-storage", "endpoint-type", - "record-only", "instance-filtering", "insecure", "region-name"] + device_opt = ["login", "passwd", "tenant_name", "auth_url", "fabric_fencing", + "no_login", "no_password", "port", "domain", "no_shared_storage", "endpoint_type", + "record_only", 
"instance_filtering", "insecure", "region_name"] define_new_opts() all_opt["shell_timeout"]["default"] = "180" options = check_input(device_opt, process_input(device_opt)) docs = {} docs["shortdesc"] = "Fence agent for the automatic resurrection of OpenStack compute instances" docs["longdesc"] = "Used to tell Nova that compute nodes are down and to reschedule flagged instances" docs["vendorurl"] = "" show_docs(options, docs) if options["--record-only"] in [ "2", "Disabled", "disabled" ]: sys.exit(0) run_delay(options) logging.debug("Running "+options["--action"]) connection = create_nova_connection(options) if options["--action"] in ["off", "on", "reboot", "status"]: fix_plug_name(connection, options) if options["--action"] in ["reboot"]: options["--action"]="off" if options["--action"] in ["off", "on"]: # No status first, call our own version result = not set_multi_power_fn(connection, options, set_power_status, get_power_status_simple, 1 + int(options["--retry-on"])) elif options["--action"] in ["monitor"]: result = 0 else: result = fence_action(connection, options, set_power_status, get_power_status_simple, get_plugs_list, None) logging.debug("Result for "+options["--action"]+": "+repr(result)) if result == None: result = 0 sys.exit(result) if __name__ == "__main__": main() diff --git a/fence/agents/compute/fence_evacuate.py b/fence/agents/compute/fence_evacuate.py index f8d1f31a..b29b5dd1 100644 --- a/fence/agents/compute/fence_evacuate.py +++ b/fence/agents/compute/fence_evacuate.py @@ -1,366 +1,366 @@ #!@PYTHON@ -tt import sys import time import atexit import logging import inspect import requests.exceptions sys.path.append("@FENCEAGENTSLIBDIR@") from fencing import * from fencing import fail_usage, is_executable, run_command, run_delay EVACUABLE_TAG = "evacuable" TRUE_TAGS = ['true'] def get_power_status(connection, options): status = "unknown" logging.debug("get action: " + options["--action"]) if connection: try: services = connection.services.list(host=options["--plug"], binary="nova-compute") for service in services: logging.debug("Status of %s is %s, %s" % (service.binary, service.state, service.status)) if service.state == "up" and service.status == "enabled": # Up and operational status = "on" elif service.state == "down" and service.status == "disabled": # Down and fenced status = "off" elif service.state == "down": # Down and requires fencing status = "failed" elif service.state == "up": # Up and requires unfencing status = "running" else: logging.warning("Unknown status detected from nova for %s: %s, %s" % (options["--plug"], service.state, service.status)) status = "%s %s" % (service.state, service.status) break except requests.exception.ConnectionError as err: logging.warning("Nova connection failed: " + str(err)) return status # NOTE(sbauza); We mimic the host-evacuate module since it's only a contrib # module which is not stable def _server_evacuate(connection, server, on_shared_storage): success = False error_message = "" try: logging.debug("Resurrecting instance: %s" % server) (response, dictionary) = connection.servers.evacuate(server=server, on_shared_storage=on_shared_storage) if response == None: error_message = "No response while evacuating instance" elif response.status_code == 200: success = True error_message = response.reason else: error_message = response.reason except Exception as e: error_message = "Error while evacuating instance: %s" % e return { "uuid": server, "accepted": success, "reason": error_message, } def _is_server_evacuable(server, 
def _is_server_evacuable(server, evac_flavors, evac_images):
	if server.flavor.get('id') in evac_flavors:
		return True
	if hasattr(server.image, 'get'):
		if server.image.get('id') in evac_images:
			return True
	logging.debug("Instance %s is not evacuable" % server.id)
	return False

def _get_evacuable_flavors(connection):
	result = []
	flavors = connection.flavors.list()
	# Since the detailed view for all flavors doesn't provide the extra specs,
	# we need to call each of the flavors to get them.
	for flavor in flavors:
		tag = flavor.get_keys().get(EVACUABLE_TAG)
		if tag and tag.strip().lower() in TRUE_TAGS:
			result.append(flavor.id)
	return result

def _get_evacuable_images(connection):
	result = []
	images = []
	if hasattr(connection, "images"):
		images = connection.images.list(detailed=True)
	elif hasattr(connection, "glance"):
		# OSP12+
		images = connection.glance.list()

	for image in images:
		if hasattr(image, 'metadata'):
			tag = image.metadata.get(EVACUABLE_TAG)
			if tag and tag.strip().lower() in TRUE_TAGS:
				result.append(image.id)
		elif hasattr(image, 'tags'):
			# OSP12+
			if EVACUABLE_TAG in image.tags:
				result.append(image.id)
	return result

def _host_evacuate(connection, options):
	result = True
	images = _get_evacuable_images(connection)
	flavors = _get_evacuable_flavors(connection)
	servers = connection.servers.list(search_opts={'host': options["--plug"], 'all_tenants': 1})

	if options["--instance-filtering"] == "False":
		logging.debug("Not evacuating anything")
		evacuables = []
	elif len(flavors) or len(images):
		logging.debug("Filtering images and flavors: %s %s" % (repr(flavors), repr(images)))

		# Identify all evacuable servers
		logging.debug("Checking %s" % repr(servers))
		evacuables = [server for server in servers
			      if _is_server_evacuable(server, flavors, images)]
		logging.debug("Evacuating %s" % repr(evacuables))
	else:
		logging.debug("Evacuating all images and flavors")
		evacuables = servers

	if options["--no-shared-storage"] != "False":
		on_shared_storage = False
	else:
		on_shared_storage = True

	for server in evacuables:
		logging.debug("Processing %s" % server)
		if hasattr(server, 'id'):
			response = _server_evacuate(connection, server.id, on_shared_storage)
			if response["accepted"]:
				logging.debug("Evacuated %s from %s: %s" %
					      (response["uuid"], options["--plug"], response["reason"]))
			else:
				logging.error("Evacuation of %s on %s failed: %s" %
					      (response["uuid"], options["--plug"], response["reason"]))
				result = False
		else:
			logging.error("Could not evacuate instance: %s" % server.to_dict())
			# Should a malformed instance result in a failed evacuation?
			# result = False
	return result
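# A hedged example of how the evacuable tag is expected to be set on the
# OpenStack side (exact CLI syntax varies by release; the flavor and image
# names here are hypothetical):
#
#   nova flavor-key m1.ha set evacuable=true
#   openstack image set --property evacuable=true rhel-guest-image
#
# Any flavor extra-spec or image property named "evacuable" whose value
# matches TRUE_TAGS marks the instances built from it for evacuation.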
def set_attrd_status(host, status, options):
	logging.debug("Setting fencing status for %s to %s" % (host, status))
	run_command(options, "attrd_updater -p -n evacuate -Q -N %s -U %s" % (host, status))

def set_power_status(connection, options):
	logging.debug("set action: " + options["--action"])

	if not connection:
		return

	if options["--action"] == "off" and not _host_evacuate(connection, options):
		sys.exit(1)

	sys.exit(0)

def get_plugs_list(connection, options):
	result = {}

	if connection:
		services = connection.services.list(binary="nova-compute")
		for service in services:
			longhost = service.host
			shorthost = longhost.split('.')[0]
			result[longhost] = ("", None)
			result[shorthost] = ("", None)
	return result

def create_nova_connection(options):
	nova = None

	try:
		from novaclient import client
		from novaclient.exceptions import NotAcceptable
	except ImportError:
		fail_usage("Nova not found or not accessible")

	versions = [ "2.11", "2" ]
	for version in versions:
		clientargs = inspect.getargspec(client.Client).varargs

		# Some versions of Openstack prior to Ocata only
		# supported positional arguments for username,
		# password, and tenant.
		#
		# Versions since Ocata only support named arguments.
		#
		# So we need to use introspection to figure out how to
		# create a Nova client.
		#
		# Happy days
		#
		if clientargs:
			# OSP < 11
			# ArgSpec(args=['version', 'username', 'password', 'project_id', 'auth_url'],
			#	  varargs=None,
			#	  keywords='kwargs', defaults=(None, None, None, None))
			nova = client.Client(version,
					     options["--username"],
					     options["--password"],
					     options["--tenant-name"],
					     options["--auth-url"],
					     insecure=options["--insecure"],
					     region_name=options["--region-name"],
					     endpoint_type=options["--endpoint-type"],
					     http_log_debug="--verbose" in options)
		else:
			# OSP >= 11
			# ArgSpec(args=['version'], varargs='args', keywords='kwargs', defaults=None)
			nova = client.Client(version,
					     username=options["--username"],
					     password=options["--password"],
					     tenant_name=options["--tenant-name"],
					     auth_url=options["--auth-url"],
					     insecure=options["--insecure"],
					     region_name=options["--region-name"],
					     endpoint_type=options["--endpoint-type"],
					     http_log_debug="--verbose" in options)

		try:
			nova.hypervisors.list()
			return nova

		except NotAcceptable as e:
			logging.warning(e)

		except Exception as e:
			logging.warning("Nova connection failed. %s: %s" % (e.__class__.__name__, e))

	logging.warning("Couldn't obtain a supported connection to nova, tried: %s\n" % repr(versions))
	return None
%s: %s" % (e.__class__.__name__, e)) logging.warning("Couldn't obtain a supported connection to nova, tried: %s\n" % repr(versions)) return None def define_new_opts(): - all_opt["endpoint-type"] = { + all_opt["endpoint_type"] = { "getopt" : "e:", "longopt" : "endpoint-type", "help" : "-e, --endpoint-type=[endpoint] Nova Endpoint type (publicURL, internalURL, adminURL)", "required" : "0", "shortdesc" : "Nova Endpoint type", "default" : "internalURL", "order": 1, } - all_opt["tenant-name"] = { + all_opt["tenant_name"] = { "getopt" : "t:", "longopt" : "tenant-name", "help" : "-t, --tenant-name=[tenant] Keystone Admin Tenant", "required" : "0", "shortdesc" : "Keystone Admin Tenant", "default" : "", "order": 1, } - all_opt["auth-url"] = { + all_opt["auth_url"] = { "getopt" : "k:", "longopt" : "auth-url", "help" : "-k, --auth-url=[url] Keystone Admin Auth URL", "required" : "0", "shortdesc" : "Keystone Admin Auth URL", "default" : "", "order": 1, } - all_opt["region-name"] = { + all_opt["region_name"] = { "getopt" : "", "longopt" : "region-name", "help" : "--region-name=[region] Region Name", "required" : "0", "shortdesc" : "Region Name", "default" : "", "order": 1, } all_opt["insecure"] = { "getopt" : "", "longopt" : "insecure", "help" : "--insecure Explicitly allow agent to perform \"insecure\" TLS (https) requests", "required" : "0", "shortdesc" : "Allow Insecure TLS Requests", "default" : "False", "order": 2, } all_opt["domain"] = { "getopt" : "d:", "longopt" : "domain", "help" : "-d, --domain=[string] DNS domain in which hosts live, useful when the cluster uses short names and nova uses FQDN", "required" : "0", "shortdesc" : "DNS domain in which hosts live", "order": 5, } - all_opt["instance-filtering"] = { + all_opt["instance_filtering"] = { "getopt" : "", "longopt" : "instance-filtering", "help" : "--instance-filtering Allow instances created from images and flavors with evacuable=true to be evacuated (or all if no images/flavors have been tagged)", "required" : "0", "shortdesc" : "Allow instances to be evacuated", "default" : "True", "order": 5, } - all_opt["no-shared-storage"] = { + all_opt["no_shared_storage"] = { "getopt" : "", "longopt" : "no-shared-storage", "help" : "--no-shared-storage Disable functionality for shared storage", "required" : "0", "shortdesc" : "Disable functionality for dealing with shared storage", "default" : "False", "order": 5, } def main(): atexit.register(atexit_handler) - device_opt = ["login", "passwd", "tenant-name", "auth-url", - "no_login", "no_password", "port", "domain", "no-shared-storage", "endpoint-type", - "instance-filtering", "insecure", "region-name"] + device_opt = ["login", "passwd", "tenant_name", "auth_url", + "no_login", "no_password", "port", "domain", "no_shared_storage", "endpoint_type", + "instance_filtering", "insecure", "region_name"] define_new_opts() all_opt["shell_timeout"]["default"] = "180" options = check_input(device_opt, process_input(device_opt)) docs = {} docs["shortdesc"] = "Fence agent for the automatic resurrection of OpenStack compute instances" docs["longdesc"] = "Used to reschedule flagged instances" docs["vendorurl"] = "" show_docs(options, docs) run_delay(options) connection = create_nova_connection(options) # Un-evacuating a server doesn't make sense if options["--action"] in ["on"]: logging.error("Action %s is not supported by this agent" % (options["--action"])) sys.exit(1) if options["--action"] in ["off", "reboot"]: status = get_power_status(connection, options) if status != "off": logging.error("Cannot 
	if options["--action"] in ["off", "reboot"]:
		status = get_power_status(connection, options)
		if status != "off":
			logging.error("Cannot resurrect instances from %s in state '%s'" % (options["--plug"], status))
			sys.exit(1)

		elif not _host_evacuate(connection, options):
			logging.error("Resurrection of instances from %s failed" % (options["--plug"]))
			sys.exit(1)

		logging.info("Resurrection of instances from %s complete" % (options["--plug"]))
		sys.exit(0)

	result = fence_action(connection, options, set_power_status, get_power_status, get_plugs_list, None)
	sys.exit(result)

if __name__ == "__main__":
	main()
diff --git a/fence/agents/scsi/fence_scsi.py b/fence/agents/scsi/fence_scsi.py
index dbc9c501..99c426e0 100644
--- a/fence/agents/scsi/fence_scsi.py
+++ b/fence/agents/scsi/fence_scsi.py
@@ -1,501 +1,501 @@
#!@PYTHON@ -tt

import sys
import stat
import re
import os
import time
import logging
import atexit
import hashlib
import ctypes

sys.path.append("@FENCEAGENTSLIBDIR@")
from fencing import fail_usage, run_command, atexit_handler, check_input, process_input, show_docs, fence_action, all_opt
from fencing import run_delay

STORE_PATH = "/var/run/cluster/fence_scsi"

def get_status(conn, options):
	del conn
	status = "off"
	for dev in options["devices"]:
		is_block_device(dev)
		reset_dev(options, dev)
		if options["--key"] in get_registration_keys(options, dev):
			status = "on"
		else:
			logging.debug("No registration for key "
				      + options["--key"] + " on device " + dev + "\n")
			if options["--action"] == "on":
				# Unfencing requires our key on every device, so a
				# single missing registration means "off"
				status = "off"
				break
	return status
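# Sketch of the reservation life cycle implemented below (with two nodes
# holding the hypothetical keys 3abc0001 and 3abc0002): "on" registers the
# local key on every device and takes a "write exclusive, registrants only"
# reservation if none exists; "off" uses preempt-and-abort with the *local*
# key to remove the victim's key, which immediately revokes its write access.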
def set_status(conn, options):
	del conn
	count = 0
	if options["--action"] == "on":
		set_key(options)
		for dev in options["devices"]:
			is_block_device(dev)

			register_dev(options, dev)
			if options["--key"] not in get_registration_keys(options, dev):
				count += 1
				logging.debug("Failed to register key "
					      + options["--key"] + " on device " + dev + "\n")
				continue
			dev_write(dev, options)

			if get_reservation_key(options, dev) is None \
			and not reserve_dev(options, dev) \
			and get_reservation_key(options, dev) is None:
				count += 1
				logging.debug("Failed to create reservation (key="
					      + options["--key"] + ", device=" + dev + ")\n")

	else:
		host_key = get_key()
		if host_key == options["--key"].lower():
			fail_usage("Failed: keys cannot be the same. You cannot fence yourself.")
		for dev in options["devices"]:
			is_block_device(dev)

			if options["--key"] in get_registration_keys(options, dev):
				preempt_abort(options, host_key, dev)

		for dev in options["devices"]:
			if options["--key"] in get_registration_keys(options, dev):
				count += 1
				logging.debug("Failed to remove key "
					      + options["--key"] + " on device " + dev + "\n")
				continue

			if not get_reservation_key(options, dev):
				count += 1
				logging.debug("No reservation exists on device " + dev + "\n")
	if count:
		logging.error("Failed to verify " + str(count) + " device(s)")
		sys.exit(1)

# check if host is ready to execute actions
def do_action_monitor(options):
	# Check if required binaries are installed
	if bool(run_cmd(options, options["--sg_persist-path"] + " -V")["err"]):
		logging.error("Unable to run " + options["--sg_persist-path"])
		return 1
	elif bool(run_cmd(options, options["--sg_turs-path"] + " -V")["err"]):
		logging.error("Unable to run " + options["--sg_turs-path"])
		return 1
	elif ("--devices" not in options and
			bool(run_cmd(options, options["--vgs-path"] + " --version")["err"])):
		logging.error("Unable to run " + options["--vgs-path"])
		return 1

	# Keys have to be present in order to fence/unfence
	get_key()
	dev_read()

	return 0

# run command; returns dict: ret["err"] = exit code, ret["out"] = output
def run_cmd(options, cmd):
	ret = {}
	(ret["err"], ret["out"], _) = run_command(options, cmd)
	ret["out"] = "".join([i for i in ret["out"] if i is not None])
	return ret

# check if device exists and is a block device
def is_block_device(dev):
	if not os.path.exists(dev):
		fail_usage("Failed: device \"" + dev + "\" does not exist")
	if not stat.S_ISBLK(os.stat(dev).st_mode):
		fail_usage("Failed: device \"" + dev + "\" is not a block device")

# cancel registration
def preempt_abort(options, host, dev):
	reset_dev(options, dev)
	cmd = options["--sg_persist-path"] + " -n -o -A -T 5 -K " + host + " -S " + options["--key"] + " -d " + dev
	return not bool(run_cmd(options, cmd)["err"])

def reset_dev(options, dev):
	return run_cmd(options, options["--sg_turs-path"] + " " + dev)["err"]

def register_dev(options, dev):
	dev = os.path.realpath(dev)
	if re.search(r"^dm", dev[5:]):
		for slave in get_mpath_slaves(dev):
			register_dev(options, slave)
		return True
	reset_dev(options, dev)
	cmd = options["--sg_persist-path"] + " -n -o -I -S " + options["--key"] + " -d " + dev
	cmd += " -Z" if "--aptpl" in options else ""
	# cmd return code != 0, but registration can still be successful
	return not bool(run_cmd(options, cmd)["err"])

def reserve_dev(options, dev):
	reset_dev(options, dev)
	cmd = options["--sg_persist-path"] + " -n -o -R -T 5 -K " + options["--key"] + " -d " + dev
	return not bool(run_cmd(options, cmd)["err"])

def get_reservation_key(options, dev):
	reset_dev(options, dev)
	cmd = options["--sg_persist-path"] + " -n -i -r -d " + dev
	out = run_cmd(options, cmd)
	if out["err"]:
		fail_usage("Cannot get reservation key")
	match = re.search(r"\s+key=0x(\S+)\s+", out["out"], re.IGNORECASE)
	return match.group(1) if match else None

def get_registration_keys(options, dev):
	reset_dev(options, dev)
	keys = []
	cmd = options["--sg_persist-path"] + " -n -i -k -d " + dev
	out = run_cmd(options, cmd)
	if out["err"]:
		fail_usage("Cannot get registration keys")
	for line in out["out"].split("\n"):
		match = re.search(r"\s+0x(\S+)\s*", line)
		if match:
			keys.append(match.group(1))
	return keys
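# Hedged example of the sg_persist output parsed above (exact layout varies
# with the sg3_utils version); "sg_persist -n -i -k -d /dev/sdc" is assumed
# to print something like:
#
#   PR generation=0x4, 2 registered reservation keys follow:
#     0x3abc0001
#     0x3abc0002
#
# so get_registration_keys() collects every "0x..." token, one per line.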
def get_cluster_id(options):
	cmd = options["--corosync-cmap-path"] + " totem.cluster_name"

	match = re.search(r"\(str\) = (\S+)\n", run_cmd(options, cmd)["out"])
	if not match:
		fail_usage("Failed: cannot get cluster name")

	try:
		return hashlib.md5(match.group(1).encode("utf-8")).hexdigest()
	except ValueError:
		# FIPS requires usedforsecurity=False, which might not be
		# available on all distros: https://bugs.python.org/issue9216
		return hashlib.md5(match.group(1).encode("utf-8"), usedforsecurity=False).hexdigest()

def get_node_id(options):
	cmd = options["--corosync-cmap-path"] + " nodelist."

	match = re.search(r".(\d).ring._addr \(str\) = " + options["--nodename"] + "\n", run_cmd(options, cmd)["out"])
	return match.group(1) if match else fail_usage("Failed: unable to parse output of corosync-cmapctl or node does not exist")

def generate_key(options):
	return "%.4s%.4d" % (get_cluster_id(options), int(get_node_id(options)))

# save node key to file
def set_key(options):
	file_path = options["store_path"] + ".key"
	if not os.path.isdir(os.path.dirname(options["store_path"])):
		os.makedirs(os.path.dirname(options["store_path"]))
	try:
		f = open(file_path, "w")
	except IOError:
		fail_usage("Failed: Cannot open file \"" + file_path + "\"")
	f.write(options["--key"].lower() + "\n")
	f.close()

# read node key from file
def get_key(fail=True):
	file_path = STORE_PATH + ".key"
	try:
		f = open(file_path, "r")
	except IOError:
		if fail:
			fail_usage("Failed: Cannot open file \"" + file_path + "\"")
		else:
			return None
	return f.readline().strip().lower()

def dev_write(dev, options):
	file_path = options["store_path"] + ".dev"
	if not os.path.isdir(os.path.dirname(options["store_path"])):
		os.makedirs(os.path.dirname(options["store_path"]))
	try:
		f = open(file_path, "a+")
	except IOError:
		fail_usage("Failed: Cannot open file \"" + file_path + "\"")
	out = f.read()
	if not re.search(r"^" + dev + r"\s+", out, re.MULTILINE):
		f.write(dev + "\n")
	f.close()

def dev_read(fail=True):
	file_path = STORE_PATH + ".dev"
	try:
		f = open(file_path, "r")
	except IOError:
		if fail:
			fail_usage("Failed: Cannot open file \"" + file_path + "\"")
		else:
			return None
	# get non-empty lines from file
	devs = [line.strip() for line in f if line.strip()]
	f.close()
	return devs

def dev_delete(options):
	file_path = options["store_path"] + ".dev"
	if os.path.exists(file_path):
		os.remove(file_path)

def get_clvm_devices(options):
	devs = []
	cmd = options["--vgs-path"] + " " + \
	      "--noheadings " + \
	      "--separator : " + \
	      "--sort pv_uuid " + \
	      "--options vg_attr,pv_name " + \
	      "--config 'global { locking_type = 0 } devices { preferred_names = [ \"^/dev/dm\" ] }'"
	out = run_cmd(options, cmd)
	if out["err"]:
		fail_usage("Failed: Cannot get clvm devices")
	for line in out["out"].split("\n"):
		if 'c' in line.split(":")[0]:
			devs.append(line.split(":")[1])
	return devs

def get_mpath_slaves(dev):
	if dev[:5] == "/dev/":
		dev = dev[5:]
	slaves = [i for i in os.listdir("/sys/block/" + dev + "/slaves/") if i[:1] != "."]
	if slaves[0][:2] == "dm":
		slaves = get_mpath_slaves(slaves[0])
	else:
		slaves = ["/dev/" + x for x in slaves]
	return slaves
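# Worked example for generate_key() above, with hypothetical inputs: a
# cluster name hashing to "3abc..." and corosync node id 1 yield the key
# "3abc0001": the first four hex digits of the cluster hash ("%.4s") plus the
# zero-padded node id ("%.4d").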
def define_new_opts():
	all_opt["devices"] = {
		"getopt" : "d:",
		"longopt" : "devices",
		"help" : "-d, --devices=[devices] List of devices to use for current operation",
		"required" : "0",
		"shortdesc" : "List of devices to use for current operation. Devices can \
be comma-separated list of raw devices (eg. /dev/sdc). Each device must support SCSI-3 \
persistent reservations.",
		"order": 1
	}
	all_opt["nodename"] = {
		"getopt" : "n:",
		"longopt" : "nodename",
		"help" : "-n, --nodename=[nodename] Name of the node to be fenced",
		"required" : "0",
		"shortdesc" : "Name of the node to be fenced. The node name is used to \
generate the key value used for the current operation. This option will be \
ignored when used with the -k option.",
		"order": 1
	}
	all_opt["key"] = {
		"getopt" : "k:",
		"longopt" : "key",
		"help" : "-k, --key=[key] Key to use for the current operation",
		"required" : "0",
		"shortdesc" : "Key to use for the current operation. This key should be \
unique to a node. For the \"on\" action, the key specifies the key used to \
register the local node. For the \"off\" action, this key specifies the key to \
be removed from the device(s).",
		"order": 1
	}
	all_opt["aptpl"] = {
		"getopt" : "a",
		"longopt" : "aptpl",
		"help" : "-a, --aptpl Use the APTPL flag for registrations",
		"required" : "0",
		"shortdesc" : "Use the APTPL flag for registrations. This option is only used for the 'on' action.",
		"order": 1
	}
	all_opt["logfile"] = {
		"getopt" : ":",
		"longopt" : "logfile",
		"help" : "-f, --logfile Log output (stdout and stderr) to file",
		"required" : "0",
		"shortdesc" : "Log output (stdout and stderr) to file",
		"order": 5
	}
-	all_opt["corosync-cmap_path"] = {
+	all_opt["corosync_cmap_path"] = {
		"getopt" : ":",
		"longopt" : "corosync-cmap-path",
		"help" : "--corosync-cmap-path=[path] Path to corosync-cmapctl binary",
		"required" : "0",
		"shortdesc" : "Path to corosync-cmapctl binary",
		"default" : "@COROSYNC_CMAPCTL_PATH@",
		"order": 300
	}
	all_opt["sg_persist_path"] = {
		"getopt" : ":",
		"longopt" : "sg_persist-path",
		"help" : "--sg_persist-path=[path] Path to sg_persist binary",
		"required" : "0",
		"shortdesc" : "Path to sg_persist binary",
		"default" : "@SG_PERSIST_PATH@",
		"order": 300
	}
	all_opt["sg_turs_path"] = {
		"getopt" : ":",
		"longopt" : "sg_turs-path",
		"help" : "--sg_turs-path=[path] Path to sg_turs binary",
		"required" : "0",
		"shortdesc" : "Path to sg_turs binary",
		"default" : "@SG_TURS_PATH@",
		"order": 300
	}
	all_opt["vgs_path"] = {
		"getopt" : ":",
		"longopt" : "vgs-path",
		"help" : "--vgs-path=[path] Path to vgs binary",
		"required" : "0",
		"shortdesc" : "Path to vgs binary",
		"default" : "@VGS_PATH@",
		"order": 300
	}

def scsi_check_get_verbose():
	try:
		f = open("/etc/sysconfig/watchdog", "r")
	except IOError:
		return False
	match = re.search(r"^\s*verbose=yes", "".join(f.readlines()), re.MULTILINE)
	f.close()
	return bool(match)

def scsi_check(hardreboot=False):
	if len(sys.argv) >= 3 and sys.argv[1] == "repair":
		return int(sys.argv[2])
	options = {}
	options["--sg_turs-path"] = "@SG_TURS_PATH@"
	options["--sg_persist-path"] = "@SG_PERSIST_PATH@"
	options["--power-timeout"] = "5"
	if scsi_check_get_verbose():
		logging.getLogger().setLevel(logging.DEBUG)
	devs = dev_read(fail=False)
	if not devs:
		logging.error("No devices found")
		return 0
	key = get_key(fail=False)
	if not key:
		logging.error("Key not found")
		return 0
	for dev in devs:
		if key in get_registration_keys(options, dev):
			logging.debug("key " + key + " registered with device " + dev)
			return 0
		else:
			logging.debug("key " + key + " not registered with device " + dev)
	logging.debug("key " + key + " not registered with any device")
	if hardreboot:
		libc = ctypes.cdll['libc.so.6']
		libc.reboot(0x1234567)
	return 2
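# scsi_check() is the entry point used when this script is invoked as
# fence_scsi_check / fence_scsi_check_hardreboot (see the dispatch in main()
# below), which is intended to be run periodically from a watchdog daemon:
# returning 2 (or triggering an immediate libc reboot in the hardreboot
# variant) self-fences a node whose key has been preempted from all devices.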
def main():
	atexit.register(atexit_handler)

	device_opt = ["no_login", "no_password", "devices", "nodename", "key",
-		      "aptpl", "fabric_fencing", "on_target", "corosync-cmap_path",
+		      "aptpl", "fabric_fencing", "on_target", "corosync_cmap_path",
		      "sg_persist_path", "sg_turs_path", "logfile", "vgs_path", "force_on"]

	define_new_opts()

	all_opt["delay"]["getopt"] = "H:"

	# fence_scsi_check
	if os.path.basename(sys.argv[0]) == "fence_scsi_check":
		sys.exit(scsi_check())
	elif os.path.basename(sys.argv[0]) == "fence_scsi_check_hardreboot":
		sys.exit(scsi_check(True))

	options = check_input(device_opt, process_input(device_opt), other_conditions=True)

	docs = {}
	docs["shortdesc"] = "Fence agent for SCSI persistent reservation"
	docs["longdesc"] = "fence_scsi is an I/O fencing agent that uses SCSI-3 \
persistent reservations to control access to shared storage devices. These \
devices must support SCSI-3 persistent reservations (SPC-3 or greater) as \
well as the \"preempt-and-abort\" subcommand.\nThe fence_scsi agent works by \
having each node in the cluster register a unique key with the SCSI \
device(s). Once registered, a single node will become the reservation holder \
by creating a \"write exclusive, registrants only\" reservation on the \
device(s). The result is that only registered nodes may write to the \
device(s). When a node failure occurs, the fence_scsi agent will remove the \
key belonging to the failed node from the device(s). The failed node will no \
longer be able to write to the device(s). A manual reboot is required."
	docs["vendorurl"] = ""
	show_docs(options, docs)

	run_delay(options)

	# backward compatibility layer BEGIN
	if "--logfile" in options:
		try:
			logfile = open(options["--logfile"], 'w')
			sys.stderr = logfile
			sys.stdout = logfile
		except IOError:
			fail_usage("Failed: Unable to create file " + options["--logfile"])
	# backward compatibility layer END

	options["store_path"] = STORE_PATH

	# Input control BEGIN
	stop_after_error = False if options["--action"] == "validate-all" else True

	if options["--action"] == "monitor":
		sys.exit(do_action_monitor(options))

	if not (("--nodename" in options and options["--nodename"])
			or ("--key" in options and options["--key"])):
		fail_usage("Failed: nodename or key is required", stop_after_error)

	if not ("--key" in options and options["--key"]):
		options["--key"] = generate_key(options)

	if options["--key"] == "0" or not options["--key"]:
		fail_usage("Failed: key cannot be 0", stop_after_error)

	if options["--action"] == "validate-all":
		sys.exit(0)

	options["--key"] = options["--key"].lstrip('0')

	if not ("--devices" in options and options["--devices"].split(",")):
		options["devices"] = get_clvm_devices(options)
	else:
		options["devices"] = options["--devices"].split(",")

	if not options["devices"]:
		fail_usage("Failed: No devices found")
	# Input control END

	result = fence_action(None, options, set_status, get_status)
	sys.exit(result)

if __name__ == "__main__":
	main()
diff --git a/tests/data/metadata/fence_compute.xml b/tests/data/metadata/fence_compute.xml
index 2945d1cb..98c7d480 100644
--- a/tests/data/metadata/fence_compute.xml
+++ b/tests/data/metadata/fence_compute.xml
@@ -1,207 +1,172 @@
Used to tell Nova that compute nodes are down and to reschedule flagged instances Fencing action - + Keystone Admin Auth URL - - - - Keystone Admin Auth URL - - - - - Nova Endpoint type - - + Nova Endpoint type Login name Login password or passphrase Script to run to retrieve password Login password or passphrase Script to run to retrieve password Physical plug number on device, UUID or identification of machine Physical plug number on device, UUID or identification of machine - + Region Name - - - - Region Name - - - - - Keystone Admin Tenant - - + Keystone Admin Tenant Login name Allow Insecure TLS Requests DNS domain in which hosts live - + Allow instances to be evacuated - - - - Allow instances to be evacuated - - - - - Disable functionality for dealing with shared storage - - + Disable functionality for dealing with shared storage - - - - Only
record the target as needing evacuation - - + Only record the target as needing evacuation Disable logging to stderr. Does not affect --verbose or --debug logging to syslog. Verbose mode Write debug information to given file Write debug information to given file Display version information and exit Display help and exit Separator for CSV created by 'list' operation Wait X seconds before fencing is started Wait X seconds for cmd prompt after login Test X seconds for status change after ON/OFF Wait X seconds after issuing ON/OFF Wait X seconds for cmd prompt after issuing command Count of attempts to retry power on
diff --git a/tests/data/metadata/fence_evacuate.xml b/tests/data/metadata/fence_evacuate.xml
index e2c13494..a636af60 100644
--- a/tests/data/metadata/fence_evacuate.xml
+++ b/tests/data/metadata/fence_evacuate.xml
@@ -1,198 +1,168 @@
Used to reschedule flagged instances Fencing action - + Keystone Admin Auth URL - - - - Keystone Admin Auth URL - - - - - Nova Endpoint type - - + Nova Endpoint type Login name Login password or passphrase Script to run to retrieve password Login password or passphrase Script to run to retrieve password Physical plug number on device, UUID or identification of machine Physical plug number on device, UUID or identification of machine - - - - Region Name - - + Region Name - - - - Keystone Admin Tenant - - + Keystone Admin Tenant Login name Allow Insecure TLS Requests DNS domain in which hosts live - - - - Allow instances to be evacuated - - + Allow instances to be evacuated - - - - Disable functionality for dealing with shared storage - - + Disable functionality for dealing with shared storage Disable logging to stderr. Does not affect --verbose or --debug logging to syslog. Verbose mode Write debug information to given file Write debug information to given file Display version information and exit Display help and exit Separator for CSV created by 'list' operation Wait X seconds before fencing is started Wait X seconds for cmd prompt after login Test X seconds for status change after ON/OFF Wait X seconds after issuing ON/OFF Wait X seconds for cmd prompt after issuing command Count of attempts to retry power on
diff --git a/tests/data/metadata/fence_scsi.xml b/tests/data/metadata/fence_scsi.xml
index 58447b69..54fc9470 100644
--- a/tests/data/metadata/fence_scsi.xml
+++ b/tests/data/metadata/fence_scsi.xml
@@ -1,126 +1,122 @@
fence_scsi is an I/O fencing agent that uses SCSI-3 persistent reservations to control access to shared storage devices. These devices must support SCSI-3 persistent reservations (SPC-3 or greater) as well as the "preempt-and-abort" subcommand. The fence_scsi agent works by having each node in the cluster register a unique key with the SCSI device(s). Once registered, a single node will become the reservation holder by creating a "write exclusive, registrants only" reservation on the device(s). The result is that only registered nodes may write to the device(s). When a node failure occurs, the fence_scsi agent will remove the key belonging to the failed node from the device(s). The failed node will no longer be able to write to the device(s). A manual reboot is required. Fencing action Use the APTPL flag for registrations. This option is only used for the 'on' action. List of devices to use for current operation. Devices can be comma-separated list of raw devices (eg. /dev/sdc). Each device must support SCSI-3 persistent reservations. Key to use for the current operation. This key should be unique to a node.
For the "on" action, the key specifies the key use to register the local node. For the "off" action, this key specifies the key to be removed from the device(s). Name of the node to be fenced. The node name is used to generate the key value used for the current operation. This option will be ignored when used with the -k option. Log output (stdout and stderr) to file Disable logging to stderr. Does not affect --verbose or --debug logging to syslog. Verbose mode Write debug information to given file Write debug information to given file Display version information and exit Display help and exit Wait X seconds before fencing is started Wait X seconds for cmd prompt after login Test X seconds for status change after ON/OFF Wait X seconds after issuing ON/OFF Wait X seconds for cmd prompt after issuing command Count of attempts to retry power on - - - Path to corosync-cmapctl binary - - + Path to corosync-cmapctl binary Path to sg_persist binary Path to sg_turs binary Path to vgs binary