diff --git a/cts/CIB.py b/cts/CIB.py
index bbf0c73fd4..e3ac13aa21 100644
--- a/cts/CIB.py
+++ b/cts/CIB.py
@@ -1,565 +1,565 @@
'''CTS: Cluster Testing System: CIB generator
'''
__copyright__='''
Author: Andrew Beekhof
Copyright (C) 2008 Andrew Beekhof
'''

from UserDict import UserDict
import sys, time, types, syslog, os, struct, string, signal, traceback, warnings, socket

from cts.CTSvars import *
from cts.CTS import ClusterManager

class CibBase:
    cts_cib = None
    cib_tmpfile = None
    version = "unknown"
    feature_set = "unknown"
    target = None

    def __init__(self, CM, tmpfile=None):
        self.CM = CM
        #self.target = self.CM.Env["nodes"][0]

        if not tmpfile:
            warnings.filterwarnings("ignore")
            self.cib_tmpfile=os.tmpnam()
            warnings.resetwarnings()
        else:
            self.cib_tmpfile = tmpfile

    def version(self):
        return self.version

    def NextIP(self):
        fields = string.split(self.CM.Env["IPBase"], '.')
        fields[3] = str(int(fields[3])+1)
        ip = string.join(fields, '.')
        self.CM.Env["IPBase"] = ip
        return ip
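# NextIP() above hands out consecutive addresses by bumping the final octet of
# the IPBase kept in the test environment. A minimal standalone sketch of the
# same allocation scheme; the helper name and the "10.0.0.10" base are
# illustrative, not part of CTS:

def next_ip(base):
    """Return the address after 'base' by incrementing its last octet."""
    fields = base.split('.')
    fields[3] = str(int(fields[3]) + 1)
    return '.'.join(fields)

# next_ip("10.0.0.10") -> "10.0.0.11"; each generated IPaddr resource therefore
# gets the next address in sequence. Note there is no wrap-around handling, so
# a last octet above 255 would yield an invalid address.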
("lsb-with-group", "lsb_dummy", "group-1", "Started", "INFINITY") # per node resource for node in self.CM.Env["nodes"]: per_node_resources = self.NewIP("rsc_"+node) per_node_constraint = self.per_node_constraint_template % (node, "rsc_"+node, node) resources += per_node_resources constraints += per_node_constraint # Ping the test master resources += self.pingd_resource_template % os.uname()[1] # Require conectivity to run constraints += self.pingd_constraint_template % ("master-1", "master-1", "m", "Started", "m", 1) if self.CM.Env["DoFencing"]: p_name = None p_value = None entries = string.split(self.CM.Env["stonith-params"], ',') for entry in entries: (p_name, p_value) = string.split(entry, '=') if p_name == "hostlist" and p_value == "all": p_value = string.join(self.CM.Env["nodes"], " ") stonith_resource = self.stonith_resource_template % (self.CM.Env["stonith-type"], p_name, p_value) resources += stonith_resource #master slave resource resources += self.master_slave_resource % (num_nodes, 1, 1, 1) # generate cib self.cts_cib = self.cib_template % (cib_options, resources, constraints) return self.cts_cib class CIB10(CibBase): feature_set = "3.0" version = "pacemaker-1.0" cib_template = ''' ''' def _create(self, command): fixed = "HOME=/root CIB_file="+self.cib_tmpfile+" crm --force configure " + command rc = self.CM.rsh(self.target, fixed) if rc != 0: self.CM.log("Configure call failed: "+fixed) sys.exit(1) def _show(self, command=""): output = "" (rc, result) = self.CM.rsh(self.target, "HOME=/root CIB_file="+self.cib_tmpfile+" crm configure show "+command, None, ) for line in result: output += line self.CM.debug("Generated Config: "+line) return output def NewIP(self, name=None, standard="ocf:heartbeat"): ip = self.NextIP() if not name: name = "r"+ip if not standard: standard = "" else: standard += ":" self._create('''primitive %s %sIPaddr params ip=%s cidr_netmask=32 op monitor interval=5s''' % (name, standard, ip)) return name def install(self, target): old = self.cib_tmpfile # Force a rebuild self.cts_cib = None self.cib_tmpfile = CTSvars.CRM_CONFIG_DIR+"/cib.xml" self.contents(target) self.CM.rsh(self.target, "chown "+CTSvars.CRM_DAEMON_USER+" "+self.cib_tmpfile) self.cib_tmpfile = old def contents(self, target=None): # fencing resource if self.cts_cib: return self.cts_cib if not target: self.target = self.CM.Env["nodes"][0] else: self.target = target cib_base = self.cib_template % (self.feature_set, self.version, ''' remote-tls-port='9898' remote-clear-port='9999' ''') self.CM.rsh(self.target, '''echo "%s" > %s''' % (cib_base, self.cib_tmpfile)) #self.CM.rsh.cp(self.cib_tmpfile, "root@%s:%s" % (self.target, self.cib_tmpfile)) nodelist = "" self.num_nodes = 0 for node in self.CM.Env["nodes"]: nodelist += node + " " self.num_nodes = self.num_nodes + 1 no_quorum = "stop" if self.num_nodes < 3: no_quorum = "ignore" self.CM.debug("Cluster only has %d nodes, ignoring quorum" % self.num_nodes) # The shell no longer functions when the lrmd isn't running, how wonderful # Start one here and let the cluster clean it up when the full stack starts # Just hope target has the same location for lrmd self.CM.rsh(self.target, CTSvars.CRM_DAEMON_DIR+"/lrmd", synchronous=0) # Tell the shell to mind its own business, we know what we're doing self.CM.rsh(self.target, "crm options check-mode relaxed") # Fencing resource # Define first so that the shell doesn't reject every update if self.CM.Env["DoFencing"]: params = None entries = string.split(self.CM.Env["stonith-params"], ',') for entry in entries: 
class CIB10(CibBase):
    feature_set = "3.0"
    version = "pacemaker-1.0"

    cib_template = ''' '''

    def _create(self, command):
        fixed = "HOME=/root CIB_file="+self.cib_tmpfile+" crm --force configure " + command
        rc = self.CM.rsh(self.target, fixed)
        if rc != 0:
            self.CM.log("Configure call failed: "+fixed)
            sys.exit(1)

    def _show(self, command=""):
        output = ""
        (rc, result) = self.CM.rsh(self.target, "HOME=/root CIB_file="+self.cib_tmpfile+" crm configure show "+command, None)
        for line in result:
            output += line
            self.CM.debug("Generated Config: "+line)
        return output

    def NewIP(self, name=None, standard="ocf:heartbeat"):
        ip = self.NextIP()
        if not name:
            name = "r"+ip

        if not standard:
            standard = ""
        else:
            standard += ":"

        self._create('''primitive %s %sIPaddr params ip=%s cidr_netmask=32 op monitor interval=5s''' % (name, standard, ip))
        return name

    def install(self, target):
        old = self.cib_tmpfile

        # Force a rebuild
        self.cts_cib = None

        self.cib_tmpfile = CTSvars.CRM_CONFIG_DIR+"/cib.xml"
        self.contents(target)
        self.CM.rsh(self.target, "chown "+CTSvars.CRM_DAEMON_USER+" "+self.cib_tmpfile)

        self.cib_tmpfile = old

    def contents(self, target=None):
        # fencing resource
        if self.cts_cib:
            return self.cts_cib

        if not target:
            self.target = self.CM.Env["nodes"][0]
        else:
            self.target = target

        cib_base = self.cib_template % (self.feature_set, self.version, ''' remote-tls-port='9898' remote-clear-port='9999' ''')
        self.CM.rsh(self.target, '''echo "%s" > %s''' % (cib_base, self.cib_tmpfile))
        #self.CM.rsh.cp(self.cib_tmpfile, "root@%s:%s" % (self.target, self.cib_tmpfile))

        nodelist = ""
        self.num_nodes = 0
        for node in self.CM.Env["nodes"]:
            nodelist += node + " "
            self.num_nodes = self.num_nodes + 1

        no_quorum = "stop"
        if self.num_nodes < 3:
            no_quorum = "ignore"
            self.CM.debug("Cluster only has %d nodes, ignoring quorum" % self.num_nodes)

        # The shell no longer functions when the lrmd isn't running, how wonderful
        # Start one here and let the cluster clean it up when the full stack starts
        # Just hope target has the same location for lrmd
        self.CM.rsh(self.target, CTSvars.CRM_DAEMON_DIR+"/lrmd", synchronous=0)

        # Tell the shell to mind its own business, we know what we're doing
        self.CM.rsh(self.target, "crm options check-mode relaxed")

        # Fencing resource
        # Define first so that the shell doesn't reject every update
        if self.CM.Env["DoFencing"]:
            params = None
            entries = string.split(self.CM.Env["stonith-params"], ',')
            for entry in entries:
                (name, value) = string.split(entry, '=')
                if name == "hostlist" and value == "all":
                    value = string.join(self.CM.Env["nodes"], " ")

                if params:
                    params = ("""%s '%s="%s"' """ % (params, name, value))
                else:
                    params = ("""'%s="%s"' """ % (name, value))

            if params:
                params = "params %s" % params
            else:
                params = ""

            self._create('''primitive FencingChild stonith::%s %s op monitor interval=120s timeout=300 op start interval=0 timeout=180s op stop interval=0 timeout=180s''' % (self.CM.Env["stonith-type"], params))
            # Set a threshold for unreliable stonith devices such as the vmware one
            self._create('''clone Fencing FencingChild meta globally-unique=false migration-threshold=5''')

        self._create('''property stonith-enabled=%s''' % (self.CM.Env["DoFencing"]))
        self._create('''property start-failure-is-fatal=false pe-input-series-max=5000 default-action-timeout=60s''')
        self._create('''property shutdown-escalation=5min startup-fencing=false batch-limit=10 dc-deadtime=5s''')
        self._create('''property no-quorum-policy=%s expected-quorum-votes=%d''' % (no_quorum, self.num_nodes))

        if self.CM.Env["DoBSC"] == 1:
            self._create('''property ident-string="Linux-HA TEST configuration file - REMOVEME!!"''')

        # Add resources?
        if self.CM.Env["CIBResource"] == 1:
            self.add_resources()

        if self.CM.cluster_monitor == 1:
            self._create('''primitive cluster_mon ocf:pacemaker:ClusterMon params update=10 extra_options="-r -n" user=abeekhof htmlfile=/suse/abeekhof/Export/cluster.html op start interval=0 requires=nothing op monitor interval=5s requires=nothing''')
            self._create('''location prefer-dc cluster_mon rule -INFINITY: \#is_dc eq false''')

        # generate cib
        self.cts_cib = self._show("xml")

        if self.cib_tmpfile != CTSvars.CRM_CONFIG_DIR+"/cib.xml":
            self.CM.rsh(self.target, "rm -f "+self.cib_tmpfile)

        return self.cts_cib

    def add_resources(self):
        # Group Resource
        r1 = self.NewIP()
        #ip = self.NextIP()
        #r2 = "r"+ip
        #self._create('''primitive %s heartbeat::IPaddr params 1=%s/32 op monitor interval=5s''' % (r2, ip))
        r2 = self.NewIP()
        r3 = self.NewIP()
        self._create('''group group-1 %s %s %s''' % (r1, r2, r3))

        # Per-node resources
        for node in self.CM.Env["nodes"]:
            r = self.NewIP("rsc_"+node)
            self._create('''location prefer-%s %s rule 100: \#uname eq %s''' % (node, r, node))

        # LSB resource
        lsb_agent = self.CM.install_helper("LSBDummy")

        self._create('''primitive lsb-dummy lsb::''' +lsb_agent+ ''' op monitor interval=5s''')
        self._create('''colocation lsb-with-group INFINITY: lsb-dummy group-1''')
        self._create('''order lsb-after-group mandatory: group-1 lsb-dummy symmetrical=true''')

        # Migrator
        self._create('''primitive migrator ocf:pacemaker:Dummy meta allow-migrate=1 op monitor interval=P10S''')

        # Ping the test master
        # Use the IP where possible to avoid name lookup failures
        ourself = socket.gethostname()
        for ip in socket.gethostbyname_ex(socket.gethostname())[2]:
            if ip != "127.0.0.1":
                ourself = ip
                break

-        self._create('''primitive ping-1 ocf:pacemaker:ping params host_list=%s name=connected debug=true op monitor interval=120s''' % ourself)
+        self._create('''primitive ping-1 ocf:pacemaker:ping params host_list=%s name=connected debug=true op monitor interval=60s''' % ourself)
        self._create('''clone Connectivity ping-1 meta globally-unique=false''')

        #master slave resource
        self._create('''primitive stateful-1 ocf:pacemaker:Stateful op monitor interval=15s timeout=60s op monitor interval=16s role=Master timeout=60s ''')
        self._create('''ms master-1 stateful-1 meta clone-max=%d clone-node-max=%d master-max=%d master-node-max=%d''' % (self.num_nodes, 1, 1, 1))

        # Require connectivity to run the master
        self._create('''location %s-is-connected %s rule -INFINITY: connected lt %d or not_defined connected''' % ("m1", "master-1", 1))

        # Group with the master
        self._create('''colocation group-with-master INFINITY: group-1 master-1:Master''')
        self._create('''order group-after-master mandatory: master-1:promote group-1:start symmetrical=true''')
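# CIB10 builds its configuration by feeding "crm configure" one command at a
# time, pointed at a scratch CIB through the CIB_file environment variable. A
# sketch of the command line _create() assembles; the path and the property
# command are illustrative:

def crm_configure_cmd(cib_file, command):
    """Reproduce the string _create() hands to rsh()."""
    return "HOME=/root CIB_file=" + cib_file + " crm --force configure " + command

# crm_configure_cmd("/tmp/cts.cib", "property stonith-enabled=false")
# -> "HOME=/root CIB_file=/tmp/cts.cib crm --force configure property stonith-enabled=false"
# _show() then reads the accumulated result back with "crm configure show xml".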
class HASI(CIB10):
    def add_resources(self):
        # DLM resource
        self._create('''primitive dlm ocf:pacemaker:controld op monitor interval=120s''')
        self._create('''clone dlm-clone dlm meta globally-unique=false interleave=true''')

        # O2CB resource
        self._create('''primitive o2cb ocf:ocfs2:o2cb op monitor interval=120s''')
        self._create('''clone o2cb-clone o2cb meta globally-unique=false interleave=true''')
        self._create('''colocation o2cb-with-dlm INFINITY: o2cb-clone dlm-clone''')
        self._create('''order start-o2cb-after-dlm mandatory: dlm-clone o2cb-clone''')

class ConfigFactory:
    def __init__(self, CM):
        self.CM = CM
        self.register("pacemaker06", CIB06, CM)
        self.register("pacemaker10", CIB10, CM)
        self.register("hae", HASI, CM)

    def register(self, methodName, constructor, *args, **kargs):
        """register a constructor"""
        _args = [constructor]
        _args.extend(args)
        setattr(self, methodName, apply(ConfigFactoryItem,_args, kargs))

    def unregister(self, methodName):
        """unregister a constructor"""
        delattr(self, methodName)

    def createConfig(self, name="pacemaker-1.0"):
        if name == "pacemaker-0.6":
            name = "pacemaker06";
        elif name == "pacemaker-1.0":
            name = "pacemaker10";
        elif name == "hasi":
            name = "hae";

        if hasattr(self, name):
            return getattr(self, name)()
        else:
            self.CM.log("Configuration variant '%s' is unknown.  Defaulting to latest config" % name)

        return self.pacemaker10()

class ConfigFactoryItem:
    def __init__(self, function, *args, **kargs):
        assert callable(function), "function should be a callable obj"
        self._function = function
        self._args = args
        self._kargs = kargs

    def __call__(self, *args, **kargs):
        """call function"""
        _args = list(self._args)
        _args.extend(args)
        _kargs = self._kargs.copy()
        _kargs.update(kargs)
        return apply(self._function,_args,_kargs)

#CibFactory = ConfigFactory()
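# ConfigFactory.register() stores a deferred constructor: ConfigFactoryItem
# captures a callable plus pre-bound arguments, and calling the item later
# builds the object (apply() is the Python 2 spelling of f(*args, **kargs)).
# A hedged usage sketch with a throwaway class rather than a real
# ClusterManager:

class _Point:
    def __init__(self, x, y):
        self.x = x
        self.y = y

make_point_at_x0 = ConfigFactoryItem(_Point, 0)  # pre-bind x=0
p = make_point_at_x0(5)                          # equivalent to _Point(0, 5)

# Through the factory the same mechanism reads as:
#   factory = ConfigFactory(cm)
#   cib = factory.createConfig("pacemaker-1.0")  # dispatches to factory.pacemaker10()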
diff --git a/cts/CM_ais.py b/cts/CM_ais.py
index d772216a09..301716da14 100644
--- a/cts/CM_ais.py
+++ b/cts/CM_ais.py
@@ -1,347 +1,348 @@
'''CTS: Cluster Testing System: AIS dependent modules...
'''

__copyright__='''
Copyright (C) 2007 Andrew Beekhof
'''

#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.

import os, sys, warnings
from cts.CTSvars import *
from cts.CM_lha import crm_lha
from cts.CTS import Process

#######################################################################
#
#  LinuxHA v2 dependent modules
#
#######################################################################

class crm_ais(crm_lha):
    '''
    The crm version 3 cluster manager class.
    It implements the things we need to talk to and manipulate
    crm clusters running on top of openais
    '''
    def __init__(self, Environment, randseed=None):
        crm_lha.__init__(self, Environment, randseed=randseed)

        self.update({
            "Name"             : "crm-ais",

            "UUIDQueryCmd"     : "crmadmin -N --openais",
            "EpocheCmd"        : "crm_node -e --openais",
            "QuorumCmd"        : "crm_node -q --openais",
            "ParitionCmd"      : "crm_node -p --openais",

            "Pat:They_stopped" : "%s crmd:.*Node %s: .* state=lost .new",
            "Pat:ChildExit"    : "Child process .* exited",

            # Bad news Regexes.  Should never occur.
            "BadRegexes"   : (
                r"ERROR:",
                r"CRIT:",
                r"Shutting down\.",
                r"Forcing shutdown\.",
                r"Timer I_TERMINATE just popped",
                r"input=I_ERROR",
                r"input=I_FAIL",
                r"input=I_INTEGRATED cause=C_TIMER_POPPED",
                r"input=I_FINALIZED cause=C_TIMER_POPPED",
                r"input=I_ERROR",
                r", exiting\.",
                r"WARN.*Ignoring HA message.*vote.*not in our membership list",
                r"pengine.*Attempting recovery of resource",
                r"is taking more than 2x its timeout",
                r"Confirm not received from",
                r"Welcome reply not received from",
                r"Attempting to schedule .* after a stop",
                r"Resource .* was active at shutdown",
                r"duplicate entries for call_id",
                r"Search terminated:",
                r"No need to invoke the TE",
                r":global_timer_callback",
                r"Faking parameter digest creation",
                r"Parameters to .* action changed:",
                r"Parameters to .* changed",
                r"Child process .* terminated with signal 11",
                r"Executing .* fencing operation",
+                r"ping.*: DEBUG: Updated connected = 0",
            ),
        })

    def errorstoignore(self):
        # At some point implement a more elegant solution that
        # also produces a report at the end
        '''Return the list of errors which are known, very noisy, and should be ignored'''
        if 1:
            return [
                "crm_mon:",
                "crmadmin:",
                "async_notify: strange, client not found",
                "ERROR: Message hist queue is filling up",
                ]
        return []

    def NodeUUID(self, node):
        return node

    def ais_components(self):
        fullcomplist = {}
        self.complist = []
        self.common_ignore = [
            "Pending action:",
            "ERROR: crm_log_message_adv:",
            "ERROR: MSG: No message to dump",
            "pending LRM operations at shutdown",
            "Lost connection to the CIB service",
            "Connection to the CIB terminated...",
            "Sending message to CIB service FAILED",
            "apply_xml_diff: Diff application failed!",
            "crmd: .*Action A_RECOVER .* not supported",
            "pingd: .*ERROR: send_update: Could not send update",
            "send_ipc_message: IPC Channel to .* is not connected",
            "unconfirmed_actions: Waiting on .* unconfirmed actions",
            "cib_native_msgready: Message pending on command channel",
            "crmd:.*do_exit: Performing A_EXIT_1 - forcefully exiting the CRMd",
            "verify_stopped: Resource .* was active at shutdown.  You may ignore this error if it is unmanaged.",
            "ERROR: attrd_connection_destroy: Lost connection to attrd",
            "nfo: te_fence_node: Executing .* fencing operation",
            ]

        fullcomplist["cib"] = Process(self, "cib", pats = [
            "State transition .* S_RECOVERY",
            "Respawning .* crmd",
            "Respawning .* attrd",
            "Lost connection to the CIB service",
            "Connection to the CIB terminated...",
            "Child process crmd exited .* rc=2",
            "Child process attrd exited .* rc=1",
            "crmd: .*Input I_TERMINATE from do_recover",
            "crmd: .*I_ERROR.*crmd_cib_connection_destroy",
            "crmd:.*do_exit: Could not recover from internal error",
            ], badnews_ignore = self.common_ignore)

        fullcomplist["lrmd"] = Process(self, "lrmd", pats = [
            "State transition .* S_RECOVERY",
            "LRM Connection failed",
            "Respawning .* crmd",
            "crmd: .*I_ERROR.*lrm_connection_destroy",
            "Child process crmd exited .* rc=2",
            "crmd: .*Input I_TERMINATE from do_recover",
            "crmd:.*do_exit: Could not recover from internal error",
            ], badnews_ignore = self.common_ignore)

        fullcomplist["crmd"] = Process(self, "crmd", pats = [
#            "WARN: determine_online_status: Node .* is unclean",
#            "Scheduling Node .* for STONITH",
#            "Executing .* fencing operation",
# Only if the node wasn't the DC:  "State transition S_IDLE",
            "State transition .* -> S_IDLE",
            ], badnews_ignore = self.common_ignore)

        fullcomplist["attrd"] = Process(self, "attrd", pats = [
            "crmd: .*ERROR: attrd_connection_destroy: Lost connection to attrd"
            ], badnews_ignore = self.common_ignore)

        fullcomplist["pengine"] = Process(self, "pengine", dc_pats = [
            "State transition .* S_RECOVERY",
            "Respawning .* crmd",
            "Child process crmd exited .* rc=2",
            "crmd: .*pe_connection_destroy: Connection to the Policy Engine failed",
            "crmd: .*I_ERROR.*save_cib_contents",
            "crmd: .*Input I_TERMINATE from do_recover",
            "crmd:.*do_exit: Could not recover from internal error",
            ], badnews_ignore = self.common_ignore)

        stonith_ignore = [
            "update_failcount: Updating failcount for child_DoFencing",
            "ERROR: te_connect_stonith: Sign-in failed: triggered a retry",
            ]

        stonith_ignore.extend(self.common_ignore)

        fullcomplist["stonith-ng"] = Process(self, "stonith-ng", process="stonithd", pats = [
            "CRIT: stonith_dispatch: Lost connection to the STONITH service",
            "tengine_stonith_connection_destroy: Fencing daemon connection failed",
            "Attempting connection to fencing daemon",
            "te_connect_stonith: Connected",
            ], badnews_ignore = stonith_ignore)

        vgrind = self.Env["valgrind-procs"].split()
        for key in fullcomplist.keys():
            if self.Env["valgrind-tests"]:
                if key in vgrind:
                    # Processes running under valgrind can't be shot with "killall -9 processname"
                    self.log("Filtering %s from the component list as it is being profiled by valgrind" % key)
                    continue
            if key == "stonith-ng" and not self.Env["DoFencing"]:
                continue

            self.complist.append(fullcomplist[key])

        #self.complist = [ fullcomplist["pengine"] ]
        return self.complist
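# The BadRegexes tuple drives CTS log auditing: any log line matching one of
# those patterns is reported as bad news unless it also matches an entry in
# the relevant badnews_ignore list. A minimal sketch of that filtering; the
# log lines are invented and the real scanner lives in the CTS audit code:

import re

def bad_news(lines, bad_patterns, ignore_patterns):
    """Return the log lines that match a bad pattern and no ignore pattern."""
    hits = []
    for line in lines:
        if not [p for p in bad_patterns if re.search(p, line)]:
            continue
        if [ig for ig in ignore_patterns if re.search(ig, line)]:
            continue
        hits.append(line)
    return hits

# bad_news(["crmd: ERROR: oops", "crm_mon: ERROR: noise"], [r"ERROR:"], ["crm_mon:"])
# -> ["crmd: ERROR: oops"]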
class crm_whitetank(crm_ais):
    '''
    The crm version 3 cluster manager class.
    It implements the things we need to talk to and manipulate
    crm clusters running on top of openais
    '''
    def __init__(self, Environment, randseed=None):
        crm_ais.__init__(self, Environment, randseed=randseed)

        self.update({
            "Name"             : "crm-whitetank",
            "StartCmd"         : CTSvars.INITDIR+"/openais start",
            "StopCmd"          : CTSvars.INITDIR+"/openais stop",

            "Pat:We_stopped"   : "%s.*openais.*pcmk_shutdown: Shutdown complete",
            "Pat:They_stopped" : "%s crmd:.*Node %s: .* state=lost .new",
            "Pat:They_dead"    : "openais:.*Node %s is now: lost",

            "Pat:ChildKilled"  : "%s openais.*Child process %s terminated with signal 9",
            "Pat:ChildRespawn" : "%s openais.*Respawning failed child process: %s",
            "Pat:ChildExit"    : "Child process .* exited",
        })

    def Components(self):
        self.ais_components()

        aisexec_ignore = [
            "ERROR: ais_dispatch: Receiving message .* failed",
            "crmd: .*I_ERROR.*crmd_cib_connection_destroy",
            "cib: .*ERROR: cib_ais_destroy: AIS connection terminated",
            #"crmd: .*ERROR: crm_ais_destroy: AIS connection terminated",
            "crmd:.*do_exit: Could not recover from internal error",
            "crmd: .*I_TERMINATE.*do_recover",
            "attrd: .*CRIT: attrd_ais_destroy: Lost connection to OpenAIS service!",
            "stonithd: .*ERROR: AIS connection terminated",
            ]

        aisexec_ignore.extend(self.common_ignore)

        self.complist.append(Process(self, "aisexec", pats = [
            "ERROR: ais_dispatch: AIS connection failed",
            "crmd: .*ERROR: do_exit: Could not recover from internal error",
            "pengine: .*Scheduling Node .* for STONITH",
            "stonithd: .*requests a STONITH operation RESET on node",
            "stonithd: .*Succeeded to STONITH the node",
            ], badnews_ignore = aisexec_ignore))
class crm_flatiron(crm_ais):
    '''
    The crm version 3 cluster manager class.
    It implements the things we need to talk to and manipulate
    crm clusters running on top of openais
    '''
    def __init__(self, Environment, randseed=None):
        crm_ais.__init__(self, Environment, randseed=randseed)

        self.update({
            "Name"             : "crm-flatiron",
            "StartCmd"         : "service corosync start",
            "StopCmd"          : "service corosync stop",

#            The next pattern is too early
#            "Pat:We_stopped"  : "%s.*Service engine unloaded: Pacemaker Cluster Manager",
#            The next pattern would be preferred, but it doesn't always come out
#            "Pat:We_stopped"  : "%s.*Corosync Cluster Engine exiting with status",
            "Pat:We_stopped"   : "%s.*Service engine unloaded: corosync cluster quorum service",
            "Pat:They_stopped" : "%s crmd:.*Node %s: .* state=lost .new",
            "Pat:They_dead"    : "corosync:.*Node %s is now: lost",

            "Pat:ChildKilled"  : "%s corosync.*Child process %s terminated with signal 9",
            "Pat:ChildRespawn" : "%s corosync.*Respawning failed child process: %s",
        })

    def Components(self):
        self.ais_components()

        corosync_ignore = [
            "ERROR: ais_dispatch: Receiving message .* failed",
            "crmd: .*I_ERROR.*crmd_cib_connection_destroy",
            "cib: .*ERROR: cib_ais_destroy: AIS connection terminated",
            #"crmd: .*ERROR: crm_ais_destroy: AIS connection terminated",
            "crmd:.*do_exit: Could not recover from internal error",
            "crmd: .*I_TERMINATE.*do_recover",
            "attrd: .*CRIT: attrd_ais_destroy: Lost connection to Corosync service!",
            "stonithd: .*ERROR: AIS connection terminated",
            ]

#        corosync_ignore.extend(self.common_ignore)
#        self.complist.append(Process(self, "corosync", pats = [
#            "ERROR: ais_dispatch: AIS connection failed",
#            "crmd: .*ERROR: do_exit: Could not recover from internal error",
#            "pengine: .*Scheduling Node .* for STONITH",
#            "stonithd: .*requests a STONITH operation RESET on node",
#            "stonithd: .*Succeeded to STONITH the node",
#            ], badnews_ignore = corosync_ignore))

        return self.complist
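# The "Pat:*" entries are printf-style templates: CTS substitutes the node
# name (and, for the child patterns, the process name) before handing the
# result to the log watcher as a regex. A sketch with an invented node name:

pat_they_dead = "corosync:.*Node %s is now: lost"
watch_for = pat_they_dead % "c001n02"
# -> "corosync:.*Node c001n02 is now: lost"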
class crm_mcp(crm_flatiron):
    '''
    The crm version 3 cluster manager class.
    It implements the things we need to talk to and manipulate
    crm clusters running on top of openais
    '''
    def __init__(self, Environment, randseed=None):
        crm_flatiron.__init__(self, Environment, randseed=randseed)

        self.update({
            "Name"             : "crm-mcp",
            "StartCmd"         : "service corosync start; service pacemaker start",
            "StopCmd"          : "service pacemaker stop; service corosync stop",

            "Pat:We_stopped"   : "%s.*Service engine unloaded: corosync cluster quorum service",
            "Pat:They_stopped" : "%s crmd:.*Node %s: .* state=lost .new",
            "Pat:They_dead"    : "crmd:.*Node %s: .* state=lost .new",

            "Pat:ChildKilled"  : "%s pacemakerd.*Child process %s terminated with signal 9",
            "Pat:ChildRespawn" : "%s pacemakerd.*Respawning failed child process: %s",
        })

class crm_cman(crm_flatiron):
    '''
    The crm version 3 cluster manager class.
    It implements the things we need to talk to and manipulate
    crm clusters running on top of openais
    '''
    def __init__(self, Environment, randseed=None):
        crm_flatiron.__init__(self, Environment, randseed=randseed)

        self.update({
            "Name"             : "crm-cman",
            "StartCmd"         : "service corosync start; service pacemaker start",
            "StopCmd"          : "service pacemaker stop; cman_tool leave",

            "UUIDQueryCmd"     : "crmadmin -N --cman",
            "EpocheCmd"        : "crm_node -e --cman",
            "QuorumCmd"        : "crm_node -q --cman",
            "ParitionCmd"      : "crm_node -p --cman",

            "Pat:We_stopped"   : "%s.*Service engine unloaded: corosync cluster quorum service",
            "Pat:They_stopped" : "%s crmd:.*Node %s: .* state=lost .new",
            "Pat:They_dead"    : "crmd:.*Node %s: .* state=lost .new",

            "Pat:ChildKilled"  : "%s pacemakerd.*Child process %s terminated with signal 9",
            "Pat:ChildRespawn" : "%s pacemakerd.*Respawning failed child process: %s",
        })
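# Each variant above only overrides dictionary entries; everything else is
# inherited through crm_flatiron -> crm_ais -> crm_lha. A sketch of that
# layering with plain dicts and invented values:

base = {"Name": "crm-flatiron", "StartCmd": "service corosync start"}
child = dict(base)
child.update({
    "Name"     : "crm-mcp",
    "StartCmd" : "service corosync start; service pacemaker start",
})
# child["StartCmd"] now reflects the pacemakerd stack; keys the child does not
# override keep the values established by its ancestors.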