diff --git a/cts/CTSlab.py b/cts/CTSlab.py index 9b336a5beb..a3494e1b4e 100755 --- a/cts/CTSlab.py +++ b/cts/CTSlab.py @@ -1,165 +1,158 @@ #!/usr/bin/python '''CTS: Cluster Testing System: Lab environment module ''' __copyright__ = ''' Copyright (C) 2001,2005 Alan Robertson Licensed under the GNU GPL. ''' # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. from UserDict import UserDict import sys, types, string, string, signal, os, socket pdir = os.path.dirname(sys.path[0]) sys.path.insert(0, pdir) # So that things work from the source directory try: - from cts.CTSvars import * - from cts.CM_ais import * - from cts.CM_lha import crm_lha - from cts.CTSaudits import AuditList - from cts.CTStests import TestList + from cts.CTSvars import * + from cts.CM_ais import * + from cts.CM_lha import crm_lha + from cts.CTSaudits import AuditList + from cts.CTStests import TestList from cts.CTSscenarios import * from cts.logging import LogFactory - -except ImportError: - sys.stderr.write("abort: couldn't find cts libraries in [%s]\n" % +except ImportError as e: + sys.stderr.write("abort: %s\n" % e) + sys.stderr.write("check your install and PYTHONPATH; couldn't find cts libraries in:\n%s\n" % ' '.join(sys.path)) - sys.stderr.write("(check your install and PYTHONPATH)\n") - - # Now do it again to get more details - from cts.CTSvars import * - from cts.CM_ais import * - from cts.CM_lha import crm_lha - from cts.CTSaudits import AuditList - from cts.CTStests import TestList - from cts.CTSscenarios import * - from cts.logging import LogFactory - sys.exit(-1) + sys.exit(1) +# These are globals so they can be used by the signal handler. 
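The handler registration in the __main__ block below uses raw signal numbers (15 and 10). As a standalone illustration (not part of this diff), the equivalent registration with symbolic names, which on Linux/x86 map to the same numbers:

    # Hedged sketch, not part of the diff: same registration as below, but
    # with symbolic names.  On Linux/x86, SIGTERM == 15 and SIGUSR1 == 10.
    import signal
    import sys

    def sig_handler(signum, frame):
        sys.stderr.write("Interrupted by signal %d\n" % signum)
        if signum == signal.SIGTERM:                # was: signum == 15
            sys.exit(1)

    signal.signal(signal.SIGTERM, sig_handler)      # was: signal.signal(15, ...)
    signal.signal(signal.SIGUSR1, sig_handler)      # was: signal.signal(10, ...)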
cm = None scenario = None - LogFactory().add_stderr() + + def sig_handler(signum, frame) : LogFactory().log("Interrupted by signal %d"%signum) if scenario: scenario.summarize() if signum == 15 : if scenario: scenario.TearDown() sys.exit(1) + if __name__ == '__main__': Environment = CtsLab(sys.argv[1:]) NumIter = Environment["iterations"] Tests = [] # Set the signal handler signal.signal(15, sig_handler) signal.signal(10, sig_handler) # Create the Cluster Manager object if Environment["Stack"] == "heartbeat": cm = crm_lha(Environment) elif Environment["Stack"] == "openais (whitetank)": cm = crm_whitetank(Environment) elif Environment["Stack"] == "corosync 2.x": cm = crm_mcp(Environment) elif Environment["Stack"] == "corosync (cman)": cm = crm_cman(Environment) elif Environment["Stack"] == "corosync (plugin v1)": cm = crm_cs_v1(Environment) elif Environment["Stack"] == "corosync (plugin v0)": cm = crm_cs_v0(Environment) else: LogFactory().log("Unknown stack: "+Environment["stack"]) sys.exit(1) if Environment["TruncateLog"] == 1: - Environment.log("Truncating %s" % LogFile) - lf = open(LogFile, "w"); - if lf != None: - lf.truncate(0) - lf.close() + if Environment["OutputFile"] is None: + LogFactory().log("Ignoring truncate request because no output file specified") + else: + LogFactory().log("Truncating %s" % Environment["OutputFile"]) + with open(Environment["OutputFile"], "w") as outputfile: + outputfile.truncate(0) Audits = AuditList(cm) if Environment["ListTests"] == 1: Tests = TestList(cm, Audits) LogFactory().log("Total %d tests"%len(Tests)) for test in Tests : LogFactory().log(str(test.name)); sys.exit(0) elif len(Environment["tests"]) == 0: Tests = TestList(cm, Audits) else: Chosen = Environment["tests"] for TestCase in Chosen: match = None for test in TestList(cm, Audits): if test.name == TestCase: match = test if not match: usage("--choose: No applicable/valid tests chosen") else: Tests.append(match) # Scenario selection if Environment["scenario"] == "basic-sanity": scenario = RandomTests(cm, [ BasicSanityCheck(Environment) ], Audits, Tests) elif Environment["scenario"] == "all-once": NumIter = len(Tests) scenario = AllOnce( cm, [ BootCluster(Environment), PacketLoss(Environment) ], Audits, Tests) elif Environment["scenario"] == "sequence": scenario = Sequence( cm, [ BootCluster(Environment), PacketLoss(Environment) ], Audits, Tests) elif Environment["scenario"] == "boot": scenario = Boot(cm, [ LeaveBooted(Environment)], Audits, []) else: scenario = RandomTests( cm, [ BootCluster(Environment), PacketLoss(Environment) ], Audits, Tests) LogFactory().log(">>>>>>>>>>>>>>>> BEGINNING " + repr(NumIter) + " TESTS ") LogFactory().log("Stack: %s (%s)" % (Environment["Stack"], Environment["Name"])) LogFactory().log("Schema: %s" % Environment["Schema"]) LogFactory().log("Scenario: %s" % scenario.__doc__) LogFactory().log("CTS Master: %s" % Environment["cts-master"]) LogFactory().log("CTS Logfile: %s" % Environment["OutputFile"]) LogFactory().log("Random Seed: %s" % Environment["RandSeed"]) LogFactory().log("Syslog variant: %s" % Environment["syslogd"].strip()) LogFactory().log("System log files: %s" % Environment["LogFileName"]) -# Environment.log(" ") if Environment.has_key("IPBase"): LogFactory().log("Base IP for resources: %s" % Environment["IPBase"]) LogFactory().log("Cluster starts at boot: %d" % Environment["at-boot"]) Environment.dump() rc = Environment.run(scenario, NumIter) sys.exit(rc) diff --git a/cts/README b/cts/README index 5fa4841e42..b2ff427de5 100644 --- a/cts/README +++ 
b/cts/README
@@ -1,192 +1,138 @@
-BASIC REQUIREMENTS BEFORE STARTING:
-
-Three or more machines: one test exerciser and two or more test cluster machines.
-
-    The two test cluster machines need to be on the same subnet
-    and they should have journalling filesystems for
-    all of their filesystems other than /boot
-    You also need a number of free IP addresses on that subnet to test
-    mutual IP address takeover
-
-    The test exerciser machine doesn't need to be on the same subnet
-    as the test cluster machines. Minimal demands are made on the
-    exerciser machine - it just has to stay up during the tests.
-    However, it does need to have a current copy of the cts test
-    scripts. It is worth noting that these scripts are coordinated
-    with particular versions of Pacemaker, so that in general you
-    have to have the same version of test scripts as the rest of
-    Pacemaker.
+
+                          PACEMAKER
+                  CLUSTER TEST SUITE (CTS)
+
+
+Purpose
+-------
+
+CTS thoroughly exercises a Pacemaker test cluster by running a randomized
+series of predefined tests on the cluster. CTS can be run against a
+pre-existing cluster configuration or (more typically) overwrite the existing
+configuration with a test configuration.
+
+
+Requirements
+------------
+
+* Three or more machines (one test exerciser and two or more test cluster
+  machines).
+
+* The test cluster machines should be on the same subnet and have journalling
+  filesystems (ext3, ext4, xfs, etc.) for all of their filesystems other than
+  /boot. You also need a number of free IP addresses on that subnet if you
+  intend to test mutual IP address takeover.
+
+* The test exerciser machine doesn't need to be on the same subnet as the test
+  cluster machines. Minimal demands are made on the exerciser machine - it
+  just has to stay up during the tests.
+
+* It helps a lot in tracking problems if all machines' clocks are closely
+  synchronized. NTP does this automatically, but you can do it by hand if you
+  want.
+
+* The exerciser needs to be able to ssh over to the cluster nodes as root
+  without a password challenge. Configure ssh accordingly (see the Mini-HOWTO
+  at the end of this document for more details).
+
+* The exerciser needs to be able to resolve the machine names of the
+  test cluster - either by DNS or by /etc/hosts.
+
+
+Preparation
+-----------

-Install Pacemaker on all machines.
+Install Pacemaker (including CTS) on all machines. These scripts are
+coordinated with particular versions of Pacemaker, so you need the same version
+of CTS as the rest of Pacemaker, and you need the same version of
+Pacemaker and CTS on both the test exerciser and the test cluster machines.
 
 Configure cluster communications (Corosync, CMAN or Heartbeat) on the
 cluster machines and verify everything works.
 
 NOTE: Do not run the cluster on the test exerciser machine.
 
 NOTE: Wherever machine names are mentioned in these configuration files,
 they must match the machines' `uname -n` name. This may or may not match
 the machines' FQDN (fully qualified domain name) - it depends on how
 you (and your OS) have named the machines.
 
-It helps a lot in tracking problems if the three machines' clocks are
-closely synchronized. xntpd does this, but you can do it by hand if
-you want.
-
-Make sure all your filesystems are journalling filesystems (/boot can be
-ext2 if you want). This means filesystems like ext3.
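The `uname -n` note above is a frequent source of confusion; once passwordless root ssh is in place, a quick way to verify it from the exerciser is a loop like this (a hypothetical helper, not part of CTS; the node names are placeholders):

    # Hypothetical sanity check (not part of CTS): verify each test machine
    # resolves via DNS or /etc/hosts and that its `uname -n` matches the name
    # you plan to pass to CTS.  Assumes passwordless root ssh is set up.
    import socket
    import subprocess

    nodes = ["pcmk-1", "pcmk-2", "pcmk-3"]       # placeholder node names

    for node in nodes:
        socket.gethostbyname(node)               # raises if resolution fails
        reported = subprocess.check_output(
            ["ssh", "-l", "root", node, "uname", "-n"]).decode().strip()
        if reported != node:
            print("%s: `uname -n` reports %r; CTS expects these to match"
                  % (node, reported))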
+Run CTS
+-------

-Here's what you need to do to run CTS:
+Assuming you have done all of the above, run CTSlab.py:

-The exerciser needs to be able to ssh over to the cluster nodes as root
-without a password challenge. Configure ssh accordingly.
-   (see the Mini-HOWTOs at the end for more details)
-
-The exerciser needs to be able to resolve the machine names of the
-test cluster - either by DNS or by /etc/hosts.
+    python ./CTSlab.py [options] number-of-tests-to-run
 
+You must specify which nodes are part of the cluster with --nodes, e.g.:

-Now assuming you did all this, what you need to do is run CTSlab.py
+    --nodes "pcmk-1 pcmk-2 pcmk-3"

-    python ./CTSlab.py [options] number-of-tests-to-run
+Most people will want to save the output with --outputfile, e.g.:

-You must specify which nodes are part of the cluster:
-    --nodes, eg. --node "pcmk-1 pcmk-2 pcmk-3"
+    --outputfile ~/cts.log

-Most people will want to save the output:
-    --outputfile, eg. --outputfile ~/cts.log
+Unless you want to test your pre-existing cluster configuration, you also want:
 
     --clobber-cib
     --populate-resources
-    --test-ip-base, eg. --test-ip-base 192.168.9.100
+    --test-ip-base $IP    # e.g. --test-ip-base 192.168.9.100

-  and configure some sort of fencing:
-    --stonith, eg. --stonith rhcs to use fence_xvm or --stonith lha to use external/ssh
+and configure some sort of fencing:
+
+    --stonith $TYPE    # e.g. "--stonith rhcs" to use fence_xvm or "--stonith lha" to use external/ssh
 
 A complete command line might look like:
 
    python ./CTSlab.py --nodes "pcmk-1 pcmk-2 pcmk-3" --outputfile ~/cts.log \
        --clobber-cib --populate-resources --test-ip-base 192.168.9.100 \
        --stonith rhcs 50
 
+For more options, use the --help option.
-For other options, use the --help option and see the Mini-HOWTOs at the end for more details on setting up external/ssh.

+To extract the result of a particular test, run:
-HINT: To extract the result of a particular test, run:
 
    crm_report -T $test
 
+
+Mini-HOWTO: Allow passwordless remote SSH connections
+-----------------------------------------------------
+
+The CTS scripts run "ssh -l root" so you don't have to do any of your testing
+logged in as root on the test machine. Here is how to allow such connections
+without requiring a password to be entered each time:

-==============
-Mini-HOWTOs:
-==============
-
--------------------------------------------------------------------------------
-How to make OpenSSH allow you to login as root across the network without
-a password.
--------------------------------------------------------------------------------
-
-All our scripts run ssh -l root, so you don't have to do any of your testing
-logged in as root on the test machine
-
-1) Grab your key from the exerciser machine:
-
-   take the single line out of ~/.ssh/identity.pub
-   and put it into root's authorized_keys file.
-   [This has changed to: copying the line from ~/.ssh/id_dsa.pub into
-   root's authorized_keys file ]
+* On your test exerciser, create an SSH key if you do not already have one.
+  Most commonly, SSH keys will be in your ~/.ssh directory, with the
+  private key file not having an extension, and the public key file
+  named the same with the extension ".pub" (for example, ~/.ssh/id_dsa.pub).
-   NOTE: If you don't have an id_dsa.pub file, create it by running:
+  If you don't already have a key, you can create one with:
 
       ssh-keygen -t dsa

-2) Run this command on each of the cluster machines as root:
-
-ssh -v -l myid ererciser-machine cat /home/myid/.ssh/identity.pub \
-  >> ~root/.ssh/authorized_keys
-
-[For most people, this has changed to:
-  ssh -v -l myid exerciser-machine cat /home/myid/.ssh/id_dsa.pub \
-  >> ~root/.ssh/authorized_keys
-]
-
-   You will probably have to provide your password, and possibly say
-   "yes" to some questions about accepting the identity of the
-   test machines
-
-3) You must also do the corresponding update for the exerciser
-   machine itself as root:
-
-   cat /home/myid/.ssh/identity.pub >> ~root/.ssh/authorized_keys
-
-   To test this, try this command from the exerciser machine for each
-   of your cluster machines, and for the exerciser machine itself.
-
-ssh -l root cluster-machine
-
-If this works without prompting for a password, you're in business...
-If not, you need to look at the ssh/openssh documentation and the output from
-the -v options above...
-
--------------------------------------------------------------------------------
-How to configure OpenSSH for StonithdTest
--------------------------------------------------------------------------------
-
-This configure enables cluster machines to ssh over to each other without a
-password challenge.
-
-1) On each of the cluster machines, grab your key:
-
-   take the single line out of ~/.ssh/identity.pub
-   and put it into root's authorized_keys file.
-   [This has changed to: copying the line from ~/.ssh/id_dsa.pub into
-   root's authorized_keys file ]
-
-   NOTE: If you don't have an id_dsa.pub file, create it by running:
-
-       ssh-keygen -t dsa
-
-2) Run this command on each of the cluster machines as root:
-
-ssh -v -l myid cluster_machine_1 cat /home/myid/.ssh/identity.pub \
-  >> ~root/.ssh/authorized_keys
-
-ssh -v -l myid cluster_machine_2 cat /home/myid/.ssh/identity.pub \
-  >> ~root/.ssh/authorized_keys
-
-......
-
-ssh -v -l myid cluster_machine_n cat /home/myid/.ssh/identity.pub \
-  >> ~root/.ssh/authorized_keys
-
-[For most people, this has changed to:
-  ssh -v -l myid cluster_machine cat /home/myid/.ssh/id_dsa.pub \
-  >> ~root/.ssh/authorized_keys
-]
+* From your test exerciser, authorize your SSH public key for root on all test
+  machines (both the exerciser and the cluster test machines):

-   You will probably have to provide your password, and possibly say
-   "yes" to some questions about accepting the identity of the
-   test machines
+      ssh-copy-id -i ~/.ssh/id_dsa.pub root@$MACHINE

-To test this, try this command from any machine for each
-of other cluster machines, and for the machine itself.
+  You will probably have to provide your password, and possibly say
+  "yes" to some questions about accepting the identity of the test machines.

-   ssh -l root cluster-machine
+  The above assumes you have a DSA SSH key in the specified location;
+  if you have some other type of key (RSA, ECDSA, etc.), use its file name
+  in the -i option above.

-This should work without prompting for a password,
-If not, you need to look at the ssh/openssh documentation and the output from
-the -v options above...
+  If you have an old version of SSH that doesn't have ssh-copy-id,
+  you can take the single line out of your public key file
+  (e.g. ~/.ssh/identity.pub or ~/.ssh/id_dsa.pub) and manually add it to
+  root's ~/.ssh/authorized_keys file on each test machine.
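With many test machines, the ssh-copy-id step above can be scripted; a hedged convenience sketch (machine names are placeholders; substitute your own key file if it is not ~/.ssh/id_dsa.pub):

    # Hedged convenience sketch (not part of CTS): run the ssh-copy-id step
    # above for every test machine in one go.
    import os
    import subprocess

    key = os.path.expanduser("~/.ssh/id_dsa.pub")
    machines = ["exerciser", "pcmk-1", "pcmk-2", "pcmk-3"]  # placeholders

    for machine in machines:
        # Expect one password prompt (and possibly a host-key question) each.
        subprocess.check_call(["ssh-copy-id", "-i", key, "root@" + machine])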
-3) Make sure the 'at' daemon is enabled on the test cluster machines +* To test, try this command from the exerciser machine for each + of your cluster machines, and for the exerciser machine itself. -This is normally the 'atd' service started by /etc/init.d/atd). This -doesn't mean just start it, it means enable it to start on every boot -into your default init state (probably either 3 or 5). + ssh -l root $MACHINE -Usually this can be achieved with: - chkconfig --add atd - chkconfig atd on + If this works without prompting for a password, you're in business. + If not, look at the documentation for your version of ssh. diff --git a/cts/environment.py b/cts/environment.py index 2b2a3438e2..e76c36d29e 100644 --- a/cts/environment.py +++ b/cts/environment.py @@ -1,677 +1,680 @@ ''' Classes related to producing and searching logs ''' __copyright__=''' Copyright (C) 2014 Andrew Beekhof Licensed under the GNU GPL. ''' # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. import types, string, select, sys, time, re, os, struct, signal, socket import time, syslog, random, traceback, base64, pickle, binascii, fcntl from cts.remote import * class Environment: def __init__(self, args): - print repr(self) self.data = {} self.Nodes = [] self["DeadTime"] = 300 self["StartTime"] = 300 self["StableTime"] = 30 self["tests"] = [] self["IPagent"] = "IPaddr2" self["DoStandby"] = 1 self["DoFencing"] = 1 self["XmitLoss"] = "0.0" self["RecvLoss"] = "0.0" self["ClobberCIB"] = 0 self["CIBfilename"] = None self["CIBResource"] = 0 self["DoBSC"] = 0 self["use_logd"] = 0 self["oprofile"] = [] self["warn-inactive"] = 0 self["ListTests"] = 0 self["benchmark"] = 0 self["LogWatcher"] = "any" self["SyslogFacility"] = "daemon" self["LogFileName"] = "/var/log/messages" self["Schema"] = "pacemaker-2.0" self["Stack"] = "corosync" self["stonith-type"] = "external/ssh" self["stonith-params"] = "hostlist=all,livedangerously=yes" self["loop-minutes"] = 60 self["valgrind-prefix"] = None self["valgrind-procs"] = "cib crmd attrd pengine stonith-ng" self["valgrind-opts"] = """--leak-check=full --show-reachable=yes --trace-children=no --num-callers=25 --gen-suppressions=all --suppressions="""+CTSvars.CTS_home+"""/cts.supp""" self["experimental-tests"] = 0 self["container-tests"] = 0 self["valgrind-tests"] = 0 self["unsafe-tests"] = 1 self["loop-tests"] = 1 self["scenario"] = "random" self["stats"] = 0 self["docker"] = 0 self.RandomGen = random.Random() self.logger = LogFactory() self.SeedRandom() self.rsh = RemoteFactory().getInstance() self.target = "localhost" self.parse_args(args) self.discover() self.validate() def SeedRandom(self, seed=None): if not seed: seed = int(time.time()) if self.has_key("RandSeed"): self.logger.log("New random seed is: " + str(seed)) else: self.logger.log("Random seed is: " + str(seed)) self["RandSeed"] = seed self.RandomGen.seed(str(seed)) def dump(self): keys = [] for 
key in self.data.keys(): keys.append(key) keys.sort() for key in keys: self.logger.debug("Environment["+key+"]:\t"+str(self[key])) def keys(self): return self.data.keys() def has_key(self, key): if key == "nodes": return True return self.data.has_key(key) def __getitem__(self, key): if key == "nodes": return self.Nodes elif key == "Name": return self.get_stack_short() elif self.data.has_key(key): return self.data[key] else: return None def __setitem__(self, key, value): if key == "Stack": self.set_stack(value) elif key == "node-limit": self.data[key] = value self.filter_nodes() elif key == "nodes": self.Nodes = [] for node in value: # I don't think I need the IP address, etc. but this validates # the node name against /etc/hosts and/or DNS, so it's a # GoodThing(tm). try: n = node.strip() if self.data["docker"] == 0: gethostbyname_ex(n) self.Nodes.append(n) except: self.logger.log(node+" not found in DNS... aborting") raise self.filter_nodes() else: self.data[key] = value def RandomNode(self): '''Choose a random node from the cluster''' return self.RandomGen.choice(self["nodes"]) def set_stack(self, name): # Normalize stack names if name == "heartbeat" or name == "lha": self.data["Stack"] = "heartbeat" elif name == "openais" or name == "ais" or name == "whitetank": self.data["Stack"] = "openais (whitetank)" elif name == "corosync" or name == "cs" or name == "mcp": self.data["Stack"] = "corosync 2.x" elif name == "cman": self.data["Stack"] = "corosync (cman)" elif name == "v1": self.data["Stack"] = "corosync (plugin v1)" elif name == "v0": self.data["Stack"] = "corosync (plugin v0)" else: print "Unknown stack: "+name sys.exit(1) def get_stack_short(self): # Create the Cluster Manager object if not self.data.has_key("Stack"): return "unknown" elif self.data["Stack"] == "heartbeat": return "crm-lha" elif self.data["Stack"] == "corosync 2.x": if self["docker"]: return "crm-mcp-docker" else: return "crm-mcp" elif self.data["Stack"] == "corosync (cman)": return "crm-cman" elif self.data["Stack"] == "corosync (plugin v1)": return "crm-plugin-v1" elif self.data["Stack"] == "corosync (plugin v0)": return "crm-plugin-v0" else: LogFactory().log("Unknown stack: "+self.data["stack"]) sys.exit(1) def detect_syslog(self): # Detect syslog variant if not self.has_key("syslogd"): if self["have_systemd"]: # Systemd self["syslogd"] = self.rsh(self.target, "systemctl list-units | grep syslog.*\.service.*active.*running | sed 's:.service.*::'", stdout=1).strip() else: # SYS-V self["syslogd"] = self.rsh(self.target, "chkconfig --list | grep syslog.*on | awk '{print $1}' | head -n 1", stdout=1).strip() if not self.has_key("syslogd") or not self["syslogd"]: # default self["syslogd"] = "rsyslog" def detect_at_boot(self): # Detect if the cluster starts at boot if not self.has_key("at-boot"): atboot = 0 if self["have_systemd"]: # Systemd atboot = atboot or not self.rsh(self.target, "systemctl is-enabled heartbeat.service") atboot = atboot or not self.rsh(self.target, "systemctl is-enabled corosync.service") atboot = atboot or not self.rsh(self.target, "systemctl is-enabled pacemaker.service") else: # SYS-V atboot = atboot or not self.rsh(self.target, "chkconfig --list | grep -e corosync.*on -e heartbeat.*on -e pacemaker.*on") self["at-boot"] = atboot def detect_ip_offset(self): # Try to determin an offset for IPaddr resources if self["CIBResource"] and not self.has_key("IPBase"): network=self.rsh(self.target, "ip addr | grep inet | grep -v -e link -e inet6 -e '/32' -e ' lo' | awk '{print $2}'", stdout=1).strip() 
self["IPBase"] = self.rsh(self.target, "nmap -sn -n %s | grep 'scan report' | awk '{print $NF}' | sed 's:(::' | sed 's:)::' | sort -V | tail -n 1" % network, stdout=1).strip() if not self["IPBase"]: self["IPBase"] = " fe80::1234:56:7890:1000" self.logger.log("Could not determine an offset for IPaddr resources. Perhaps nmap is not installed on the nodes.") self.logger.log("Defaulting to '%s', use --test-ip-base to override" % self["IPBase"]) elif int(self["IPBase"].split('.')[3]) >= 240: self.logger.log("Could not determine an offset for IPaddr resources. Upper bound is too high: %s %s" % (self["IPBase"], self["IPBase"].split('.')[3])) self["IPBase"] = " fe80::1234:56:7890:1000" self.logger.log("Defaulting to '%s', use --test-ip-base to override" % self["IPBase"]) def filter_nodes(self): if self["node-limit"] > 0: if len(self["nodes"]) > self["node-limit"]: self.logger.log("Limiting the number of nodes configured=%d (max=%d)" %(len(self["nodes"]), self["node-limit"])) while len(self["nodes"]) > self["node-limit"]: self["nodes"].pop(len(self["nodes"])-1) def validate(self): if len(self["nodes"]) < 1: print "No nodes specified!" sys.exit(1) def discover(self): self.target = random.Random().choice(self["nodes"]) master = socket.gethostname() # Use the IP where possible to avoid name lookup failures for ip in socket.gethostbyname_ex(master)[2]: if ip != "127.0.0.1": master = ip break; self["cts-master"] = master if not self.has_key("have_systemd"): self["have_systemd"] = not self.rsh(self.target, "systemctl list-units") self.detect_syslog() self.detect_at_boot() self.detect_ip_offset() self.validate() def parse_args(self, args): skipthis=None if not args: args=sys.argv[1:] for i in range(0, len(args)): if skipthis: skipthis=None continue elif args[i] == "-l" or args[i] == "--limit-nodes": skipthis=1 self["node-limit"] = int(args[i+1]) elif args[i] == "-r" or args[i] == "--populate-resources": self["CIBResource"] = 1 self["ClobberCIB"] = 1 elif args[i] == "--outputfile": skipthis=1 self["OutputFile"] = args[i+1] LogFactory().add_file(self["OutputFile"]) elif args[i] == "-L" or args[i] == "--logfile": skipthis=1 self["LogWatcher"] = "remote" self["LogAuditDisabled"] = 1 self["LogFileName"] = args[i+1] elif args[i] == "--ip" or args[i] == "--test-ip-base": skipthis=1 self["IPBase"] = args[i+1] self["CIBResource"] = 1 self["ClobberCIB"] = 1 elif args[i] == "--oprofile": skipthis=1 self["oprofile"] = args[i+1].split(' ') elif args[i] == "--trunc": self["TruncateLog"]=1 elif args[i] == "--list-tests" or args[i] == "--list" : self["ListTests"]=1 elif args[i] == "--benchmark": self["benchmark"]=1 elif args[i] == "--bsc": self["DoBSC"] = 1 self["scenario"] = "basic-sanity" elif args[i] == "--qarsh": RemoteFactory().enable_qarsh() elif args[i] == "--docker": self["docker"] = 1 RemoteFactory().enable_docker() elif args[i] == "--stonith" or args[i] == "--fencing": skipthis=1 if args[i+1] == "1" or args[i+1] == "yes": self["DoFencing"]=1 elif args[i+1] == "0" or args[i+1] == "no": self["DoFencing"]=0 elif args[i+1] == "rhcs" or args[i+1] == "xvm" or args[i+1] == "virt": self["DoStonith"]=1 self["stonith-type"] = "fence_xvm" self["stonith-params"] = "pcmk_arg_map=domain:uname,delay=0" elif args[i+1] == "docker": self["DoStonith"]=1 self["stonith-type"] = "fence_docker_cts" elif args[i+1] == "scsi": self["DoStonith"]=1 self["stonith-type"] = "fence_scsi" self["stonith-params"] = "delay=0" elif args[i+1] == "ssh" or args[i+1] == "lha": self["DoStonith"]=1 self["stonith-type"] = "external/ssh" 
self["stonith-params"] = "hostlist=all,livedangerously=yes" elif args[i+1] == "north": self["DoStonith"]=1 self["stonith-type"] = "fence_apc" self["stonith-params"] = "ipaddr=north-apc,login=apc,passwd=apc,pcmk_host_map=north-01:2;north-02:3;north-03:4;north-04:5;north-05:6;north-06:7;north-07:9;north-08:10;north-09:11;north-10:12;north-11:13;north-12:14;north-13:15;north-14:18;north-15:17;north-16:19;" elif args[i+1] == "south": self["DoStonith"]=1 self["stonith-type"] = "fence_apc" self["stonith-params"] = "ipaddr=south-apc,login=apc,passwd=apc,pcmk_host_map=south-01:2;south-02:3;south-03:4;south-04:5;south-05:6;south-06:7;south-07:9;south-08:10;south-09:11;south-10:12;south-11:13;south-12:14;south-13:15;south-14:18;south-15:17;south-16:19;" elif args[i+1] == "east": self["DoStonith"]=1 self["stonith-type"] = "fence_apc" self["stonith-params"] = "ipaddr=east-apc,login=apc,passwd=apc,pcmk_host_map=east-01:2;east-02:3;east-03:4;east-04:5;east-05:6;east-06:7;east-07:9;east-08:10;east-09:11;east-10:12;east-11:13;east-12:14;east-13:15;east-14:18;east-15:17;east-16:19;" elif args[i+1] == "west": self["DoStonith"]=1 self["stonith-type"] = "fence_apc" self["stonith-params"] = "ipaddr=west-apc,login=apc,passwd=apc,pcmk_host_map=west-01:2;west-02:3;west-03:4;west-04:5;west-05:6;west-06:7;west-07:9;west-08:10;west-09:11;west-10:12;west-11:13;west-12:14;west-13:15;west-14:18;west-15:17;west-16:19;" elif args[i+1] == "openstack": self["DoStonith"]=1 self["stonith-type"] = "fence_openstack" print "Obtaining OpenStack credentials from the current environment" self["stonith-params"] = "region=%s,tenant=%s,auth=%s,user=%s,password=%s" % ( os.environ['OS_REGION_NAME'], os.environ['OS_TENANT_NAME'], os.environ['OS_AUTH_URL'], os.environ['OS_USERNAME'], os.environ['OS_PASSWORD'] ) elif args[i+1] == "rhevm": self["DoStonith"]=1 self["stonith-type"] = "fence_rhevm" print "Obtaining RHEV-M credentials from the current environment" self["stonith-params"] = "login=%s,passwd=%s,ipaddr=%s,ipport=%s,ssl=1,shell_timeout=10" % ( os.environ['RHEVM_USERNAME'], os.environ['RHEVM_PASSWORD'], os.environ['RHEVM_SERVER'], os.environ['RHEVM_PORT'], ) else: self.usage(args[i+1]) elif args[i] == "--stonith-type": self["stonith-type"] = args[i+1] skipthis=1 elif args[i] == "--stonith-args": self["stonith-params"] = args[i+1] skipthis=1 elif args[i] == "--standby": skipthis=1 if args[i+1] == "1" or args[i+1] == "yes": self["DoStandby"] = 1 elif args[i+1] == "0" or args[i+1] == "no": self["DoStandby"] = 0 else: self.usage(args[i+1]) elif args[i] == "--clobber-cib" or args[i] == "-c": self["ClobberCIB"] = 1 elif args[i] == "--cib-filename": skipthis=1 self["CIBfilename"] = args[i+1] elif args[i] == "--xmit-loss": try: float(args[i+1]) except ValueError: print ("--xmit-loss parameter should be float") self.usage(args[i+1]) skipthis=1 self["XmitLoss"] = args[i+1] elif args[i] == "--recv-loss": try: float(args[i+1]) except ValueError: print ("--recv-loss parameter should be float") self.usage(args[i+1]) skipthis=1 self["RecvLoss"] = args[i+1] elif args[i] == "--choose": skipthis=1 self["tests"].append(args[i+1]) self["scenario"] = "sequence" elif args[i] == "--nodes": skipthis=1 self["nodes"] = args[i+1].split(' ') elif args[i] == "-g" or args[i] == "--group" or args[i] == "--dsh-group": skipthis=1 self["OutputFile"] = "%s/cluster-%s.log" % (os.environ['HOME'], args[i+1]) LogFactory().add_file(self["OutputFile"], "CTS") dsh_file = "%s/.dsh/group/%s" % (os.environ['HOME'], args[i+1]) # Hacks to make my life easier if args[i+1] == 
"r6": self["Stack"] = "cman" self["DoStonith"]=1 self["stonith-type"] = "fence_xvm" self["stonith-params"] = "delay=0" self["IPBase"] = " fe80::1234:56:7890:4000" elif args[i+1] == "virt1": self["Stack"] = "corosync" self["DoStonith"]=1 self["stonith-type"] = "fence_xvm" self["stonith-params"] = "delay=0" self["IPBase"] = " fe80::1234:56:7890:1000" elif args[i+1] == "east16" or args[i+1] == "nsew": self["Stack"] = "corosync" self["DoStonith"]=1 self["stonith-type"] = "fence_apc" self["stonith-params"] = "ipaddr=east-apc,login=apc,passwd=apc,pcmk_host_map=east-01:2;east-02:3;east-03:4;east-04:5;east-05:6;east-06:7;east-07:9;east-08:10;east-09:11;east-10:12;east-11:13;east-12:14;east-13:15;east-14:18;east-15:17;east-16:19;" self["IPBase"] = " fe80::1234:56:7890:2000" if args[i+1] == "east16": # Requires newer python than available via nsew self["IPagent"] = "Dummy" elif args[i+1] == "corosync8": self["Stack"] = "corosync" self["DoStonith"]=1 self["stonith-type"] = "fence_rhevm" print "Obtaining RHEV-M credentials from the current environment" self["stonith-params"] = "login=%s,passwd=%s,ipaddr=%s,ipport=%s,ssl=1,shell_timeout=10" % ( os.environ['RHEVM_USERNAME'], os.environ['RHEVM_PASSWORD'], os.environ['RHEVM_SERVER'], os.environ['RHEVM_PORT'], ) self["IPBase"] = " fe80::1234:56:7890:3000" if os.path.isfile(dsh_file): self["nodes"] = [] f = open(dsh_file, 'r') for line in f: l = line.strip().rstrip() if not l.startswith('#'): self["nodes"].append(l) f.close() else: print("Unknown DSH group: %s" % args[i+1]) elif args[i] == "--syslog-facility" or args[i] == "--facility": skipthis=1 self["SyslogFacility"] = args[i+1] elif args[i] == "--seed": skipthis=1 self.SeedRandom(args[i+1]) elif args[i] == "--warn-inactive": self["warn-inactive"] = 1 elif args[i] == "--schema": skipthis=1 self["Schema"] = args[i+1] elif args[i] == "--ais": self["Stack"] = "openais" elif args[i] == "--at-boot" or args[i] == "--cluster-starts-at-boot": skipthis=1 if args[i+1] == "1" or args[i+1] == "yes": self["at-boot"] = 1 elif args[i+1] == "0" or args[i+1] == "no": self["at-boot"] = 0 else: self.usage(args[i+1]) elif args[i] == "--heartbeat" or args[i] == "--lha": self["Stack"] = "heartbeat" elif args[i] == "--hae": self["Stack"] = "openais" self["Schema"] = "hae" elif args[i] == "--stack": if args[i+1] == "fedora" or args[i+1] == "fedora-17" or args[i+1] == "fedora-18": self["Stack"] = "corosync" elif args[i+1] == "rhel-6": self["Stack"] = "cman" elif args[i+1] == "rhel-7": self["Stack"] = "corosync" else: self["Stack"] = args[i+1] skipthis=1 elif args[i] == "--once": self["scenario"] = "all-once" elif args[i] == "--boot": self["scenario"] = "boot" elif args[i] == "--valgrind-tests": self["valgrind-tests"] = 1 elif args[i] == "--no-loop-tests": self["loop-tests"] = 0 elif args[i] == "--loop-minutes": skipthis=1 try: self["loop-minutes"]=int(args[i+1]) except ValueError: self.usage(args[i]) elif args[i] == "--no-unsafe-tests": self["unsafe-tests"] = 0 elif args[i] == "--experimental-tests": self["experimental-tests"] = 1 elif args[i] == "--container-tests": self["container-tests"] = 1 elif args[i] == "--set": skipthis=1 (name, value) = args[i+1].split('=') self[name] = value print "Setting %s = %s" % (name, value) + elif args[i] == "--help": + self.usage(args[i], 0) + elif args[i] == "--": break else: try: NumIter=int(args[i]) self["iterations"] = NumIter except ValueError: self.usage(args[i]) - def usage(arg, status=1): - print "Illegal argument %s" % (arg) + def usage(self, arg, status=1): + if status: + print 
"Illegal argument %s" % arg print "usage: " + sys.argv[0] +" [options] number-of-iterations" print "\nCommon options: " print "\t [--nodes 'node list'] list of cluster nodes separated by whitespace" print "\t [--group | -g 'name'] use the nodes listed in the named DSH group (~/.dsh/groups/$name)" print "\t [--limit-nodes max] only use the first 'max' cluster nodes supplied with --nodes" print "\t [--stack (v0|v1|cman|corosync|heartbeat|openais)] which cluster stack is installed" print "\t [--list-tests] list the valid tests" print "\t [--benchmark] add the timing information" print "\t " print "Options that CTS will usually auto-detect correctly: " print "\t [--logfile path] where should the test software look for logs from cluster nodes" print "\t [--syslog-facility name] which syslog facility should the test software log to" print "\t [--at-boot (1|0)] does the cluster software start at boot time" print "\t [--test-ip-base ip] offset for generated IP address resources" print "\t " print "Options for release testing: " print "\t [--populate-resources | -r] generate a sample configuration" print "\t [--choose name] run only the named test" print "\t [--stonith (1 | 0 | yes | no | rhcs | ssh)]" print "\t [--once] run all valid tests once" print "\t " print "Additional (less common) options: " print "\t [--clobber-cib | -c ] erase any existing configuration" print "\t [--outputfile path] optional location for the test software to write logs to" print "\t [--trunc] truncate logfile before starting" print "\t [--xmit-loss lost-rate(0.0-1.0)]" print "\t [--recv-loss lost-rate(0.0-1.0)]" print "\t [--standby (1 | 0 | yes | no)]" print "\t [--fencing (1 | 0 | yes | no | rhcs | lha | openstack )]" print "\t [--stonith-type type]" print "\t [--stonith-args name=value]" print "\t [--bsc]" print "\t [--no-loop-tests] dont run looping/time-based tests" print "\t [--no-unsafe-tests] dont run tests that are unsafe for use with ocfs2/drbd" print "\t [--valgrind-tests] include tests using valgrind" print "\t [--experimental-tests] include experimental tests" print "\t [--container-tests] include pacemaker_remote tests that run in lxc container resources" print "\t [--oprofile 'node list'] list of cluster nodes to run oprofile on]" print "\t [--qarsh] use the QARSH backdoor to access nodes instead of SSH" print "\t [--docker] Indicates nodes are docker nodes." 
print "\t [--seed random_seed]" print "\t [--set option=value]" print "\t " print "\t Example: " print "\t python sys.argv[0] -g virt1 --stack cs -r --stonith ssh --schema pacemaker-1.0 500" sys.exit(status) class EnvFactory: instance = None def __init__(self): pass def getInstance(self, args=None): if not EnvFactory.instance: EnvFactory.instance = Environment(args) return EnvFactory.instance diff --git a/cts/patterns.py b/cts/patterns.py index 13307e5e62..fe5299a5fd 100644 --- a/cts/patterns.py +++ b/cts/patterns.py @@ -1,532 +1,532 @@ from UserDict import UserDict import sys, time, types, syslog, os, struct, string, signal, traceback, warnings, socket from cts.CTSvars import * patternvariants = {} class BasePatterns: def __init__(self, name): self.name = name patternvariants[name] = self self.ignore = [] self.BadNews = [] self.components = {} self.commands = { "StatusCmd" : "crmadmin -t 60000 -S %s 2>/dev/null", "CibQuery" : "cibadmin -Ql", "CibAddXml" : "cibadmin --modify -c --xml-text %s", "CibDelXpath" : "cibadmin --delete --xpath %s", # 300,000 == 5 minutes "RscRunning" : CTSvars.CRM_DAEMON_DIR + "/lrmd_test -R -r %s", "CIBfile" : "%s:"+CTSvars.CRM_CONFIG_DIR+"/cib.xml", "TmpDir" : "/tmp", "BreakCommCmd" : "iptables -A INPUT -s %s -j DROP >/dev/null 2>&1", "FixCommCmd" : "iptables -D INPUT -s %s -j DROP >/dev/null 2>&1", # tc qdisc add dev lo root handle 1: cbq avpkt 1000 bandwidth 1000mbit # tc class add dev lo parent 1: classid 1:1 cbq rate "$RATE"kbps allot 17000 prio 5 bounded isolated # tc filter add dev lo parent 1: protocol ip prio 16 u32 match ip dst 127.0.0.1 match ip sport $PORT 0xFFFF flowid 1:1 # tc qdisc add dev lo parent 1: netem delay "$LATENCY"msec "$(($LATENCY/4))"msec 10% 2> /dev/null > /dev/null "ReduceCommCmd" : "", "RestoreCommCmd" : "tc qdisc del dev lo root", "UUIDQueryCmd" : "crmadmin -N", "MaintenanceModeOn" : "cibadmin --modify -c --xml-text ''", "MaintenanceModeOff" : "cibadmin --delete --xpath \"//nvpair[@name='maintenance-mode']\"", "StandbyCmd" : "crm_attribute -VQ -U %s -n standby -l forever -v %s 2>/dev/null", "StandbyQueryCmd" : "crm_attribute -QG -U %s -n standby -l forever -d off 2>/dev/null", } self.search = { "Pat:DC_IDLE" : "crmd.*State transition.*-> S_IDLE", # This wont work if we have multiple partitions "Pat:Local_started" : "%s\W.*The local CRM is operational", "Pat:Slave_started" : "%s\W.*State transition.*-> S_NOT_DC", "Pat:Master_started": "%s\W.*State transition.*-> S_IDLE", "Pat:We_stopped" : "heartbeat.*%s.*Heartbeat shutdown complete", "Pat:Logd_stopped" : "%s\W.*logd:.*Exiting write process", "Pat:They_stopped" : "%s\W.*LOST:.* %s ", "Pat:They_dead" : "node %s.*: is dead", "Pat:TransitionComplete" : "Transition status: Complete: complete", "Pat:Fencing_start" : "Initiating remote operation .* for %s", "Pat:Fencing_ok" : "stonith.*remote_op_done:.*Operation .* of %s by .*: OK", "Pat:RscOpOK" : "process_lrm_event:.*Operation %s_%s.*ok.*confirmed", "Pat:RscRemoteOpOK" : "process_lrm_event:.*Operation %s_%s.*ok.*node=%s, .*confirmed.*true", "Pat:NodeFenced" : "tengine_stonith_notify:.*Peer %s was terminated .*: OK", "Pat:FenceOpOK" : "Operation .* for host '%s' with device .* returned: 0", } def get_component(self, key): if self.components.has_key(key): return self.components[key] print "Unknown component '%s' for %s" % (key, self.name) return [] def get_patterns(self, key): if key == "BadNews": return self.BadNews elif key == "BadNewsIgnore": return self.ignore elif key == "Commands": return self.commands elif key == "Search": return 
self.search elif key == "Components": return self.components def __getitem__(self, key): if key == "Name": return self.name elif self.commands.has_key(key): return self.commands[key] elif self.search.has_key(key): return self.search[key] else: print "Unknown template '%s' for %s" % (key, self.name) return None class crm_lha(BasePatterns): def __init__(self, name): BasePatterns.__init__(self, name) self.commands.update({ "StartCmd" : "service heartbeat start > /dev/null 2>&1", "StopCmd" : "service heartbeat stop > /dev/null 2>&1", "EpocheCmd" : "crm_node -H -e", "QuorumCmd" : "crm_node -H -q", "ParitionCmd" : "crm_node -H -p", }) self.search.update({ # Patterns to look for in the log files for various occasions... "Pat:ChildKilled" : "%s\W.*heartbeat.*%s.*killed by signal 9", "Pat:ChildRespawn" : "%s\W.*heartbeat.*Respawning client.*%s", "Pat:ChildExit" : "(ERROR|error): Client .* exited with return code", }) self.BadNews = [ r"error:", r"crit:", r"ERROR:", r"CRIT:", r"Shutting down...NOW", r"Timer I_TERMINATE just popped", r"input=I_ERROR", r"input=I_FAIL", r"input=I_INTEGRATED cause=C_TIMER_POPPED", r"input=I_FINALIZED cause=C_TIMER_POPPED", r"input=I_ERROR", r", exiting\.", r"WARN.*Ignoring HA message.*vote.*not in our membership list", r"pengine.*Attempting recovery of resource", r"is taking more than 2x its timeout", r"Confirm not received from", r"Welcome reply not received from", r"Attempting to schedule .* after a stop", r"Resource .* was active at shutdown", r"duplicate entries for call_id", r"Search terminated:", r"No need to invoke the TE", r"global_timer_callback:", r"Faking parameter digest creation", r"Parameters to .* action changed:", r"Parameters to .* changed", ] self.ignore = [ "(ERROR|error): crm_abort:.*crm_glib_handler: ", "(ERROR|error): Message hist queue is filling up", "stonithd.*CRIT: external_hostlist:.*'vmware gethosts' returned an empty hostlist", "stonithd.*(ERROR|error): Could not list nodes for stonith RA external/vmware.", "pengine.*Preventing .* from re-starting", ] class crm_cs_v0(BasePatterns): def __init__(self, name): BasePatterns.__init__(self, name) self.commands.update({ "EpocheCmd" : "crm_node -e --openais", "QuorumCmd" : "crm_node -q --openais", "ParitionCmd" : "crm_node -p --openais", "StartCmd" : "service corosync start", "StopCmd" : "service corosync stop", }) self.search.update({ # The next pattern is too early # "Pat:We_stopped" : "%s.*Service engine unloaded: Pacemaker Cluster Manager", # The next pattern would be preferred, but it doesn't always come out # "Pat:We_stopped" : "%s.*Corosync Cluster Engine exiting with status", "Pat:We_stopped" : "%s\W.*Service engine unloaded: corosync cluster quorum service", "Pat:They_stopped" : "%s\W.*crmd.*Node %s\[.*state is now lost", "Pat:They_dead" : "corosync:.*Node %s is now: lost", "Pat:ChildExit" : "Child process .* exited", "Pat:ChildKilled" : "%s\W.*corosync.*Child process %s terminated with signal 9", "Pat:ChildRespawn" : "%s\W.*corosync.*Respawning failed child process: %s", + + "Pat:InfraUp" : "%s\W.*corosync.*Initializing transport", + "Pat:PacemakerUp" : "%s\W.*pacemakerd.*Starting Pacemaker", }) self.ignore = [ r"crm_mon:", r"crmadmin:", r"update_trace_data", r"async_notify:.*strange, client not found", r"Parse error: Ignoring unknown option .*nodename", r"error: log_operation:.*Operation 'reboot' .* with device 'FencingFail' returned:", r"Child process .* terminated with signal 9", r"getinfo response error: 1$", "sbd.* error: inquisitor_child: DEBUG MODE IS ACTIVE", "sbd.* pcmk: error: 
crm_ipc_read: Connection to cib_ro failed", "sbd.* pcmk: error: mainloop_gio_callback: Connection to cib_ro.* closed .I/O condition=17", ] self.BadNews = [ r"error:", r"crit:", r"ERROR:", r"CRIT:", r"Shutting down...NOW", r"Timer I_TERMINATE just popped", r"input=I_ERROR", r"input=I_FAIL", r"input=I_INTEGRATED cause=C_TIMER_POPPED", r"input=I_FINALIZED cause=C_TIMER_POPPED", r"input=I_ERROR", r", exiting\.", r"(WARN|warn).*Ignoring HA message.*vote.*not in our membership list", r"pengine.*Attempting recovery of resource", r"is taking more than 2x its timeout", r"Confirm not received from", r"Welcome reply not received from", r"Attempting to schedule .* after a stop", r"Resource .* was active at shutdown", r"duplicate entries for call_id", r"Search terminated:", r":global_timer_callback", r"Faking parameter digest creation", r"Parameters to .* action changed:", r"Parameters to .* changed", r"The .* process .* terminated with signal", r"Child process .* terminated with signal", r"LogActions:.*Recover", r"rsyslogd.* imuxsock lost .* messages from pid .* due to rate-limiting", r"Peer is not part of our cluster", r"We appear to be in an election loop", r"Unknown node -> we will not deliver message", r"crm_write_blackbox", r"pacemakerd.*Could not connect to Cluster Configuration Database API", r"Receiving messages from a node we think is dead", r"share the same cluster nodeid", r"share the same name", #r"crm_ipc_send:.*Request .* failed", #r"crm_ipc_send:.*Sending to .* is disabled until pending reply is received", # Not inherently bad, but worth tracking #r"No need to invoke the TE", #r"ping.*: DEBUG: Updated connected = 0", #r"Digest mis-match:", r"te_graph_trigger:.*Transition failed: terminated", r"process_ping_reply", r"warn.*:retrieveCib", #r"Executing .* fencing operation", #r"fence_pcmk.* Call to fence", #r"fence_pcmk", r"cman killed by node", r"Election storm", r"stalled the FSA with pending inputs", ] self.components["common-ignore"] = [ "Pending action:", "error: crm_log_message_adv:", "resources were active at shutdown", "pending LRM operations at shutdown", "Lost connection to the CIB service", "Connection to the CIB terminated...", "Sending message to CIB service FAILED", "apply_xml_diff:.*Diff application failed!", "crmd.*Action A_RECOVER .* not supported", "unconfirmed_actions:.*Waiting on .* unconfirmed actions", "cib_native_msgready:.*Message pending on command channel", "crmd.*do_exit:.*Performing A_EXIT_1 - forcefully exiting the CRMd", "verify_stopped:.*Resource .* was active at shutdown. 
You may ignore this error if it is unmanaged.", "error: attrd_connection_destroy:.*Lost connection to attrd", "info: te_fence_node:.*Executing .* fencing operation", "crm_write_blackbox:", # "error: native_create_actions: Resource .*stonith::.* is active on 2 nodes attempting recovery", # "error: process_pe_message: Transition .* ERRORs found during PE processing", ] self.components["corosync-ignore"] = [ r"error: pcmk_cpg_dispatch:.*Connection to the CPG API failed: Library error", r"The .* process .* exited", r"pacemakerd.*error: pcmk_child_exit:.*Child process .* exited", r"cib.*error: cib_cs_destroy:.*Corosync connection lost", r"stonith-ng.*error: stonith_peer_cs_destroy:.*Corosync connection terminated", r"The cib process .* exited: Invalid argument", r"The attrd process .* exited: Transport endpoint is not connected", r"The crmd process .* exited: Link has been severed", r"error: pcmk_child_exit:.*Child process cib .* exited: Invalid argument", r"error: pcmk_child_exit:.*Child process attrd .* exited: Transport endpoint is not connected", r"error: pcmk_child_exit:.*Child process crmd .* exited: Link has been severed", r"lrmd.*error: crm_ipc_read:.*Connection to stonith-ng failed", r"lrmd.*error: mainloop_gio_callback:.*Connection to stonith-ng.* closed", r"lrmd.*error: stonith_connection_destroy_cb:.*LRMD lost STONITH connection", r"crmd.*do_state_transition:.*State transition .* S_RECOVERY", r"crmd.*error: do_log:.*FSA: Input I_ERROR", r"crmd.*error: do_log:.*FSA: Input I_TERMINATE", r"crmd.*error: pcmk_cman_dispatch:.*Connection to cman failed", r"crmd.*error: crmd_fast_exit:.*Could not recover from internal error", r"error: crm_ipc_read:.*Connection to cib_shm failed", r"error: mainloop_gio_callback:.*Connection to cib_shm.* closed", r"error: stonith_connection_failed:.*STONITH connection failed", ] self.components["corosync"] = [ r"pacemakerd.*error: cfg_connection_destroy:.*Connection destroyed", r"pacemakerd.*error: mcp_cpg_destroy:.*Connection destroyed", r"crit: attrd_(cs|cpg)_destroy:.*Lost connection to Corosync service", r"stonith_peer_cs_destroy:.*Corosync connection terminated", r"cib_cs_destroy:.*Corosync connection lost! 
Exiting.", r"crmd_(cs|quorum)_destroy:.*connection terminated", r"pengine.*Scheduling Node .* for STONITH", r"tengine_stonith_notify:.*Peer .* was terminated .*: OK", ] self.components["cib-ignore"] = [ "lrmd.*Connection to stonith-ng failed", "lrmd.*Connection to stonith-ng.* closed", "lrmd.*LRMD lost STONITH connection", "lrmd.*STONITH connection failed, finalizing .* pending operations", ] self.components["cib"] = [ "State transition .* S_RECOVERY", "Respawning .* crmd", "Respawning .* attrd", "Connection to cib_.* failed", "Connection to cib_.* closed", "Connection to the CIB terminated...", "(Child process|The) crmd .* exited: Generic Pacemaker error", "(Child process|The) attrd .* exited: (Connection reset by peer|Transport endpoint is not connected)", "Lost connection to CIB service", "crmd.*Input I_TERMINATE from do_recover", "crmd.*I_ERROR.*crmd_cib_connection_destroy", "crmd.*Could not recover from internal error", ] self.components["lrmd"] = [ "State transition .* S_RECOVERY", "LRM Connection failed", "Respawning .* crmd", "Connection to lrmd failed", "Connection to lrmd.* closed", "crmd.*I_ERROR.*lrm_connection_destroy", "(Child process|The) crmd .* exited: Generic Pacemaker error", "crmd.*Input I_TERMINATE from do_recover", "crmd.*Could not recover from internal error", ] self.components["lrmd-ignore"] = [] self.components["crmd"] = [ # "WARN: determine_online_status: Node .* is unclean", # "Scheduling Node .* for STONITH", # "Executing .* fencing operation", # Only if the node wasn't the DC: "State transition S_IDLE", "State transition .* -> S_IDLE", ] self.components["crmd-ignore"] = [] self.components["attrd"] = [] self.components["attrd-ignore"] = [] self.components["pengine"] = [ "State transition .* S_RECOVERY", "Respawning .* crmd", "(The|Child process) crmd .* exited: Generic Pacemaker error", "Connection to pengine failed", "Connection to pengine.* closed", "Connection to the Policy Engine failed", "crmd.*I_ERROR.*save_cib_contents", "crmd.*Input I_TERMINATE from do_recover", "crmd.*Could not recover from internal error", ] self.components["pengine-ignore"] = [] self.components["stonith"] = [ "Connection to stonith-ng failed", "LRMD lost STONITH connection", "Connection to stonith-ng.* closed", "Fencing daemon connection failed", "crmd.*stonith_api_add_notification:.*Callback already present", ] self.components["stonith-ignore"] = [ "LogActions: Recover Fencing", "Updating failcount for Fencing", "error: crm_ipc_read: Connection to stonith-ng failed", "error: mainloop_gio_callback: Connection to stonith-ng.*closed (I/O condition=17)", "crit: tengine_stonith_connection_destroy: Fencing daemon connection failed", "error: te_connect_stonith:.*Sign-in failed: triggered a retry", "STONITH connection failed, finalizing .* pending operations.", "process_lrm_event:.*Operation Fencing.* Error", ] self.components["stonith-ignore"].extend(self.components["common-ignore"]) class crm_mcp(crm_cs_v0): ''' The crm version 4 cluster manager class. It implements the things we need to talk to and manipulate crm clusters running on top of native corosync (no plugins) ''' def __init__(self, name): crm_cs_v0.__init__(self, name) self.commands.update({ "StartCmd" : "service corosync start && service pacemaker start", "StopCmd" : "service pacemaker stop; service pacemaker_remote stop; service corosync stop", "EpocheCmd" : "crm_node -e", "QuorumCmd" : "crm_node -q", "ParitionCmd" : "crm_node -p", }) self.search.update({ # Close enough... 
"Corosync Cluster Engine exiting normally" isn't printed - # reliably and there's little interest in doing anything it + # reliably and there's little interest in doing anything about it "Pat:We_stopped" : "%s\W.*Unloading all Corosync service engines", "Pat:They_stopped" : "%s\W.*crmd.*Node %s\[.*state is now lost", "Pat:They_dead" : "crmd.*Node %s\[.*state is now lost", "Pat:ChildExit" : "The .* process exited", "Pat:ChildKilled" : "%s\W.*pacemakerd.*The %s process .* terminated with signal 9", "Pat:ChildRespawn" : "%s\W.*pacemakerd.*Respawning failed child process: %s", - - "Pat:InfraUp" : "%s\W.*corosync.*Initializing transport", - "Pat:PacemakerUp" : "%s\W.*pacemakerd.*Starting Pacemaker", }) # if self.Env["have_systemd"]: # self.update({ # # When systemd is in use, we can look for this instead # "Pat:We_stopped" : "%s.*Stopped Corosync Cluster Engine", # }) class crm_mcp_docker(crm_mcp): ''' The crm version 4 cluster manager class. It implements the things we need to talk to and manipulate crm clusters running on top of native corosync (no plugins) ''' def __init__(self, name): crm_mcp.__init__(self, name) self.commands.update({ "StartCmd" : "pcmk_start", "StopCmd" : "pcmk_stop", }) class crm_cman(crm_cs_v0): ''' The crm version 3 cluster manager class. It implements the things we need to talk to and manipulate crm clusters running on top of openais ''' def __init__(self, name): crm_cs_v0.__init__(self, name) self.commands.update({ "StartCmd" : "service pacemaker start", "StopCmd" : "service pacemaker stop; service pacemaker_remote stop", "EpocheCmd" : "crm_node -e --cman", "QuorumCmd" : "crm_node -q --cman", "ParitionCmd" : "crm_node -p --cman", "Pat:We_stopped" : "%s.*Unloading all Corosync service engines", "Pat:They_stopped" : "%s\W.*crmd.*Node %s\[.*state is now lost", "Pat:They_dead" : "crmd.*Node %s\[.*state is now lost", "Pat:ChildKilled" : "%s\W.*pacemakerd.*The %s process .* terminated with signal 9", "Pat:ChildRespawn" : "%s\W.*pacemakerd.*Respawning failed child process: %s", }) class PatternSelector: def __init__(self, name=None): self.name = name self.base = BasePatterns("crm-base") if not name: crm_cs_v0("crm-plugin-v0") crm_cman("crm-cman") crm_mcp("crm-mcp") crm_lha("crm-lha") elif name == "crm-lha": crm_lha(name) elif name == "crm-plugin-v0": crm_cs_v0(name) elif name == "crm-cman": crm_cman(name) elif name == "crm-mcp": crm_mcp(name) elif name == "crm-mcp-docker": crm_mcp_docker(name) def get_variant(self, variant): if patternvariants.has_key(variant): return patternvariants[variant] print "defaulting to crm-base for %s" % variant return self.base def get_patterns(self, variant, kind): return self.get_variant(variant).get_patterns(kind) def get_template(self, variant, key): v = self.get_variant(variant) return v[key] def get_component(self, variant, kind): return self.get_variant(variant).get_component(kind) def __getitem__(self, key): return self.get_template(self.name, key) # python cts/CTSpatt.py -k crm-mcp -t StartCmd if __name__ == '__main__': pdir=os.path.dirname(sys.path[0]) sys.path.insert(0, pdir) # So that things work from the source directory from cts.CTSvars import * kind=None template=None skipthis=None args=sys.argv[1:] for i in range(0, len(args)): if skipthis: skipthis=None continue elif args[i] == "-k" or args[i] == "--kind": skipthis=1 kind = args[i+1] elif args[i] == "-t" or args[i] == "--template": skipthis=1 template = args[i+1] else: print "Illegal argument " + args[i] print PatternSelector(kind)[template]