diff --git a/cts/CIB.py b/cts/CIB.py
index 7922d8be5e..ce8fb4aa02 100644
--- a/cts/CIB.py
+++ b/cts/CIB.py
@@ -1,448 +1,451 @@
 '''CTS: Cluster Testing System: CIB generator
 '''
 __copyright__ = '''
 Author: Andrew Beekhof <abeekhof@suse.de>
 Copyright (C) 2008 Andrew Beekhof
 '''
 
-from UserDict import UserDict
-import sys, time, types, syslog, os, struct, string, signal, traceback, warnings, socket
+import os, string, warnings
 
 from cts.CTSvars import *
 from cts.CTS     import ClusterManager
 
 
 class CibBase:
     def __init__(self, Factory, tag, _id, **kwargs):
         self.tag = tag
         self.name = _id
         self.kwargs = kwargs
         self.values = []
         self.children = []
         self.Factory = Factory
 
     def __repr__(self):
         return "%s-%s" % (self.tag, self.name)
 
     def add_child(self, child):
         self.children.append(child)
 
     def __setitem__(self, key, value):
         if value:
             self.kwargs[key] = value
         else:
             self.values.append(key)
 
 from cib_xml import *
 
 
 class ConfigBase:
     cts_cib = None
     version = "unknown"
     feature_set = "unknown"
     Factory = None
 
     def __init__(self, CM, factory, tmpfile=None):
         self.CM = CM
         self.Factory = factory
 
         if not tmpfile:
             warnings.filterwarnings("ignore")
             tmpfile = os.tmpnam()
             warnings.resetwarnings()
 
         self.Factory.tmpfile = tmpfile
 
     def version(self):
         return self.version
 
     def NextIP(self):
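+        # Return the next test IP by incrementing the final component of
+        # Env["IPBase"] (hexadecimal after the last ':' for IPv6, decimal after
+        # the last '.' for IPv4) and storing the result back, so successive
+        # calls hand out consecutive addresses.  There is no carry handling,
+        # so the configured base must leave room in its last component.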
         ip = self.CM.Env["IPBase"]
         if ":" in ip:
             (prefix, sep, suffix) = ip.rpartition(":")
             suffix = str(hex(int(suffix, 16)+1)).lstrip("0x")
         else:
             (prefix, sep, suffix) = ip.rpartition(".")
             suffix = str(int(suffix)+1)
 
         ip = prefix + sep + suffix
         self.CM.Env["IPBase"] = ip
         return ip.strip()
 
 
 class CIB11(ConfigBase):
     feature_set = "3.0"
     version = "pacemaker-1.1"
     counter = 1
 
     def _show(self, command=""):
         output = ""
         (rc, result) = self.Factory.rsh(self.Factory.target, "HOME=/root CIB_file="+self.Factory.tmpfile+" cibadmin -Ql "+command, None, )
         for line in result:
             output += line
             self.Factory.debug("Generated Config: "+line)
         return output
 
     def NewIP(self, name=None, standard="ocf"):
         if self.CM.Env["IPagent"] == "IPaddr2":
             ip = self.NextIP()
             if not name:
                 if ":" in ip:
                     (prefix, sep, suffix) = ip.rpartition(":")
                     name = "r"+suffix
                 else:
                     name = "r"+ip
 
             r = Resource(self.Factory, name, self.CM.Env["IPagent"], standard)
             r["ip"] = ip
         
             if ":" in ip:
                 r["cidr_netmask"] = "64"
                 r["nic"] = "eth0"
             else:
                 r["cidr_netmask"] = "32"
 
         else:
             if not name:
                 name = "r%s%d" % (self.CM.Env["IPagent"], self.counter)
                 self.counter = self.counter + 1
             r = Resource(self.Factory, name, self.CM.Env["IPagent"], standard)
 
         r.add_op("monitor", "5s")
         return r
 
     def install(self, target):
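+        # Generate the CIB directly into the cluster's cib.xml on "target",
+        # hand ownership to the cluster daemon user, then restore the
+        # factory's temporary file path.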
         old = self.Factory.tmpfile
 
         # Force a rebuild
         self.cts_cib = None
 
         self.Factory.tmpfile = CTSvars.CRM_CONFIG_DIR+"/cib.xml"
         self.contents(target)
         self.Factory.rsh(self.Factory.target, "chown "+CTSvars.CRM_DAEMON_USER+" "+self.Factory.tmpfile)
 
         self.Factory.tmpfile = old
 
     def contents(self, target=None):
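+        # Build the complete test CIB in self.Factory.tmpfile on the target
+        # node via cibadmin and return the generated XML.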
         # Return the cached copy if we have already generated it
         if self.cts_cib:
             return self.cts_cib
 
         if target:
             self.Factory.target = target
 
         self.Factory.rsh(self.Factory.target, "HOME=/root cibadmin --empty %s > %s" % (self.version, self.Factory.tmpfile))
         #cib_base = self.cib_template % (self.feature_set, self.version, ''' remote-tls-port='9898' remote-clear-port='9999' ''')
 
         nodelist = ""
         self.num_nodes = 0
         for node in self.CM.Env["nodes"]:
             nodelist += node + " "
             self.num_nodes = self.num_nodes + 1
 
         no_quorum = "stop"
         if self.num_nodes < 3:
             no_quorum = "ignore"
             self.Factory.log("Cluster only has %d nodes, configuring: no-quroum-policy=ignore" % self.num_nodes)
 
         # Fencing resource
         # Define first so that the shell doesn't reject every update
         if self.CM.Env["DoFencing"]:
             st = Resource(self.Factory, "Fencing", ""+self.CM.Env["stonith-type"], "stonith")
             # Set a threshold for unreliable stonith devices such as the vmware one
             st.add_meta("migration-threshold", "5")
             st.add_op("monitor", "120s", timeout="120s")
             st.add_op("stop", "0", timeout="60s")
             st.add_op("start", "0", timeout="60s")
 
             entries = string.split(self.CM.Env["stonith-params"], ',')
             for entry in entries:
                 (name, value) = string.split(entry, '=')
                 if name == "hostlist" and value == "all":
                     value = string.join(self.CM.Env["nodes"], " ")
                 elif name == "pcmk_host_list" and value == "all":
                     value = string.join(self.CM.Env["nodes"], " ")
 
                 st[name] = value
 
             st.commit()
 
             # Test advanced fencing logic
             if True:
                 stf_nodes = []
                 stt_nodes = []
 
                 # Create the levels
                 stl = FencingTopology(self.Factory)
                 for node in self.CM.Env["nodes"]:
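+                    # Give each node a random topology: "levels-and" puts the
+                    # always-passing dummy device and the real Fencing device
+                    # in a single level, "levels-or " puts a failing device at
+                    # level 1 with real Fencing as the level-2 fallback, and
+                    # "broadcast " uses plain Fencing with no topology entry.
+                    # The trailing spaces keep the choices the same width in
+                    # the log and must match the comparisons below.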
                     ftype = self.CM.Env.RandomGen.choice(["levels-and", "levels-or ", "broadcast "])
                     self.CM.log(" - Using %s fencing for node: %s" % (ftype, node))
                     # for baremetal remote node tests
                     stt_nodes.append("remote_%s" % node)
                     if ftype == "levels-and":
                         stl.level(1, node, "FencingPass,Fencing")
                         stt_nodes.append(node)
 
                     elif ftype == "levels-or ":
                         stl.level(1, node, "FencingFail")
                         stl.level(2, node, "Fencing")
                         stf_nodes.append(node)
 
                 # Create a Dummy agent that always passes for levels-and
                 if len(stt_nodes):
                     self.CM.install_helper("fence_dummy", destdir="/usr/sbin", sourcedir=CTSvars.Fencing_home)
                     stt = Resource(self.Factory, "FencingPass", "fence_dummy", "stonith")
                     stt["pcmk_host_list"] = string.join(stt_nodes, " ")
                     # Wait this many seconds before doing anything, handy for letting disks get flushed too
                     stt["random_sleep_range"] = "30"
                     stt["mode"] = "pass"
                     stt.commit()
 
                 # Create a Dummy agent that always fails for levels-or
                 if len(stf_nodes):
                     self.CM.install_helper("fence_dummy", destdir="/usr/sbin", sourcedir=CTSvars.Fencing_home)
                     stf = Resource(self.Factory, "FencingFail", "fence_dummy", "stonith")
                     stf["pcmk_host_list"] = string.join(stf_nodes, " ")
                     # Wait this many seconds before doing anything, handy for letting disks get flushed too
                     stf["random_sleep_range"] = "30"
                     stf["mode"] = "fail"
                     stf.commit()
 
                 # Now commit the levels themselves
                 stl.commit()
 
         o = Option(self.Factory, "stonith-enabled", self.CM.Env["DoFencing"])
         o["start-failure-is-fatal"] = "false"
         o["pe-input-series-max"] = "5000"
         o["default-action-timeout"] = "90s"
         o["shutdown-escalation"] = "5min"
         o["batch-limit"] = "10"
         o["dc-deadtime"] = "5s"
         o["no-quorum-policy"] = no_quorum
         o["expected-quorum-votes"] = self.num_nodes
 
         if self.CM.Env["DoBSC"] == 1:
             o["ident-string"] = "Linux-HA TEST configuration file - REMOVEME!!"
 
         o.commit()
 
         # Add resources?
         if self.CM.Env["CIBResource"] == 1:
             self.add_resources()
 
         if self.CM.cluster_monitor == 1:
             mon = Resource(self.Factory, "cluster_mon", "ocf", "ClusterMon", "pacemaker")
             mon.add_op("start", "0", requires="nothing")
             mon.add_op("monitor", "5s", requires="nothing")
             mon["update"] = "10"
             mon["extra_options"] = "-r -n"
             mon["user"] = "abeekhof"
             mon["htmlfile"] = "/suse/abeekhof/Export/cluster.html"
             mon.commit()
 
             #self._create('''location prefer-dc cluster_mon rule -INFINITY: \#is_dc eq false''')
 
         # generate cib
         self.cts_cib = self._show()
 
         if self.Factory.tmpfile != CTSvars.CRM_CONFIG_DIR+"/cib.xml":
             self.Factory.rsh(self.Factory.target, "rm -f "+self.Factory.tmpfile)
 
         return self.cts_cib
 
     def add_resources(self):
         # Per-node resources
         for node in self.CM.Env["nodes"]:
             name = "rsc_"+node
             r = self.NewIP(name)
             r.prefer(node, "100")
             r.commit()
 
         # Migrator
         # Make this slightly sticky (since we have no other location constraints) to avoid relocation during Reattach
         m = Resource(self.Factory, "migrator","Dummy",  "ocf", "pacemaker")
         m.add_meta("resource-stickiness","1")
         m.add_meta("allow-migrate", "1")
         m.add_op("monitor", "P10S")
         m.commit()
 
         # Ping the test master
         p = Resource(self.Factory, "ping-1","ping",  "ocf", "pacemaker")
         p.add_op("monitor", "60s")
         p["host_list"] = self.CM.Env["cts-master"]
         p["name"] = "connected"
         p["debug"] = "true"
 
         c = Clone(self.Factory, "Connectivity", p)
         c["globally-unique"] = "false"
         c.commit()
 
         #master slave resource
         s = Resource(self.Factory, "stateful-1", "Stateful", "ocf", "pacemaker")
         s.add_op("monitor", "15s", timeout="60s")
         s.add_op("monitor", "16s", timeout="60s", role="Master")
         ms = Master(self.Factory, "master-1", s)
         ms["clone-max"] = self.num_nodes
         ms["master-max"] = 1
         ms["clone-node-max"] = 1
         ms["master-node-max"] = 1
 
         # Require connectivity to run the master
         r = Rule(self.Factory, "connected", "-INFINITY", op="or")
         r.add_child(Expression(self.Factory, "m1-connected-1", "connected", "lt", "1"))
         r.add_child(Expression(self.Factory, "m1-connected-2", "connected", "not_defined", None))
         ms.prefer("connected", rule=r)
 
         ms.commit()
 
         # Group Resource
         g = Group(self.Factory, "group-1")
         g.add_child(self.NewIP())
 
         if self.CM.Env["have_systemd"]:
+            # It would be better to put the Python code in a separate file so
+            # we could loop "while True" rather than sleep for 24 hours, but a
+            # loop can't go in a single-line python -c invocation: only simple
+            # statements may be separated by semicolons in Python.
             dummy_service_file = """
 [Unit]
 Description=Dummy resource that takes a while to start
 
 [Service]
 Type=notify
-ExecStart=/usr/bin/python -c 'import time; import systemd.daemon;time.sleep(10); systemd.daemon.notify("READY=1"); time.sleep(3600)'
-ExecStop=sleep 10
+ExecStart=/usr/bin/python -c 'import time, systemd.daemon; time.sleep(10); systemd.daemon.notify("READY=1"); time.sleep(86400)'
+ExecStop=/bin/sleep 10
 ExecStop=/bin/kill -KILL $MAINPID
 """
 
             os.system("cat <<-END >/tmp/DummySD.service\n%s\nEND" % (dummy_service_file))
 
             self.CM.install_helper("DummySD.service", destdir="/usr/lib/systemd/system/", sourcedir="/tmp")
             sysd = Resource(self.Factory, "petulant", "DummySD",  "service")
             sysd.add_op("monitor", "P10S")
             g.add_child(sysd)
         else:
             g.add_child(self.NewIP())
 
         g.add_child(self.NewIP())
 
         # Group with the master
         g.after("master-1", first="promote", then="start")
         g.colocate("master-1", "INFINITY", withrole="Master")
 
         g.commit()
 
         # LSB resource
         lsb_agent = self.CM.install_helper("LSBDummy")
 
         lsb = Resource(self.Factory, "lsb-dummy",lsb_agent,  "lsb")
         lsb.add_op("monitor", "5s")
 
         # LSB with group
         lsb.after("group-1")
         lsb.colocate("group-1")
 
         lsb.commit()
 
 
 class CIB12(CIB11):
     feature_set = "3.0"
     version = "pacemaker-1.2"
 
 class CIB20(CIB11):
     feature_set = "3.0"
     version = "pacemaker-2.0"
 
 #class HASI(CIB10):
 #    def add_resources(self):
 #        # DLM resource
 #        self._create('''primitive dlm ocf:pacemaker:controld op monitor interval=120s''')
 #        self._create('''clone dlm-clone dlm meta globally-unique=false interleave=true''')
 
         # O2CB resource
 #        self._create('''primitive o2cb ocf:ocfs2:o2cb op monitor interval=120s''')
 #        self._create('''clone o2cb-clone o2cb meta globally-unique=false interleave=true''')
 #        self._create('''colocation o2cb-with-dlm INFINITY: o2cb-clone dlm-clone''')
 #        self._create('''order start-o2cb-after-dlm mandatory: dlm-clone o2cb-clone''')
 
 
 class ConfigFactory:
     def __init__(self, CM):
         self.CM = CM
         self.rsh = self.CM.rsh
         self.register("pacemaker11", CIB11, CM, self)
         self.register("pacemaker12", CIB12, CM, self)
         self.register("pacemaker20", CIB20, CM, self)
 #        self.register("hae", HASI, CM, self)
         self.target = self.CM.Env["nodes"][0]
         self.tmpfile = None
 
     def log(self, args):
         self.CM.log("cib: %s" % args)
 
     def debug(self, args):
         self.CM.debug("cib: %s" % args)
 
     def register(self, methodName, constructor, *args, **kargs):
         """register a constructor"""
         _args = [constructor]
         _args.extend(args)
         setattr(self, methodName, apply(ConfigFactoryItem,_args, kargs))
 
     def unregister(self, methodName):
         """unregister a constructor"""
         delattr(self, methodName)
 
     def createConfig(self, name="pacemaker-1.0"):
         if name == "pacemaker-1.0":
             name = "pacemaker10";
         elif name == "pacemaker-1.1":
             name = "pacemaker11";
         elif name == "pacemaker-1.2":
             name = "pacemaker12";
         elif name == "pacemaker-2.0":
             name = "pacemaker20";
         elif name == "hasi":
             name = "hae";
 
         if hasattr(self, name):
             return getattr(self, name)()
         else:
             self.CM.log("Configuration variant '%s' is unknown.  Defaulting to latest config" % name)
 
         return self.pacemaker12()
 
 
 class ConfigFactoryItem:
     def __init__(self, function, *args, **kargs):
         assert callable(function), "function should be a callable obj"
         self._function = function
         self._args = args
         self._kargs = kargs
 
     def __call__(self, *args, **kargs):
         """call function"""
         _args = list(self._args)
         _args.extend(args)
         _kargs = self._kargs.copy()
         _kargs.update(kargs)
         return apply(self._function,_args,_kargs)
 
 # Basic Sanity Testing
 if __name__ == '__main__':
     import CTSlab
     env = CTSlab.LabEnvironment()
     env["nodes"] = []
     env["nodes"].append("pcmk-1")
     env["nodes"].append("pcmk-2")
     env["nodes"].append("pcmk-3")
     env["nodes"].append("pcmk-4")
 
     env["CIBResource"] = 1
     env["IPBase"] = "fe80::1234:56:7890:1000"
     env["DoStonith"] = 1
     env["stonith-type"] = "fence_xvm"
     env["stonith-params"] = "pcmk_arg_map=domain:uname"
 
     manager = ClusterManager(env)
     manager.cluster_monitor = False
 
     CibFactory = ConfigFactory(manager)
     cib = CibFactory.createConfig("pacemaker-1.1")
     print cib.contents()
diff --git a/cts/CM_ais.py b/cts/CM_ais.py
index 9fce591094..44f91cdd96 100644
--- a/cts/CM_ais.py
+++ b/cts/CM_ais.py
@@ -1,154 +1,153 @@
 '''CTS: Cluster Testing System: AIS dependent modules...
 '''
 
 __copyright__ = '''
 Copyright (C) 2007 Andrew Beekhof <andrew@suse.de>
 
 '''
 
 #
 # This program is free software; you can redistribute it and/or
 # modify it under the terms of the GNU General Public License
 # as published by the Free Software Foundation; either version 2
 # of the License, or (at your option) any later version.
 #
 # This program is distributed in the hope that it will be useful,
 # but WITHOUT ANY WARRANTY; without even the implied warranty of
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 # GNU General Public License for more details.
 #
 # You should have received a copy of the GNU General Public License
 # along with this program; if not, write to the Free Software
 # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA.
 
-import os, sys, warnings
 from cts.CTSvars import *
 from cts.CM_lha  import crm_lha
 from cts.CTS     import Process
 from cts.patterns    import PatternSelector
 
 #######################################################################
 #
 #  LinuxHA v2 dependent modules
 #
 #######################################################################
 
 
 class crm_ais(crm_lha):
     '''
     The crm version 3 cluster manager class.
     It implements the things we need to talk to and manipulate
     crm clusters running on top of openais
     '''
     def __init__(self, Environment, randseed=None, name=None):
         if not name: name="crm-ais"
         crm_lha.__init__(self, Environment, randseed=randseed, name=name)
 
         self.fullcomplist = {}
         self.templates = PatternSelector(self.name)
 
     def NodeUUID(self, node):
         return node
 
     def ais_components(self):
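+        # Build the list of Pacemaker daemon Process objects that CTS should
+        # watch (and may kill), skipping anything profiled under valgrind and
+        # skipping stonith-ng when fencing is disabled.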
 
         complist = []
         if not len(self.fullcomplist.keys()):
             for c in ["cib", "lrmd", "crmd", "attrd"]:
                 self.fullcomplist[c] = Process(
                     self, c,
                     pats = self.templates.get_component(self.name, c),
                     badnews_ignore = self.templates.get_component(self.name, "%s-ignore" % c),
                     common_ignore = self.templates.get_component(self.name, "common-ignore"))

             self.fullcomplist["pengine"] = Process(
                 self, "pengine",
                 dc_pats = self.templates.get_component(self.name, "pengine"),
                 badnews_ignore = self.templates.get_component(self.name, "pengine-ignore"),
                 common_ignore = self.templates.get_component(self.name, "common-ignore"))

             self.fullcomplist["stonith-ng"] = Process(
                 self, "stonith-ng", process="stonithd",
                 pats = self.templates.get_component(self.name, "stonith"),
                 badnews_ignore = self.templates.get_component(self.name, "stonith-ignore"),
                 common_ignore = self.templates.get_component(self.name, "common-ignore"))
 
         vgrind = self.Env["valgrind-procs"].split()
         for key in self.fullcomplist.keys():
             if self.Env["valgrind-tests"]:
                 if key in vgrind:
                     # Processes running under valgrind can't be shot with "killall -9 processname"
                     self.log("Filtering %s from the component list as it is being profiled by valgrind" % key)
                     continue
             if key == "stonith-ng" and not self.Env["DoFencing"]:
                 continue
 
             complist.append(self.fullcomplist[key])
 
         #self.complist = [ fullcomplist["pengine"] ]
         return complist
 
 
 class crm_cs_v0(crm_ais):
     '''
     The crm version 3 cluster manager class.
     It implements the things we need to talk to and manipulate
     crm clusters running against version 0 of our plugin
     '''
     def __init__(self, Environment, randseed=None, name=None):
         if not name: name="crm-plugin-v0"
         crm_ais.__init__(self, Environment, randseed=randseed, name=name)
 
     def Components(self):
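+        # The first ais_components() call populates self.fullcomplist with the
+        # common daemons; corosync is added here so that the second call
+        # returns a component list which includes it.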
         self.ais_components()
         c = "corosync"
 
         self.fullcomplist[c] = Process(
             self, c, 
             pats = self.templates.get_component(self.name, c),
             badnews_ignore = self.templates.get_component(self.name, "%s-ignore"%c),
             common_ignore = self.templates.get_component(self.name, "common-ignore")
         )
 
         return self.ais_components()
 
 
 class crm_cs_v1(crm_cs_v0):
     '''
     The crm version 3 cluster manager class.
     It implements the things we need to talk to and manipulate
     crm clusters running on top of version 1 of our plugin
     '''
     def __init__(self, Environment, randseed=None, name=None):
         if not name: name="crm-plugin-v1"
         crm_cs_v0.__init__(self, Environment, randseed=randseed, name=name)
 
 
 class crm_mcp(crm_cs_v0):
     '''
     The crm version 4 cluster manager class.
     It implements the things we need to talk to and manipulate
     crm clusters running on top of native corosync (no plugins)
     '''
     def __init__(self, Environment, randseed=None, name=None):
         if not name: name="crm-mcp"
         crm_cs_v0.__init__(self, Environment, randseed=randseed, name=name)
 
         if self.Env["have_systemd"]:
             self.update({
                 # When systemd is in use, we can look for this instead
                 "Pat:We_stopped"   : "%s.*Corosync Cluster Engine exiting normally",
             })
 
 
 class crm_cman(crm_cs_v0):
     '''
     The crm version 3 cluster manager class.
     It implements the things we need to talk to and manipulate
     crm clusters running on top of cman
     '''
     def __init__(self, Environment, randseed=None, name=None):
         if not name: name="crm-cman"
         crm_cs_v0.__init__(self, Environment, randseed=randseed, name=name)
diff --git a/cts/CM_lha.py b/cts/CM_lha.py
index f290a23c4b..25d5caa817 100755
--- a/cts/CM_lha.py
+++ b/cts/CM_lha.py
@@ -1,534 +1,533 @@
 '''CTS: Cluster Testing System: LinuxHA v2 dependent modules...
 '''
 
 __copyright__ = '''
 Author: Huang Zhen <zhenhltc@cn.ibm.com>
 Copyright (C) 2004 International Business Machines
 
 Additional Audits, Revised Start action, Default Configuration:
      Copyright (C) 2004 Andrew Beekhof <andrew@beekhof.net>
 
 '''
 
 #
 # This program is free software; you can redistribute it and/or
 # modify it under the terms of the GNU General Public License
 # as published by the Free Software Foundation; either version 2
 # of the License, or (at your option) any later version.
 #
 # This program is distributed in the hope that it will be useful,
 # but WITHOUT ANY WARRANTY; without even the implied warranty of
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 # GNU General Public License for more details.
 #
 # You should have received a copy of the GNU General Public License
 # along with this program; if not, write to the Free Software
 # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA.
 
-import os, sys, warnings
-from cts          import CTS
+import sys
 from cts.CTSvars  import *
 from cts.CTS      import *
 from cts.CIB      import *
 from cts.CTStests import AuditResource
 from cts.watcher  import LogWatcher
 
 try:
     from xml.dom.minidom import *
 except ImportError:
     sys.__stdout__.write("Python module xml.dom.minidom not found\n")
     sys.__stdout__.write("Please install python-xml or similar before continuing\n")
     sys.__stdout__.flush()
     sys.exit(1)
 
 #######################################################################
 #
 #  LinuxHA v2 dependent modules
 #
 #######################################################################
 
 
 class crm_lha(ClusterManager):
     '''
     The linux-ha version 2 cluster manager class.
     It implements the things we need to talk to and manipulate
     linux-ha version 2 clusters
     '''
     def __init__(self, Environment, randseed=None, name=None):
         ClusterManager.__init__(self, Environment, randseed=randseed)
         #HeartbeatCM.__init__(self, Environment, randseed=randseed)
 
         #if not name: name="crm-lha"
         #self["Name"] = name
         #self.name = name
 
         self.fastfail = 0
         self.clear_cache = 0
         self.cib_installed = 0
         self.config = None
         self.cluster_monitor = 0
         self.use_short_names = 1
 
         if self.Env["DoBSC"]:
             del self.templates["Pat:They_stopped"]
             del self.templates["Pat:Logd_stopped"]
             self.Env["use_logd"] = 0
 
         self._finalConditions()
 
         self.check_transitions = 0
         self.check_elections = 0
         self.CIBsync = {}
         self.CibFactory = ConfigFactory(self)
         self.cib = self.CibFactory.createConfig(self.Env["Schema"])
     
     def errorstoignore(self):
         # At some point implement a more elegant solution that 
         #   also produces a report at the end
         '''Return the list of errors which are known, very noisy, and should be ignored'''
         return PatternSelector().get_patterns(self.name, "BadNewsIgnore")
 
     def install_config(self, node):
         if not self.ns.WaitForNodeToComeUp(node):
             self.log("Node %s is not up." % node)
             return None
 
         if not self.CIBsync.has_key(node) and self.Env["ClobberCIB"] == 1:
             self.CIBsync[node] = 1
             self.rsh(node, "rm -f "+CTSvars.CRM_CONFIG_DIR+"/cib*")
 
             # Only install the CIB on the first node, all the other ones will pick it up from there
             if self.cib_installed == 1:
                 return None
 
             self.cib_installed = 1
             if self.Env["CIBfilename"] == None:
                 self.log("Installing Generated CIB on node %s" % (node))
                 self.cib.install(node)
 
             else:
                 self.log("Installing CIB (%s) on node %s" % (self.Env["CIBfilename"], node))
                 if 0 != self.rsh.cp(self.Env["CIBfilename"], "root@" + (self.templates["CIBfile"] % node)):
                     raise ValueError("Can not scp file to %s %d"%(node))
         
             self.rsh(node, "chown "+CTSvars.CRM_DAEMON_USER+" "+CTSvars.CRM_CONFIG_DIR+"/cib.xml")
 
     def prepare(self):
         '''Finish the Initialization process. Prepare to test...'''
 
         self.partitions_expected = 1
         for node in self.Env["nodes"]:
             self.ShouldBeStatus[node] = ""
             self.unisolate_node(node)
             self.StataCM(node)
 
     def test_node_CM(self, node):
         '''Report the status of the cluster manager on a given node'''
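+        # Returns 0 if the node is down, 1 if it is up but not yet stable, and
+        # 2 if it is up and stable (S_IDLE or S_NOT_DC reported, or an idle
+        # pattern seen in the logs).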
 
         watchpats = [ ]
         watchpats.append("Current ping state: (S_IDLE|S_NOT_DC)")
         watchpats.append(self.templates["Pat:Slave_started"]%node)
         watchpats.append(self.templates["Pat:Master_started"]%node)
         idle_watch = LogWatcher(self.Env["LogFileName"], watchpats, "ClusterIdle", hosts=[node], kind=self.Env["LogWatcher"])
         idle_watch.setwatch()
 
         out = self.rsh(node, self.templates["StatusCmd"]%node, 1)
         self.debug("Node %s status: '%s'" %(node, out))            
 
         if not out or string.find(out, 'ok') < 0:
             if self.ShouldBeStatus[node] == "up":
                 self.log(
                     "Node status for %s is %s but we think it should be %s"
                     % (node, "down", self.ShouldBeStatus[node]))
             self.ShouldBeStatus[node] = "down"
             return 0
 
         if self.ShouldBeStatus[node] == "down":
             self.log(
                 "Node status for %s is %s but we think it should be %s: %s"
                 % (node, "up", self.ShouldBeStatus[node], out))
 
         self.ShouldBeStatus[node] = "up"
 
         # check the output first - because syslog-ng loses messages
         if string.find(out, 'S_NOT_DC') != -1:
             # Up and stable
             return 2
         if string.find(out, 'S_IDLE') != -1:
             # Up and stable
             return 2
 
         # fall back to syslog-ng and wait
         if not idle_watch.look():
             # just up
             self.debug("Warn: Node %s is unstable: %s" % (node, out))
             return 1
 
         # Up and stable
         return 2
 
     # Is the node up or is the node down
     def StataCM(self, node):
         '''Report the status of the cluster manager on a given node'''
 
         if self.test_node_CM(node) > 0:
             return 1
         return None
 
     # Being up and being stable is not the same question...
     def node_stable(self, node):
         '''Report the status of the cluster manager on a given node'''
 
         if self.test_node_CM(node) == 2:
             return 1
         self.log("Warn: Node %s not stable" % (node)) 
         return None
 
     def partition_stable(self, nodes, timeout=None):
         watchpats = [ ]
         watchpats.append("Current ping state: S_IDLE")
         watchpats.append(self.templates["Pat:DC_IDLE"])
         self.debug("Waiting for cluster stability...") 
 
         if timeout == None:
             timeout = self.Env["DeadTime"]
 
         if len(nodes) < 3:
             self.debug("Cluster is inactive") 
             return 1
 
         idle_watch = LogWatcher(self.Env["LogFileName"], watchpats, "ClusterStable", timeout, hosts=nodes.split(), kind=self.Env["LogWatcher"])
         idle_watch.setwatch()
 
         for node in nodes.split():
             # have each node dump its current state
             self.rsh(node, self.templates["StatusCmd"] % node, 1)
 
         ret = idle_watch.look()
         while ret:
             self.debug(ret) 
             for node in nodes.split():
                 if re.search(node, ret):
                     return 1
             ret = idle_watch.look()
 
         self.debug("Warn: Partition %s not IDLE after %ds" % (repr(nodes), timeout)) 
         return None
 
     def cluster_stable(self, timeout=None, double_check=False):
         partitions = self.find_partitions()
 
         for partition in partitions:
             if not self.partition_stable(partition, timeout):
                 return None
 
         if double_check:
             # Make sure we are really stable and that all resources,
             # including those that depend on transient node attributes,
             # are started if they were going to be
             time.sleep(5)
             for partition in partitions:
                 if not self.partition_stable(partition, timeout):
                     return None
 
         return 1
 
     def is_node_dc(self, node, status_line=None):
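+        # The FSM states checked below are only ever reported by the current
+        # DC, so matching any of them means this node is the DC.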
         rc = 0
 
         if not status_line: 
             status_line = self.rsh(node, self.templates["StatusCmd"]%node, 1)
 
         if not status_line:
             rc = 0
         elif string.find(status_line, 'S_IDLE') != -1:
             rc = 1
         elif string.find(status_line, 'S_INTEGRATION') != -1: 
             rc = 1
         elif string.find(status_line, 'S_FINALIZE_JOIN') != -1: 
             rc = 1
         elif string.find(status_line, 'S_POLICY_ENGINE') != -1: 
             rc = 1
         elif string.find(status_line, 'S_TRANSITION_ENGINE') != -1: 
             rc = 1
 
         return rc
 
     def active_resources(self, node):
         # [SM].* {node} matches Started, Slave, Master
         # Stopped won't be matched as it won't include {node}
         (rc, output) = self.rsh(node, """crm_resource -c""", None)
 
         resources = []
         for line in output: 
             if re.search("^Resource", line):
                 tmp = AuditResource(self, line)
                 if tmp.type == "primitive" and tmp.host == node:
                     resources.append(tmp.id)
         return resources
 
     def ResourceLocation(self, rid):
         ResourceNodes = []
         for node in self.Env["nodes"]:
             if self.ShouldBeStatus[node] == "up":
 
                 cmd = self.templates["RscRunning"] % (rid)
                 (rc, lines) = self.rsh(node, cmd, None)
 
                 if rc == 127:
                     self.log("Command '%s' failed. Binary or pacemaker-cts package not installed?" % cmd)
                     for line in lines:
                         self.log("Output: "+line)
                 elif rc == 0:
                     ResourceNodes.append(node)
 
         return ResourceNodes
 
     def find_partitions(self):
         ccm_partitions = []
 
         for node in self.Env["nodes"]:
             if self.ShouldBeStatus[node] == "up":
                 partition = self.rsh(node, self.templates["ParitionCmd"], 1)
 
                 if not partition:
                     self.log("no partition details for %s" % node)
                 elif len(partition) > 2:
                     nodes = partition.split()
                     nodes.sort()
                     partition = string.join(nodes, ' ')
 
                     found = 0
                     for a_partition in ccm_partitions:
                         if partition == a_partition:
                             found = 1
                     if found == 0:
                         self.debug("Adding partition from %s: %s" % (node, partition))
                         ccm_partitions.append(partition)
                     else:
                         self.debug("Partition '%s' from %s is consistent with existing entries" % (partition, node))
 
                 else:
                     self.log("bad partition details for %s" % node)
             else:
                 self.debug("Node %s is down... skipping" % node)
 
         self.debug("Found partitions: %s" % repr(ccm_partitions) )
         return ccm_partitions
 
     def HasQuorum(self, node_list):
         # If we are auditing a partition, then one side will
         #   have quorum and the other not.
         # So the caller needs to tell us which we are checking
         # If no value for node_list is specified... assume all nodes  
         if not node_list:
             node_list = self.Env["nodes"]
 
         for node in node_list:
             if self.ShouldBeStatus[node] == "up":
                 quorum = self.rsh(node, self.templates["QuorumCmd"], 1)
                 if string.find(quorum, "1") != -1:
                     return 1
                 elif string.find(quorum, "0") != -1:
                     return 0
                 else:
                     self.debug("WARN: Unexpected quorum test result from " + node + ":" + quorum)
 
         return 0

     def Components(self):
         complist = []
         common_ignore = [
                     "Pending action:",
                     "(ERROR|error): crm_log_message_adv:",
                     "(ERROR|error): MSG: No message to dump",
                     "pending LRM operations at shutdown",
                     "Lost connection to the CIB service",
                     "Connection to the CIB terminated...",
                     "Sending message to CIB service FAILED",
                     "Action A_RECOVER .* not supported",
                     "(ERROR|error): stonithd_op_result_ready: not signed on",
                     "pingd.*(ERROR|error): send_update: Could not send update",
                     "send_ipc_message: IPC Channel to .* is not connected",
                     "unconfirmed_actions: Waiting on .* unconfirmed actions",
                     "cib_native_msgready: Message pending on command channel",
                     "do_exit: Performing A_EXIT_1 - forcefully exiting the CRMd",
                     "verify_stopped: Resource .* was active at shutdown.  You may ignore this error if it is unmanaged.",
             ]
 
         stonith_ignore = [
             "(ERROR|error): stonithd_signon: ",
             "update_failcount: Updating failcount for child_DoFencing",
             "(ERROR|error): te_connect_stonith: Sign-in failed: triggered a retry",
             "lrmd.*(ERROR|error): cl_get_value: wrong argument (reply)",
             "lrmd.*(ERROR|error): is_expected_msg:.* null message",
             "lrmd.*(ERROR|error): stonithd_receive_ops_result failed.",
              ]
 
         stonith_ignore.extend(common_ignore)
 
         ccm_ignore = [
             "(ERROR|error): get_channel_token: No reply message - disconnected"
             ]
 
         ccm_ignore.extend(common_ignore)
 
         ccm = Process(self, "ccm", triggersreboot=self.fastfail, pats = [
                     "State transition .* S_RECOVERY",
                     "CCM connection appears to have failed",
                     "crmd.*Action A_RECOVER .* not supported",
                     "crmd.*Input I_TERMINATE from do_recover",
                     "Exiting to recover from CCM connection failure",
                     "crmd.*do_exit: Could not recover from internal error",
                     "crmd.*I_ERROR.*(ccm_dispatch|crmd_cib_connection_destroy)",
                     "crmd.*exited with return code 2.",
                     "attrd.*exited with return code 1.",
                     "cib.*exited with return code 2.",
 
 # Not if it was fenced
 #                    "A new node joined the cluster",
 
 #                    "WARN: determine_online_status: Node .* is unclean",
 #                    "Scheduling Node .* for STONITH",
 #                    "Executing .* fencing operation",
 #                    "tengine_stonith_callback: .*result=0",
 #                    "Processing I_NODE_JOIN:.* cause=C_HA_MESSAGE",
 #                    "State transition S_.* -> S_INTEGRATION.*input=I_NODE_JOIN",
                     "State transition S_STARTING -> S_PENDING",
                     ], badnews_ignore = ccm_ignore)
 
         cib = Process(self, "cib", triggersreboot=self.fastfail, pats = [
                     "State transition .* S_RECOVERY",
                     "Lost connection to the CIB service",
                     "Connection to the CIB terminated...",
                     "crmd.*Input I_TERMINATE from do_recover",
                     "crmd.*I_ERROR.*crmd_cib_connection_destroy",
                     "crmd.*do_exit: Could not recover from internal error",
                     "crmd.*exited with return code 2.",
                     "attrd.*exited with return code 1.",
                     ], badnews_ignore = common_ignore)
 
         lrmd = Process(self, "lrmd", triggersreboot=self.fastfail, pats = [
                     "State transition .* S_RECOVERY",
                     "LRM Connection failed",
                     "crmd.*I_ERROR.*lrm_connection_destroy",
                     "State transition S_STARTING -> S_PENDING",
                     "crmd.*Input I_TERMINATE from do_recover",
                     "crmd.*do_exit: Could not recover from internal error",
                     "crmd.*exited with return code 2.",
                     ], badnews_ignore = common_ignore)
 
         crmd = Process(self, "crmd", triggersreboot=self.fastfail, pats = [
 #                    "WARN: determine_online_status: Node .* is unclean",
 #                    "Scheduling Node .* for STONITH",
 #                    "Executing .* fencing operation",
 #                    "tengine_stonith_callback: .*result=0",
                     "State transition .* S_IDLE",
                     "State transition S_STARTING -> S_PENDING",
                     ], badnews_ignore = common_ignore)
 
         pengine = Process(self, "pengine", triggersreboot=self.fastfail, pats = [
                     "State transition .* S_RECOVERY",
                     "crmd.*exited with return code 2.",
                     "crmd.*Input I_TERMINATE from do_recover",
                     "crmd.*do_exit: Could not recover from internal error",
                     "crmd.*CRIT: pe_connection_destroy: Connection to the Policy Engine failed",
                     "crmd.*I_ERROR.*save_cib_contents",
                     "crmd.*exited with return code 2.",
                     ], badnews_ignore = common_ignore, dc_only=1)
 
         if self.Env["DoFencing"] == 1 :
             complist.append(Process(self, "stoniths", triggersreboot=self.fastfail, dc_pats = [
                         "crmd.*CRIT: tengine_stonith_connection_destroy: Fencing daemon connection failed",
                         "Attempting connection to fencing daemon",
                         "te_connect_stonith: Connected",
                     ], badnews_ignore = stonith_ignore))
 
         if self.fastfail == 0:
             ccm.pats.extend([
                 "attrd .* exited with return code 1",
                 "(ERROR|error): Respawning client .*attrd",
                 "cib.* exited with return code 2",
                 "(ERROR|error): Respawning client .*cib",
                 "crmd.* exited with return code 2",
                 "(ERROR|error): Respawning client .*crmd" 
                 ])
             cib.pats.extend([
                 "attrd.* exited with return code 1",
                 "(ERROR|error): Respawning client .*attrd",
                 "crmd.* exited with return code 2",
                 "(ERROR|error): Respawning client .*crmd" 
                 ])
             lrmd.pats.extend([
                 "crmd.* exited with return code 2",
                 "(ERROR|error): Respawning client .*crmd" 
                 ])
             pengine.pats.extend([
                 "(ERROR|error): Respawning client .*crmd" 
                 ])
 
         complist.append(ccm)
         complist.append(cib)
         complist.append(lrmd)
         complist.append(crmd)
         complist.append(pengine)
 
         return complist
 
     def NodeUUID(self, node):
         lines = self.rsh(node, self.templates["UUIDQueryCmd"], 1)
         for line in lines:
             self.debug("UUIDLine:" + line)
             m = re.search(r'%s.+\((.+)\)' % node, line)
             if m:
                 return m.group(1)
         return ""
 
     def StandbyStatus(self, node):
         out=self.rsh(node, self.templates["StandbyQueryCmd"] % node, 1)
         if not out:
             return "off"
         out = out[:-1]
         self.debug("Standby result: "+out)
         return out
 
     # status == "on" : Enter Standby mode
     # status == "off": Enter Active mode
     def SetStandbyMode(self, node, status):
         current_status = self.StandbyStatus(node)
         cmd = self.templates["StandbyCmd"] % (node, status)
         ret = self.rsh(node, cmd)
         return True
 
     def AddDummyRsc(self, node, rid):
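+        # Inject a Dummy primitive named "rid" plus a location constraint
+        # pinning it to "node", using the cluster's CibAddXml command template.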
         rsc_xml = """ '<resources>
                 <primitive class=\"ocf\" id=\"%s\" provider=\"pacemaker\" type=\"Dummy\">
                     <operations>
                         <op id=\"%s-interval-10s\" interval=\"10s\" name=\"monitor\"/
                     </operations>
                 </primitive>
             </resources>'""" % (rid, rid)
         constraint_xml = """ '<constraints>
                 <rsc_location id=\"location-%s-%s\" node=\"%s\" rsc=\"%s\" score=\"INFINITY\"/>
             </constraints>'
             """ % (rid, node, node, rid)
 
         self.rsh(node, self.templates['CibAddXml'] % (rsc_xml))
         self.rsh(node, self.templates['CibAddXml'] % (constraint_xml))
 
     def RemoveDummyRsc(self, node, rid):
         constraint = "\"//rsc_location[@rsc='%s']\"" % (rid)
         rsc = "\"//primitive[@id='%s']\"" % (rid)
 
         self.rsh(node, self.templates['CibDelXpath'] % constraint)
         self.rsh(node, self.templates['CibDelXpath'] % rsc)
 
 
 #######################################################################
 #
 #   A little test code...
 #
 #   Which you are advised to completely ignore...
 #
 #######################################################################
 if __name__ == '__main__': 
     pass
diff --git a/cts/CTS.py b/cts/CTS.py
index f4198c441e..9f9a291ef2 100644
--- a/cts/CTS.py
+++ b/cts/CTS.py
@@ -1,1018 +1,1001 @@
 '''CTS: Cluster Testing System: Main module
 
 Classes related to testing high-availability clusters...
  '''
 
 __copyright__ = '''
 Copyright (C) 2000, 2001 Alan Robertson <alanr@unix.sh>
 Licensed under the GNU GPL.
 '''
 
 #
 # This program is free software; you can redistribute it and/or
 # modify it under the terms of the GNU General Public License
 # as published by the Free Software Foundation; either version 2
 # of the License, or (at your option) any later version.
 #
 # This program is distributed in the hope that it will be useful,
 # but WITHOUT ANY WARRANTY; without even the implied warranty of
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 # GNU General Public License for more details.
 #
 # You should have received a copy of the GNU General Public License
 # along with this program; if not, write to the Free Software
 # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA.
 
-import types, string, select, sys, time, re, os, struct, signal
-import time, syslog, random, traceback, base64, pickle, binascii, fcntl
+import string, sys, time, re, os, traceback
 
-
-from socket import gethostbyname_ex
 from UserDict import UserDict
-from subprocess import Popen,PIPE
-from threading import Thread
 
 from cts.CTSvars     import *
 from cts.logging     import LogFactory
 from cts.watcher     import LogWatcher
 from cts.remote      import RemoteFactory
 from cts.environment import EnvFactory
 from cts.patterns    import PatternSelector
 
 has_log_stats = {}
 log_stats_bin = CTSvars.CRM_DAEMON_DIR + "/cts_log_stats.sh"
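+# Shell helper meant to live on the cluster nodes as cts_log_stats.sh: it
+# appends load averages (and optional test markers) to a CSV file while CTS
+# runs.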
 log_stats = """
 #!/bin/bash
 # Tool for generating system load reports while CTS runs
 
 trap "" 1
 
 f=$1; shift
 action=$1; shift
 base=`basename $0`
 
 if [ ! -e $f ]; then
     echo "Time, Load 1, Load 5, Load 15, Test Marker" > $f
 fi
 
 function killpid() {
     if [ -e $f.pid ]; then
        kill -9 `cat $f.pid`
        rm -f $f.pid
     fi
 }
 
 function status() {
     if [ -e $f.pid ]; then
        kill -0 `cat $f.pid`
        return $?
     else
        return 1
     fi
 }
 
 function start() {
     # Is it already running?
     if
 	status
     then
         return
     fi
 
     echo Active as $$
     echo $$ > $f.pid
 
     while [ 1 = 1 ]; do
         uptime | sed s/up.*:/,/ | tr '\\n' ',' >> $f
         #top -b -c -n1 | grep -e usr/libexec/pacemaker | grep -v -e grep -e python | head -n 1 | sed s@/usr/libexec/pacemaker/@@ | awk '{print " 0, "$9", "$10", "$12}' | tr '\\n' ',' >> $f
         echo 0 >> $f
         sleep 5
     done
 }
 
 case $action in
     start)
         start
         ;;
     start-bg|bg)
         # Use c --ssh -- ./stats.sh file start-bg
         nohup $0 $f start >/dev/null 2>&1 </dev/null &
         ;;
     stop)
 	killpid
 	;;
     delete)
 	killpid
 	rm -f $f
 	;;
     mark)
 	uptime | sed s/up.*:/,/ | tr '\\n' ',' >> $f
 	echo " $*" >> $f
         start
 	;;
     *)
 	echo "Unknown action: $action."
 	;;
 esac
 """
 
 class CtsLab:
     '''This class defines the Lab Environment for the Cluster Test System.
     It defines those things which are expected to change from test
     environment to test environment for the same cluster manager.
 
     It is where you define the set of nodes that are in your test lab,
     what kind of reset mechanism you use, etc.
 
     This class is derived from a UserDict because we hold many
     different parameters of different kinds, and this provides
     a uniform and extensible interface useful for any kind of
     communication between the user/administrator/tester and CTS.
 
     At this point in time, it is the intent of this class to model static
     configuration and/or environmental data about the environment which
     doesn't change as the tests proceed.
 
     Well-known names (keys) are an important concept in this class.
     The HasMinimalKeys member function knows the minimal set of
     well-known names for the class.
 
     The following names are standard (well-known) at this time:
 
         nodes           An array of the nodes in the cluster
         reset           A ResetMechanism object
         logger          An array of objects that log strings...
         CMclass         The type of ClusterManager we are running
                         (This is a class object, not a class instance)
         RandSeed        Random seed.  It is a triple of bytes. (optional)
 
     The CTS code ignores names it doesn't know about/need.
     The individual tests have access to this information, and it is
     perfectly acceptable to provide hints, tweaks, fine-tuning
     directions or other information to the tests through this mechanism.
     '''
 
     def __init__(self, args=None):
         self.Env = EnvFactory().getInstance(args)
         self.Scenario = None
         self.logger = LogFactory()
         self.rsh = RemoteFactory().getInstance()
 
     def dump(self):
         self.Env.dump()
 
     def has_key(self, key):
         return self.Env.has_key(key)
 
     def __getitem__(self, key):
         return self.Env[key]
 
     def __setitem__(self, key, value):
         self.Env[key] = value
 
-    def HasMinimalKeys(self):
-        'Return TRUE if our object has the minimal set of keys/values in it'
-        result = 1
-        for key in self.MinimalKeys:
-            if not self.has_key(key):
-                result = None
-        return result
-
     def run(self, Scenario, Iterations):
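+        # Run the scenario; returns 0 on success and a non-zero value (the
+        # failure count where available) otherwise, suitable for use as an
+        # exit status.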
         if not Scenario:
             self.logger.log("No scenario was defined")
             return 1
 
         self.logger.log("Cluster nodes: ")
         for node in self.Env["nodes"]:
             self.logger.log("    * %s" % (node))
 
         if not Scenario.SetUp():
             return 1
 
         try :
             Scenario.run(Iterations)
         except :
             self.logger.log("Exception by %s" % sys.exc_info()[0])
             self.logger.traceback(traceback)
 
             Scenario.summarize()
             Scenario.TearDown()
             return 1
 
         #ClusterManager.oprofileSave(Iterations)
         Scenario.TearDown()
 
         Scenario.summarize()
         if Scenario.Stats["failure"] > 0:
             return Scenario.Stats["failure"]
 
         elif Scenario.Stats["success"] != Iterations:
             self.logger.log("No failure count but success != requested iterations")
             return 1
 
         return 0
 
-    def IsValidNode(self, node):
-        'Return TRUE if the given node is valid'
-        return self.Nodes.has_key(node)
-
     def __CheckNode(self, node):
         "Raise a ValueError if the given node isn't valid"
 
         if not self.IsValidNode(node):
             raise ValueError("Invalid node [%s] in CheckNode" % node)
 
 class NodeStatus:
     def __init__(self, env):
         self.Env = env
 
     def IsNodeBooted(self, node):
         '''Return TRUE if the given node is booted (responds to pings)'''
         if self.Env["docker"]:
             return RemoteFactory().getInstance()("localhost", "docker inspect --format {{.State.Running}} %s | grep -q true" % node, silent=True) == 0
 
         return RemoteFactory().getInstance()("localhost", "ping -nq -c1 -w1 %s" % node, silent=True) == 0
 
     def IsSshdUp(self, node):
         rc = RemoteFactory().getInstance()(node, "true", silent=True)
         return rc == 0
 
     def WaitForNodeToComeUp(self, node, Timeout=300):
         '''Return TRUE when given node comes up, or None/FALSE if timeout'''
         timeout = Timeout
         anytimeouts = 0
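+        # Each failed check below sleeps 30 seconds, so the default of 300
+        # tries allows roughly two and a half hours for the node to come back.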
         while timeout > 0:
             if self.IsNodeBooted(node) and self.IsSshdUp(node):
                 if anytimeouts:
                      # Fudge to wait for the system to finish coming up
                      time.sleep(30)
                      LogFactory().debug("Node %s now up" % node)
                 return 1
 
             time.sleep(30)
             if (not anytimeouts):
                 LogFactory().debug("Waiting for node %s to come up" % node)
 
             anytimeouts = 1
             timeout = timeout - 1
 
         LogFactory().log("%s did not come up within %d tries" % (node, Timeout))
         answer = raw_input('Continue? [nY]')
         if answer and answer == "n":
             raise ValueError("%s did not come up within %d tries" % (node, Timeout))
 
     def WaitForAllNodesToComeUp(self, nodes, timeout=300):
         '''Return TRUE when all nodes come up, or FALSE if timeout'''
 
         for node in nodes:
             if not self.WaitForNodeToComeUp(node, timeout):
                 return None
         return 1
 
 
 class ClusterManager(UserDict):
     '''The Cluster Manager class.
     This is a subclass of the Python dictionary class.
     (This is because it contains lots of {name,value} pairs,
     not because its behavior is all that similar to a
     dictionary in other ways.)
 
     This is an abstract class which implements high-level
     operations on the cluster and/or its cluster managers.
     Actual cluster manager classes are subclasses of this type.
 
     One of the things we do is track the state we think every node should
     be in.
     '''
 
     def __InitialConditions(self):
         #if os.geteuid() != 0:
         #  raise ValueError("Must Be Root!")
         None
 
     def _finalConditions(self):
         for key in self.keys():
             if self[key] == None:
                 raise ValueError("Improper derivation: self[" + key +   "] must be overridden by subclass.")
 
     def __init__(self, Environment, randseed=None):
         self.Env = EnvFactory().getInstance()
         self.templates = PatternSelector(self.Env["Name"])
         self.__InitialConditions()
         self.logger = LogFactory()
         self.clear_cache = 0
         self.TestLoggingLevel=0
         self.data = {}
         self.name = self.Env["Name"]
 
         self.rsh = RemoteFactory().getInstance()
         self.ShouldBeStatus={}
         self.ns = NodeStatus(self.Env)
         self.OurNode = string.lower(os.uname()[1])
         self.__instance_errorstoignore = []
 
     def __getitem__(self, key):
         if key == "Name":
             return self.name
 
         print "FIXME: Getting %s from %s" % (key, repr(self))
         if self.data.has_key(key):
             return self.data[key]
 
         return self.templates.get_patterns(self.Env["Name"], key)
 
     def __setitem__(self, key, value):
         print "FIXME: Setting %s=%s on %s" % (key, value, repr(self))
         self.data[key] = value
 
     def key_for_node(self, node):
         return node
 
     def instance_errorstoignore_clear(self):
         '''Allows the test scenario to reset instance errors to ignore on each iteration.'''
         self.__instance_errorstoignore = []
 
     def instance_errorstoignore(self):
         '''Return list of errors which are 'normal' for a specific test instance'''
         return self.__instance_errorstoignore
 
     def errorstoignore(self):
         '''Return list of errors which are 'normal' and should be ignored'''
         return []
 
     def log(self, args):
         self.logger.log(args)
 
     def debug(self, args):
         self.logger.debug(args)
 
     def prepare(self):
         '''Finish the Initialization process. Prepare to test...'''
 
         print repr(self)+"prepare"
         for node in self.Env["nodes"]:
             if self.StataCM(node):
                 self.ShouldBeStatus[node] = "up"
             else:
                 self.ShouldBeStatus[node] = "down"
 
             self.unisolate_node(node)
 
     def upcount(self):
         '''How many nodes are up?'''
         count = 0
         for node in self.Env["nodes"]:
             if self.ShouldBeStatus[node] == "up":
                 count = count + 1
         return count
 
     def install_helper(self, filename, destdir=None, nodes=None, sourcedir=None):
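+        # Copy "filename" from sourcedir (CTS_home by default) into destdir on
+        # each target node and return the full local path of the source copy.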
         if sourcedir == None:
             sourcedir = CTSvars.CTS_home
         file_with_path = "%s/%s" % (sourcedir, filename)
         if not nodes:
             nodes = self.Env["nodes"]
 
         if not destdir:
             destdir = CTSvars.CTS_home
 
         self.debug("Installing %s to %s on %s" % (filename, destdir, repr(self.Env["nodes"])))
         for node in nodes:
             self.rsh(node, "mkdir -p %s" % destdir)
             self.rsh.cp(file_with_path, "root@%s:%s/%s" % (node, destdir, filename))
         return file_with_path
 
     def install_config(self, node):
         return None
 
     def clear_all_caches(self):
         if self.clear_cache:
             for node in self.Env["nodes"]:
                 if self.ShouldBeStatus[node] == "down":
                     self.debug("Removing cache file on: "+node)
                     self.rsh(node, "rm -f "+CTSvars.HA_VARLIBHBDIR+"/hostcache")
                 else:
                     self.debug("NOT Removing cache file on: "+node)
 
     def prepare_fencing_watcher(self, name):
         # If we don't have quorum now but get it as a result of starting this node,
         # then a bunch of nodes might get fenced
         upnode = None
         if self.HasQuorum(None):
             self.debug("Have quorum")
             return None
 
         if not self.templates["Pat:Fencing_start"]:
             print "No start pattern"
             return None
 
         if not self.templates["Pat:Fencing_ok"]:
             print "No ok pattern"
             return None
 
         stonith = None
         stonithPats = []
         for peer in self.Env["nodes"]:
             if self.ShouldBeStatus[peer] != "up":
                 stonithPats.append(self.templates["Pat:Fencing_ok"] % peer)
                 stonithPats.append(self.templates["Pat:Fencing_start"] % peer)
             elif self.Env["Stack"] == "corosync (cman)":
                 # There is a delay between gaining quorum and CMAN starting fencing
                 # This can mean that even nodes that are fully up get fenced
                 # There is no use fighting it, just look for everyone so that CTS doesn't get confused
                 stonithPats.append(self.templates["Pat:Fencing_ok"] % peer)
                 stonithPats.append(self.templates["Pat:Fencing_start"] % peer)
 
         stonith = LogWatcher(self.Env["LogFileName"], stonithPats, "StartupFencing", 0, hosts=self.Env["nodes"], kind=self.Env["LogWatcher"])
         stonith.setwatch()
         return stonith
 
     def fencing_cleanup(self, node, stonith):
         peer_list = []
         peer_state = {}
 
         self.debug("Looking for nodes that were fenced as a result of %s starting" % node)
 
         # If we just started a node, we may now have quorum (and permission to fence)
         if not stonith:
             self.debug("Nothing to do")
             return peer_list
 
         q = self.HasQuorum(None)
         if not q and len(self.Env["nodes"]) > 2:
             # We didn't gain quorum - we shouldn't have shot anyone
             self.debug("Quorum: %d Len: %d" % (q, len(self.Env["nodes"])))
             return peer_list
 
         for n in self.Env["nodes"]:
             peer_state[n] = "unknown"
 
         # Now see if any states need to be updated
         self.debug("looking for: " + repr(stonith.regexes))
         shot = stonith.look(0)
         while shot:
             line = repr(shot)
             self.debug("Found: " + line)
             del stonith.regexes[stonith.whichmatch]
 
             # Extract node name (peer stays None if nothing matches this line)
             peer = None
             for n in self.Env["nodes"]:
                 if re.search(self.templates["Pat:Fencing_ok"] % n, shot):
                     peer = n
                     peer_state[peer] = "complete"
                     self.__instance_errorstoignore.append(self.templates["Pat:Fencing_ok"] % peer)
 
                 elif peer_state[n] != "complete" and re.search(self.templates["Pat:Fencing_start"] % n, shot):
                     # TODO: Correctly detect multiple fencing operations for the same host
                     peer = n
                     peer_state[peer] = "in-progress"
                     self.__instance_errorstoignore.append(self.templates["Pat:Fencing_start"] % peer)
 
             if not peer:
                 self.logger.log("ERROR: Unknown stonith match: %s" % line)
 
             elif not peer in peer_list:
                 self.debug("Found peer: " + peer)
                 peer_list.append(peer)
 
             # Get the next one
             shot = stonith.look(60)
 
         for peer in peer_list:
 
             self.debug("   Peer %s was fenced as a result of %s starting: %s" % (peer, node, peer_state[peer]))
             if self.Env["at-boot"]:
                 self.ShouldBeStatus[peer] = "up"
             else:
                 self.ShouldBeStatus[peer] = "down"
 
             if peer_state[peer] == "in-progress":
                 # Wait for any in-progress operations to complete
                 shot = stonith.look(60)
                 while len(stonith.regexes) and shot:
                     line = repr(shot)
                     self.debug("Found: " + line)
                     del stonith.regexes[stonith.whichmatch]
                     shot = stonith.look(60)
 
             # Now make sure the node is alive too
             self.ns.WaitForNodeToComeUp(peer, self.Env["DeadTime"])
 
             # Poll until it comes up
             if self.Env["at-boot"]:
                 if not self.StataCM(peer):
                     time.sleep(self.Env["StartTime"])
 
                 if not self.StataCM(peer):
                     self.logger.log("ERROR: Peer %s failed to restart after being fenced" % peer)
                     return None
 
         return peer_list
 
     def StartaCM(self, node, verbose=False):
 
         '''Start up the cluster manager on a given node'''
         if verbose: self.logger.log("Starting %s on node %s" % (self.templates["Name"], node))
         else: self.debug("Starting %s on node %s" % (self.templates["Name"], node))
         ret = 1
 
         if not self.ShouldBeStatus.has_key(node):
             self.ShouldBeStatus[node] = "down"
 
         if self.ShouldBeStatus[node] != "down":
             return 1
 
         patterns = []
         # Technically we should always be able to notice ourselves starting
         patterns.append(self.templates["Pat:Local_started"] % node)
         if self.upcount() == 0:
             patterns.append(self.templates["Pat:Master_started"] % node)
         else:
             patterns.append(self.templates["Pat:Slave_started"] % node)
 
         watch = LogWatcher(
             self.Env["LogFileName"], patterns, "StartaCM", self.Env["StartTime"]+10, hosts=self.Env["nodes"], kind=self.Env["LogWatcher"])
 
         self.install_config(node)
 
         self.ShouldBeStatus[node] = "any"
         if self.StataCM(node) and self.cluster_stable(self.Env["DeadTime"]):
             self.logger.log ("%s was already started" % (node))
             return 1
 
         # Clear out the host cache so autojoin can be exercised
         if self.clear_cache:
             self.debug("Removing cache file on: "+node)
             self.rsh(node, "rm -f "+CTSvars.HA_VARLIBHBDIR+"/hostcache")
 
         if not(self.Env["valgrind-tests"]):
             startCmd = self.templates["StartCmd"]
         else:
             if self.Env["valgrind-prefix"]:
                 prefix = self.Env["valgrind-prefix"]
             else:
                 prefix = "cts"
 
             startCmd = """G_SLICE=always-malloc HA_VALGRIND_ENABLED='%s' VALGRIND_OPTS='%s --log-file=/tmp/%s-%s.valgrind' %s""" % (
                 self.Env["valgrind-procs"], self.Env["valgrind-opts"], prefix, """%p""", self.templates["StartCmd"])
 
         # Starting this node may give the cluster quorum and so trigger fencing
         # of nodes that are still down; set up a watcher for that
         stonith = self.prepare_fencing_watcher(node)
 
         watch.setwatch()
 
         if self.rsh(node, startCmd) != 0:
             self.logger.log ("Warn: Start command failed on node %s" % (node))
             self.fencing_cleanup(node, stonith)
             return None
 
         self.ShouldBeStatus[node] = "up"
         watch_result = watch.lookforall()
 
         if watch.unmatched:
             for regex in watch.unmatched:
                 self.logger.log ("Warn: Startup pattern not found: %s" % (regex))
 
         if watch_result and self.cluster_stable(self.Env["DeadTime"]):
             #self.debug("Found match: "+ repr(watch_result))
             self.fencing_cleanup(node, stonith)
             return 1
 
         elif self.StataCM(node) and self.cluster_stable(self.Env["DeadTime"]):
             self.fencing_cleanup(node, stonith)
             return 1
 
         self.logger.log ("Warn: Start failed for node %s" % (node))
         return None
 
     def StartaCMnoBlock(self, node, verbose=False):
 
         '''Start up the cluster manager on a given node in non-blocking mode'''
 
         if verbose: self.logger.log("Starting %s on node %s" % (self["Name"], node))
         else: self.debug("Starting %s on node %s" % (self["Name"], node))
 
         # Clear out the host cache so autojoin can be exercised
         if self.clear_cache:
             self.debug("Removing cache file on: "+node)
             self.rsh(node, "rm -f "+CTSvars.HA_VARLIBHBDIR+"/hostcache")
 
         self.install_config(node)
         if not(self.Env["valgrind-tests"]):
             startCmd = self.templates["StartCmd"]
         else:
             if self.Env["valgrind-prefix"]:
                 prefix = self.Env["valgrind-prefix"]
             else:
                 prefix = "cts"
 
             startCmd = """G_SLICE=always-malloc HA_VALGRIND_ENABLED='%s' VALGRIND_OPTS='%s --log-file=/tmp/%s-%s.valgrind' %s""" % (
                 self.Env["valgrind-procs"], self.Env["valgrind-opts"], prefix, """%p""", self.templates["StartCmd"])
 
         self.rsh(node, startCmd, synchronous=0)
         self.ShouldBeStatus[node] = "up"
         return 1
 
     def StopaCM(self, node, verbose=False, force=False):
 
         '''Stop the cluster manager on a given node'''
 
         if verbose: self.logger.log("Stopping %s on node %s" % (self["Name"], node))
         else: self.debug("Stopping %s on node %s" % (self["Name"], node))
 
         if self.ShouldBeStatus[node] != "up" and force == False:
             return 1
 
         if self.rsh(node, self.templates["StopCmd"]) == 0:
             # Make sure we can continue even if corosync leaks
             # fdata-* is the old name
             #self.rsh(node, "rm -f /dev/shm/qb-* /dev/shm/fdata-*")
             self.ShouldBeStatus[node] = "down"
             self.cluster_stable(self.Env["DeadTime"])
             return 1
         else:
             self.logger.log ("ERROR: Could not stop %s on node %s" % (self["Name"], node))
 
         return None
 
     def StopaCMnoBlock(self, node):
 
         '''Stop the cluster manager on a given node in non-blocking mode'''
 
         self.debug("Stopping %s on node %s" % (self["Name"], node))
 
         self.rsh(node, self.templates["StopCmd"], synchronous=0)
         self.ShouldBeStatus[node] = "down"
         return 1
 
     def cluster_stable(self, timeout = None):
         time.sleep(self.Env["StableTime"])
         return 1
 
     def node_stable(self, node):
         return 1
 
     def RereadCM(self, node):
 
         '''Force the cluster manager on a given node to reread its config
            This may be a no-op on certain cluster managers.
         '''
         rc=self.rsh(node, self.templates["RereadCmd"])
         if rc == 0:
             return 1
         else:
             self.logger.log ("Could not force %s on node %s to reread its config"
             %        (self["Name"], node))
         return None
 
     def StataCM(self, node):
 
         '''Report the status of the cluster manager on a given node'''
 
         # StatusCmd output that does not contain 'stopped' means the cluster
         # manager is running on that node
         out = self.rsh(node, self.templates["StatusCmd"] % node, 1)
         ret = (string.find(out, 'stopped') == -1)

         try:
             if ret:
                 if self.ShouldBeStatus[node] == "down":
                     self.logger.log("Node status for %s is %s but we think it should be %s"
                                     % (node, "up", self.ShouldBeStatus[node]))
             else:
                 if self.ShouldBeStatus[node] == "up":
                     self.logger.log("Node status for %s is %s but we think it should be %s"
                                     % (node, "down", self.ShouldBeStatus[node]))
         except KeyError:
             pass
 
         if ret:
             self.ShouldBeStatus[node] = "up"
         else:
             self.ShouldBeStatus[node] = "down"
         return ret
 
     def startall(self, nodelist=None, verbose=False, quick=False):
 
         '''Start the cluster manager on every node in the cluster.
         We can do it on a subset of the cluster if nodelist is not None.
         '''
         map = {}
         if not nodelist:
             nodelist = self.Env["nodes"]
 
         for node in nodelist:
             if self.ShouldBeStatus[node] == "down":
                 self.ns.WaitForAllNodesToComeUp(nodelist, 300)
 
         if not quick:
             # Start the down nodes one at a time, waiting for each to come up
             for node in nodelist:
                 if self.ShouldBeStatus[node] == "down":
                     if not self.StartaCM(node, verbose=verbose):
                         return 0
             return 1
 
         # Approximation of SimulStartList for --boot 
         watchpats = [ ]
         watchpats.append(self.templates["Pat:DC_IDLE"])
         for node in nodelist:
             watchpats.append(self.templates["Pat:Local_started"] % node)
             watchpats.append(self.templates["Pat:InfraUp"] % node)
             watchpats.append(self.templates["Pat:PacemakerUp"] % node)
 
         #   Start all the nodes - at about the same time...
         watch = LogWatcher(self.Env["LogFileName"], watchpats, "fast-start", self.Env["DeadTime"]+10, hosts=self.Env["nodes"], kind=self.Env["LogWatcher"])
         watch.setwatch()
 
         if not self.StartaCM(nodelist[0], verbose=verbose):
             return 0
         for node in nodelist:
             self.StartaCMnoBlock(node, verbose=verbose)
 
         watch.lookforall()
         if watch.unmatched:
             for regex in watch.unmatched:
                 self.logger.log ("Warn: Startup pattern not found: %s" % (regex))
 
         if not self.cluster_stable():
             self.logger.log("Cluster did not stabilize")
             return 0
 
         return 1
 
     def stopall(self, nodelist=None, verbose=False, force=False):
 
         '''Stop the cluster managers on every node in the cluster.
         We can do it on a subset of the cluster if nodelist is not None.
         '''
 
         ret = 1
         map = {}
         if not nodelist:
             nodelist = self.Env["nodes"]
         for node in self.Env["nodes"]:
             if self.ShouldBeStatus[node] == "up" or force == True:
                 if not self.StopaCM(node, verbose=verbose, force=force):
                     ret = 0
         return ret
 
     def rereadall(self, nodelist=None):
 
         '''Force the cluster managers on every node in the cluster
         to reread their config files.  We can do it on a subset of the
         cluster if nodelist is not None.
         '''
 
         map = {}
         if not nodelist:
             nodelist = self.Env["nodes"]
         for node in self.Env["nodes"]:
             if self.ShouldBeStatus[node] == "up":
                 self.RereadCM(node)
 
     def statall(self, nodelist=None):
 
         '''Return the status of the cluster managers in the cluster.
         We can do it on a subset of the cluster if nodelist is not None.
         '''
 
         result = {}
         if not nodelist:
             nodelist = self.Env["nodes"]
         for node in nodelist:
             if self.StataCM(node):
                 result[node] = "up"
             else:
                 result[node] = "down"
         return result
 
     def isolate_node(self, target, nodes=None):
         '''Break communication between the target node and the other nodes'''
         if not nodes:
             nodes = self.Env["nodes"]
 
         for node in nodes:
             if node != target:
                 rc = self.rsh(target, self.templates["BreakCommCmd"] % self.key_for_node(node))
                 if rc != 0:
                     self.logger.log("Could not break the communication between %s and %s: %d" % (target, node, rc))
                     return None
                 else:
                     self.debug("Communication cut between %s and %s" % (target, node))
         return 1
 
     def unisolate_node(self, target, nodes=None):
         '''Restore communication between the target node and the other nodes'''
         if not nodes:
             nodes = self.Env["nodes"]
 
         for node in nodes:
             if node != target:
                 restored = 0
 
                 # Limit the amount of time we have asynchronous connectivity for
                 # Restore both sides as simultaneously as possible
                 self.rsh(target, self.templates["FixCommCmd"] % self.key_for_node(node), synchronous=0)
                 self.rsh(node, self.templates["FixCommCmd"] % self.key_for_node(target), synchronous=0)
                 self.debug("Communication restored between %s and %s" % (target, node))
 
     def reducecomm_node(self,node):
         '''reduce the communication between the nodes'''
         rc = self.rsh(node, self.templates["ReduceCommCmd"]%(self.Env["XmitLoss"],self.Env["RecvLoss"]))
         if rc == 0:
             return 1
         else:
             self.logger.log("Could not reduce the communication between the nodes from node: %s" % node)
         return None
 
     def restorecomm_node(self,node):
         '''restore the saved communication between the nodes'''
         rc = 0
         if float(self.Env["XmitLoss"]) != 0 or float(self.Env["RecvLoss"]) != 0 :
             rc = self.rsh(node, self.templates["RestoreCommCmd"]);
         if rc == 0:
             return 1
         else:
             self.logger.log("Could not restore the communication between the nodes from node: %s" % node)
         return None
 
     def HasQuorum(self, node_list):
         "Return TRUE if the cluster currently has quorum"
         # If we are auditing a partition, then one side will
         #   have quorum and the other not.
         # So the caller needs to tell us which we are checking
         # If no value for node_list is specified... assume all nodes
         raise ValueError("Abstract Class member (HasQuorum)")
 
     def Components(self):
         raise ValueError("Abstract Class member (Components)")
 
     def oprofileStart(self, node=None):
         if not node:
             for n in self.Env["oprofile"]:
                 self.oprofileStart(n)
 
         elif node in self.Env["oprofile"]:
             self.debug("Enabling oprofile on %s" % node)
             self.rsh(node, "opcontrol --init")
             self.rsh(node, "opcontrol --setup --no-vmlinux --separate=lib --callgraph=20 --image=all")
             self.rsh(node, "opcontrol --start")
             self.rsh(node, "opcontrol --reset")
 
     def oprofileSave(self, test, node=None):
         if not node:
             for n in self.Env["oprofile"]:
                 self.oprofileSave(test, n)
 
         elif node in self.Env["oprofile"]:
             self.rsh(node, "opcontrol --dump")
             self.rsh(node, "opcontrol --save=cts.%d" % test)
             # Read back with: opreport -l session:cts.0 image:/usr/lib/heartbeat/c*
             if None:
                 self.rsh(node, "opcontrol --reset")
             else:
                 self.oprofileStop(node)
                 self.oprofileStart(node)
 
     def oprofileStop(self, node=None):
         if not node:
             for n in self.Env["oprofile"]:
                 self.oprofileStop(n)
 
         elif node in self.Env["oprofile"]:
             self.debug("Stopping oprofile on %s" % node)
             self.rsh(node, "opcontrol --reset")
             self.rsh(node, "opcontrol --shutdown 2>&1 > /dev/null")
 
 
     def StatsExtract(self):
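         # Only hosts where StatsMark() previously installed the log_stats
         # helper (tracked in has_log_stats) are harvested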
         if not self.Env["stats"]:
             return
 
         for host in self.Env["nodes"]:
             log_stats_file = "%s/cts-stats.csv" % CTSvars.CRM_DAEMON_DIR
             if has_log_stats.has_key(host):
                 self.rsh(host, '''bash %s %s stop''' % (log_stats_bin, log_stats_file))
                 (rc, lines) = self.rsh(host, '''cat %s''' % log_stats_file, stdout=2)
                 self.rsh(host, '''bash %s %s delete''' % (log_stats_bin, log_stats_file))
 
                 fname = "cts-stats-%d-nodes-%s.csv" % (len(self.Env["nodes"]), host)
                 print "Extracted stats: %s" % fname
                 fd = open(fname, "a")
                 fd.writelines(lines)
                 fd.close()
 
     def StatsMark(self, testnum):
         '''Mark the test number in the stats log'''
 
         global has_log_stats
         if not self.Env["stats"]:
             return
 
         for host in self.Env["nodes"]:
             log_stats_file = "%s/cts-stats.csv" % CTSvars.CRM_DAEMON_DIR
             if not has_log_stats.has_key(host):
 
                 global log_stats
                 global log_stats_bin
                 script=log_stats
                 #script = re.sub("\\\\", "\\\\", script)
                 script = re.sub('\"', '\\\"', script)
                 script = re.sub("'", "\'", script)
                 script = re.sub("`", "\`", script)
                 script = re.sub("\$", "\\\$", script)
 
                 self.debug("Installing %s on %s" % (log_stats_bin, host))
                 self.rsh(host, '''echo "%s" > %s''' % (script, log_stats_bin), silent=True)
                 self.rsh(host, '''bash %s %s delete''' % (log_stats_bin, log_stats_file))
                 has_log_stats[host] = 1
 
             # Now mark it
             self.rsh(host, '''bash %s %s mark %s''' % (log_stats_bin, log_stats_file, testnum), synchronous=0)
 
 
 class Resource:
     '''
     This is an HA resource (not a resource group).
     A resource group is just an ordered list of Resource objects.
     '''
 
     def __init__(self, cm, rsctype=None, instance=None):
         self.CM = cm
         self.ResourceType = rsctype
         self.Instance = instance
         self.needs_quorum = 1
 
     def Type(self):
         return self.ResourceType
 
     def Instance(self, nodename):
         return self.Instance
 
     def IsRunningOn(self, nodename):
         '''
         This member function returns true if our resource is running
         on the given node in the cluster.
         It is analogous to the "status" operation on SystemV init scripts and
         heartbeat scripts.  FailSafe calls it the "exclusive" operation.
         '''
         raise ValueError("Abstract Class member (IsRunningOn)")
         return None
 
     def IsWorkingCorrectly(self, nodename):
         '''
         This member function returns true if our resource is operating
         correctly on the given node in the cluster.
         Heartbeat does not require this operation; FailSafe does, and calls
         it the "monitor" operation.
         For remotely monitorable resources (like IP addresses), they *should*
         be monitored remotely for testing.
         '''
         raise ValueError("Abstract Class member (IsWorkingCorrectly)")
         return None
 
     def Start(self, nodename):
         '''
         This member function starts or activates the resource.
         '''
         raise ValueError("Abstract Class member (Start)")
         return None
 
     def Stop(self, nodename):
         '''
         This member function stops or deactivates the resource.
         '''
         raise ValueError("Abstract Class member (Stop)")
         return None
 
     def __repr__(self):
         if (self.Instance and len(self.Instance) > 1):
                 return "{" + self.ResourceType + "::" + self.Instance + "}"
         else:
                 return "{" + self.ResourceType + "}"
 
 
 class Component:
     def kill(self, node):
         None
 
 
 class Process(Component):
     def __init__(self, cm, name, process=None, dc_only=0, pats=[], dc_pats=[], badnews_ignore=[], common_ignore=[], triggersreboot=0):
         self.name = str(name)
         self.dc_only = dc_only
         self.pats = pats
         self.dc_pats = dc_pats
         self.CM = cm
         # Copy so we do not mutate the shared default list arguments
         self.badnews_ignore = list(badnews_ignore)
         self.badnews_ignore.extend(common_ignore)
         self.triggersreboot = triggersreboot
 
         if process:
             self.proc = str(process)
         else:
             self.proc = str(name)
         self.KillCmd = "killall -9 " + self.proc
 
     def kill(self, node):
         if self.CM.rsh(node, self.KillCmd) != 0:
             self.CM.log ("ERROR: Kill %s failed on node %s" % (self.name,node))
             return None
         return 1
diff --git a/cts/CTSaudits.py b/cts/CTSaudits.py
index 902558b194..a6b6ac5bd8 100755
--- a/cts/CTSaudits.py
+++ b/cts/CTSaudits.py
@@ -1,864 +1,863 @@
 '''CTS: Cluster Testing System: Audit module
  '''
 
 __copyright__ = '''
 Copyright (C) 2000, 2001,2005 Alan Robertson <alanr@unix.sh>
 Licensed under the GNU GPL.
 '''
 
 #
 # This program is free software; you can redistribute it and/or
 # modify it under the terms of the GNU General Public License
 # as published by the Free Software Foundation; either version 2
 # of the License, or (at your option) any later version.
 #
 # This program is distributed in the hope that it will be useful,
 # but WITHOUT ANY WARRANTY; without even the implied warranty of
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 # GNU General Public License for more details.
 #
 # You should have received a copy of the GNU General Public License
 # along with this program; if not, write to the Free Software
 # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA.
 
-import time, os, string, re, uuid
-import CTS
+import time, re, uuid
 from watcher import LogWatcher
 
 
 class ClusterAudit:
 
     def __init__(self, cm):
         self.CM = cm
 
     def __call__(self):
          raise ValueError("Abstract Class member (__call__)")
     
     def is_applicable(self):
         '''Return TRUE if we are applicable in the current test configuration'''
         raise ValueError("Abstract Class member (is_applicable)")
         return 1
 
     def log(self, args):
         self.CM.log("audit: %s" % args)
 
     def debug(self, args):
         self.CM.debug("audit: %s" % args)
 
     def name(self):
          raise ValueError("Abstract Class member (name)")
 
 AllAuditClasses = [ ]
 
 
 class LogAudit(ClusterAudit):
 
     def name(self):
         return "LogAudit"
 
     def __init__(self, cm):
         self.CM = cm
         self.kinds = [ "combined syslog", "journal", "remote" ]
 
     def RestartClusterLogging(self, nodes=None):
         if not nodes:
             nodes = self.CM.Env["nodes"]
 
         self.CM.debug("Restarting logging on: %s" % repr(nodes))
 
         for node in nodes:
             if self.CM.Env["have_systemd"]:
                 if self.CM.rsh(node, "systemctl stop systemd-journald.socket") != 0:
                     self.CM.log ("ERROR: Cannot stop 'systemd-journald' on %s" % node)
                 if self.CM.rsh(node, "systemctl start systemd-journald.service") != 0:
                     self.CM.log ("ERROR: Cannot start 'systemd-journald' on %s" % node)
 
             if self.CM.rsh(node, "service %s restart" % self.CM.Env["syslogd"]) != 0:
                 self.CM.log ("ERROR: Cannot restart '%s' on %s" % (self.CM.Env["syslogd"], node))
 
     def TestLogging(self):
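         # Log a uniquely-tagged message on every node and verify that it is
         # seen by at least one of the supported log watcher kinds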
         patterns = []
         prefix   = "Test message from"
         suffix   = str(uuid.uuid4())
         watch    = {}
 
         for node in self.CM.Env["nodes"]:
             # Look for the node name in two places to make sure 
             # that syslog is logging with the correct hostname
             m = re.search("^([^.]+).*", node)
             if m:
                 simple = m.group(1)
             else:
                 simple = node
             patterns.append("%s.*%s %s %s" % (simple, prefix, node, suffix))
 
         watch_pref = self.CM.Env["LogWatcher"]
         if watch_pref == "any": 
             for k in self.kinds:
                 watch[k] = LogWatcher(self.CM.Env["LogFileName"], patterns, "LogAudit", 5, silent=True, hosts=self.CM.Env["nodes"], kind=k)
                 watch[k].setwatch()
         else:
             k = watch_pref
             watch[k] = LogWatcher(self.CM.Env["LogFileName"], patterns, "LogAudit", 5, silent=True, hosts=self.CM.Env["nodes"], kind=k)
             watch[k].setwatch()
 
         if watch_pref == "any": self.CM.log("Writing log with key: %s" % (suffix))
         for node in self.CM.Env["nodes"]:
             cmd = "logger -p %s.info %s %s %s" % (self.CM.Env["SyslogFacility"], prefix, node, suffix)
             if self.CM.rsh(node, cmd, synchronous=0, silent=True) != 0:
                 self.CM.log ("ERROR: Cannot execute remote command [%s] on %s" % (cmd, node))
 
         for k in self.kinds:
             if watch.has_key(k):
                 w = watch[k]
                 if watch_pref == "any": self.CM.log("Testing for %s logs" % (k))
                 w.lookforall(silent=True)
                 if not w.unmatched:
                     if watch_pref == "any": 
                         self.CM.log ("Continuing with %s-based log reader" % (w.kind))
                         self.CM.Env["LogWatcher"] = w.kind
                     return 1
 
         for k in watch.keys():
             w = watch[k]
             if w.unmatched:
                 for regex in w.unmatched:
                     self.CM.log ("Test message [%s] not found in %s logs." % (regex, w.kind))
 
         return 0
 
     def __call__(self):
         max = 3
         attempt = 0
 
         self.CM.ns.WaitForAllNodesToComeUp(self.CM.Env["nodes"])
         while attempt <= max and self.TestLogging() == 0:
             attempt = attempt + 1
             self.RestartClusterLogging()
             time.sleep(60*attempt)
 
         if attempt > max:
             self.CM.log("ERROR: Cluster logging unrecoverable.")
             return 0
 
         return 1
     
     def is_applicable(self):
         if self.CM.Env["DoBSC"]:
             return 0
         if self.CM.Env["LogAuditDisabled"]:
             return 0
         return 1
 
 
 class DiskAudit(ClusterAudit):
 
     def name(self):
         return "DiskspaceAudit"
 
     def __init__(self, cm):
         self.CM = cm
 
     def __call__(self):
         result = 1
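         # Prints "<used%> <available MB>" for the filesystem holding /var/log,
         # e.g. "42 1024"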
         dfcmd = "df -BM /var/log | tail -1 | awk '{print $(NF-1)\" \"$(NF-2)}' | tr -d 'M%'"
 
         self.CM.ns.WaitForAllNodesToComeUp(self.CM.Env["nodes"])
         for node in self.CM.Env["nodes"]:
             dfout = self.CM.rsh(node, dfcmd, 1)
             if not dfout:
                 self.CM.log ("ERROR: Cannot execute remote df command [%s] on %s" % (dfcmd, node))
             else:
                 try:
                     (used, remain) = dfout.split()
                     used_percent = int(used)
                     remaining_mb = int(remain)
                 except (ValueError, TypeError):
                     self.CM.log("Warning: df output '%s' from %s was invalid [%s, %s]"
                                 % (dfout, node, used, remain))
                 else:
                     if remaining_mb < 10 or used_percent > 95:
                         self.CM.log("CRIT: Out of log disk space on %s (%d%% / %dMb)"
                                     % (node, used_percent, remaining_mb))
                         result = None
                         answer = raw_input('Continue? [nY] ')
                        if answer == "n":
                            raise ValueError("Disk full on %s" % (node))
 
                     elif remaining_mb < 100 or used_percent > 90:
                         self.CM.log("WARN: Low on log disk space (%d Mbytes) on %s" % (remaining_mb, node))
         return result
     
     def is_applicable(self):
         if self.CM.Env["DoBSC"]:
             return 0
         return 1
 
 
 class FileAudit(ClusterAudit):
 
     def name(self):
         return "FileAudit"
 
     def __init__(self, cm):
         self.CM = cm
         self.known = []
 
     def __call__(self):
         result = 1
 
         self.CM.ns.WaitForAllNodesToComeUp(self.CM.Env["nodes"])
         for node in self.CM.Env["nodes"]:
 
             (rc, lsout) = self.CM.rsh(node, "ls -al /var/lib/heartbeat/cores/* | grep core.[0-9]", None)
             for line in lsout:
                 line = line.strip()
                 if line not in self.known:
                     result = 0
                     self.known.append(line)
                     self.CM.log("Warning: Pacemaker core file on %s: %s" % (node, line))
 
             (rc, lsout) = self.CM.rsh(node, "ls -al /var/lib/corosync | grep core.[0-9]", None)
             for line in lsout:
                 line = line.strip()
                 if line not in self.known:
                     result = 0
                     self.known.append(line)
                     self.CM.log("Warning: Corosync core file on %s: %s" % (node, line))
 
             if self.CM.ShouldBeStatus.has_key(node) and self.CM.ShouldBeStatus[node] == "down":
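                 # A node that should be down must not leave stale qb-* IPC
                 # files behind in /dev/shm; flag and clean up any we find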
                 clean = 0
                 (rc, lsout) = self.CM.rsh(node, "ls -al /dev/shm | grep qb-", None)
                 for line in lsout:
                     result = 0
                     clean = 1
                     self.CM.log("Warning: Stale IPC file on %s: %s" % (node, line))
 
                 if clean:
                     (rc, lsout) = self.CM.rsh(node, "ps axf | grep -e pacemaker -e corosync", None)
                     for line in lsout:
                         self.CM.debug("ps[%s]: %s" % (node, line))
 
                     self.CM.rsh(node, "rm -f /dev/shm/qb-*")
 
             else:
                 self.CM.debug("Skipping %s" % node)
 
         return result
     
     def is_applicable(self):
         return 1
 
 
 class AuditResource:
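     # Parsed representation of one whitespace-separated "Resource" line
     # from crm_resource -c output (see PrimitiveAudit.setup)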
     def __init__(self, cm, line):
         fields = line.split()
         self.CM = cm
         self.line = line
         self.type = fields[1]
         self.id = fields[2]
         self.clone_id = fields[3]
         self.parent = fields[4]
         self.rprovider = fields[5]
         self.rclass = fields[6]
         self.rtype = fields[7]
         self.host = fields[8]
         self.needs_quorum = fields[9]
         self.flags = int(fields[10])
         self.flags_s = fields[11]
 
         if self.parent == "NA":
             self.parent = None
 
     def unique(self):
         if self.flags & int("0x00000020", 16):
             return 1
         return 0
 
     def orphan(self):
         if self.flags & int("0x00000001", 16):
             return 1
         return 0
 
     def managed(self):
         if self.flags & int("0x00000002", 16):
             return 1
         return 0
             
 
 class AuditConstraint:
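     # Parsed representation of one whitespace-separated "Constraint" line
     # from crm_resource -c output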
     def __init__(self, cm, line):
         fields = line.split()
         self.CM = cm
         self.line = line
         self.type = fields[1]
         self.id = fields[2]
         self.rsc = fields[3]
         self.target = fields[4]
         self.score = fields[5]
         self.rsc_role = fields[6]
         self.target_role = fields[7]
 
         if self.rsc_role == "NA":
             self.rsc_role = None
         if self.target_role == "NA":
             self.target_role = None
 
 
 class PrimitiveAudit(ClusterAudit):
     def name(self):
         return "PrimitiveAudit"
 
     def __init__(self, cm):
         self.CM = cm
 
     def doResourceAudit(self, resource, quorum):
         rc = 1
         active = self.CM.ResourceLocation(resource.id)
 
         if len(active) == 1:
             if quorum:
                 self.debug("Resource %s active on %s" % (resource.id, repr(active)))
                 
             elif resource.needs_quorum == 1:
                 self.CM.log("Resource %s active without quorum: %s" 
                             % (resource.id, repr(active)))
                 rc = 0
 
         elif not resource.managed():
             self.CM.log("Resource %s not managed. Active on %s"
                         % (resource.id, repr(active)))
 
         elif not resource.unique():
             # TODO: Figure out a clever way to actually audit these resource types
             if len(active) > 1:
                 self.debug("Non-unique resource %s is active on: %s" 
                               % (resource.id, repr(active)))
             else:
                 self.debug("Non-unique resource %s is not active" % resource.id)
 
         elif len(active) > 1:
             self.CM.log("Resource %s is active multiple times: %s" 
                         % (resource.id, repr(active)))
             rc = 0
             
         elif resource.orphan():
             self.debug("Resource %s is an inactive orphan" % resource.id)
 
         elif len(self.inactive_nodes) == 0:
             self.CM.log("WARN: Resource %s not served anywhere" % resource.id)
             rc = 0
 
         elif self.CM.Env["warn-inactive"] == 1:
             if quorum or not resource.needs_quorum:
                 self.CM.log("WARN: Resource %s not served anywhere (Inactive nodes: %s)" 
                             % (resource.id, repr(self.inactive_nodes)))
             else:
                 self.debug("Resource %s not served anywhere (Inactive nodes: %s)" 
                               % (resource.id, repr(self.inactive_nodes)))
 
         elif quorum or not resource.needs_quorum:
             self.debug("Resource %s not served anywhere (Inactive nodes: %s)" 
                           % (resource.id, repr(self.inactive_nodes)))
 
         return rc
 
     def setup(self):
         self.target = None
         self.resources = []
         self.constraints = []
         self.active_nodes = []
         self.inactive_nodes = []
 
         for node in self.CM.Env["nodes"]:
             if self.CM.ShouldBeStatus[node] == "up":
                 self.active_nodes.append(node)
             else:
                 self.inactive_nodes.append(node)
 
         for node in self.CM.Env["nodes"]:
             if self.target == None and self.CM.ShouldBeStatus[node] == "up":
                 self.target = node
 
         if not self.target:
             # TODO: In Pacemaker 1.0 clusters we'll be able to run crm_resource 
             # with CIB_file=/path/to/cib.xml even when the cluster isn't running
             self.debug("No nodes active - skipping %s" % self.name())
             return 0
 
         (rc, lines) = self.CM.rsh(self.target, "crm_resource -c", None)
 
         for line in lines:
             if re.search("^Resource", line):
                 self.resources.append(AuditResource(self.CM, line))
             elif re.search("^Constraint", line):
                 self.constraints.append(AuditConstraint(self.CM, line))
             else:
                 self.CM.log("Unknown entry: %s" % line);
 
         return 1
 
     def __call__(self):
         rc = 1
                 
         if not self.setup():
             return 1
 
         quorum = self.CM.HasQuorum(None)
         for resource in self.resources:
             if resource.type == "primitive":
                 if self.doResourceAudit(resource, quorum) == 0:
                     rc = 0
         return rc
 
     def is_applicable(self):
         if self.CM["Name"] == "crm-lha":
             return 1
         if self.CM["Name"] == "crm-ais":
             return 1
         return 0
 
 
 class GroupAudit(PrimitiveAudit):
     def name(self):
         return "GroupAudit"
 
     def __call__(self):
         rc = 1
         if not self.setup():
             return 1
 
         for group in self.resources:
             if group.type == "group":
                 first_match = 1
                 group_location = None
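                 # All active children of a group must run on the same node;
                 # the first active child's node is used as the reference location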
                 for child in self.resources:
                     if child.parent == group.id:
                         nodes = self.CM.ResourceLocation(child.id)
 
                         if first_match and len(nodes) > 0:
                             group_location = nodes[0]
 
                         first_match = 0
 
                         if len(nodes) > 1:
                             rc = 0
                             self.CM.log("Child %s of %s is active more than once: %s" 
                                         % (child.id, group.id, repr(nodes)))
 
                         elif len(nodes) == 0:
                             # Groups are allowed to be partially active
                             # However we do need to make sure later children aren't running
                             group_location = None
                             self.debug("Child %s of %s is stopped" % (child.id, group.id))
 
                         elif nodes[0] != group_location:  
                             rc = 0
                             self.CM.log("Child %s of %s is active on the wrong node (%s) expected %s" 
                                         % (child.id, group.id, nodes[0], group_location))
                         else:
                             self.debug("Child %s of %s is active on %s" % (child.id, group.id, nodes[0]))
 
         return rc
     
 
 class CloneAudit(PrimitiveAudit):
     def name(self):
         return "CloneAudit"
 
     def __call__(self):
         rc = 1
         if not self.setup():
             return 1
 
         for clone in self.resources:
             if clone.type == "clone":
                 for child in self.resources:
                     if child.parent == clone.id and child.type == "primitive":
                         self.debug("Checking child %s of %s..." % (child.id, clone.id))
                         # Check max and node_max
                         # Obtain with:
                         #    crm_resource -g clone_max --meta -r child.id
                         #    crm_resource -g clone_node_max --meta -r child.id
 
         return rc
     
 
 class ColocationAudit(PrimitiveAudit):
     def name(self):
         return "ColocationAudit"
 
     def crm_location(self, resource):
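         # Ask crm_resource which node(s) the resource is active on; the first
         # field of each output line is taken as a hostname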
         (rc, lines) = self.CM.rsh(self.target, "crm_resource -W -r %s -Q"%resource, None)
         hosts = []
         if rc == 0:
             for line in lines:
                 fields = line.split()
                 hosts.append(fields[0])
 
         return hosts
 
     def __call__(self):
         rc = 1
         if not self.setup():
             return 1
 
         for coloc in self.constraints:
             if coloc.type == "rsc_colocation":
                 source = self.crm_location(coloc.rsc)
                 target = self.crm_location(coloc.target)
                 if len(source) == 0:
                     self.debug("Colocation audit (%s): %s not running" % (coloc.id, coloc.rsc))
                 else:
                     for node in source:
                         if not node in target:
                             rc = 0
                             self.CM.log("Colocation audit (%s): %s running on %s (not in %s)" 
                                         % (coloc.id, coloc.rsc, node, repr(target)))
                         else:
                             self.debug("Colocation audit (%s): %s running on %s (in %s)" 
                                           % (coloc.id, coloc.rsc, node, repr(target)))
 
         return rc
 
 
 class CrmdStateAudit(ClusterAudit):
     def __init__(self, cm):
         self.CM = cm
         self.Stats = {"calls":0
         ,        "success":0
         ,        "failure":0
         ,        "skipped":0
         ,        "auditfail":0}
 
     def has_key(self, key):
         return self.Stats.has_key(key)
 
     def __setitem__(self, key, value):
         self.Stats[key] = value
         
     def __getitem__(self, key):
         return self.Stats[key]
 
     def incr(self, name):
         '''Increment (or initialize) the value associated with the given name'''
         if not self.Stats.has_key(name):
             self.Stats[name] = 0
         self.Stats[name] = self.Stats[name]+1
 
     def __call__(self):
         passed = 1
         up_are_down = 0
         down_are_up = 0
         unstable_list = []
 
         for node in self.CM.Env["nodes"]:
             should_be = self.CM.ShouldBeStatus[node]
             rc = self.CM.test_node_CM(node)
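             # rc > 0 means the node is up; rc == 1 additionally means it has
             # not yet reached a stable state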
             if rc > 0:
                 if should_be == "down":
                     down_are_up = down_are_up + 1
                 if rc == 1:
                     unstable_list.append(node)
             elif should_be == "up":
                 up_are_down = up_are_down + 1
 
         if len(unstable_list) > 0:
             passed = 0
             self.CM.log("Cluster is not stable: %d (of %d): %s" 
                      % (len(unstable_list), self.CM.upcount(), repr(unstable_list)))
 
         if up_are_down > 0:
             passed = 0
             self.CM.log("%d (of %d) nodes expected to be up were down."
                      % (up_are_down, len(self.CM.Env["nodes"])))
 
         if down_are_up > 0:
             passed = 0
             self.CM.log("%d (of %d) nodes expected to be down were up." 
                      % (down_are_up, len(self.CM.Env["nodes"])))
             
         return passed
 
     def name(self):
         return "CrmdStateAudit"
     
     def is_applicable(self):
         if self.CM["Name"] == "crm-lha":
             return 1
         if self.CM["Name"] == "crm-ais":
             return 1
         return 0
 
 
 class CIBAudit(ClusterAudit):
     def __init__(self, cm):
         self.CM = cm
         self.Stats = {"calls":0
         ,        "success":0
         ,        "failure":0
         ,        "skipped":0
         ,        "auditfail":0}
 
     def has_key(self, key):
         return self.Stats.has_key(key)
 
     def __setitem__(self, key, value):
         self.Stats[key] = value
         
     def __getitem__(self, key):
         return self.Stats[key]
     
     def incr(self, name):
         '''Increment (or initialize) the value associated with the given name'''
         if not self.Stats.has_key(name):
             self.Stats[name] = 0
         self.Stats[name] = self.Stats[name]+1
 
     def __call__(self):
         passed = 1
         ccm_partitions = self.CM.find_partitions()
 
         if len(ccm_partitions) == 0:
             self.debug("\tNo partitions to audit")
             return 1
         
         for partition in ccm_partitions:
             self.debug("\tAuditing CIB consistency for: %s" % partition)
             partition_passed = 0
             if self.audit_cib_contents(partition) == 0:
                 passed = 0
         
         return passed
 
     def audit_cib_contents(self, hostlist):
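         # Use the first node's CIB as the reference copy and crm_diff every
         # other node's CIB in the partition against it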
         passed = 1
         node0 = None
         node0_xml = None
 
         partition_hosts = hostlist.split()
         for node in partition_hosts:
             node_xml = self.store_remote_cib(node, node0)
 
             if node_xml == None:
                 self.CM.log("Could not perform audit: No configuration from %s" % node)
                 passed = 0
                 
             elif node0 == None:
                 node0 = node
                 node0_xml = node_xml
                 
             elif node0_xml == None: 
                 self.CM.log("Could not perform audit: No configuration from %s" % node0)
                 passed = 0
                 
             else:
                 (rc, result) = self.CM.rsh(
                     node0, "crm_diff -VV -cf --new %s --original %s" % (node_xml, node0_xml), None)
                 
                 if rc != 0:
                     self.CM.log("Diff between %s and %s failed: %d" % (node0_xml, node_xml, rc))
                     passed = 0
                     
                 for line in result:
                     if not re.search("<diff/>", line):
                         passed = 0
                         self.debug("CibDiff[%s-%s]: %s" % (node0, node, line)) 
                     else:
                         self.debug("CibDiff[%s-%s] Ignoring: %s" % (node0, node, line)) 
                         
 #            self.CM.rsh(node0, "rm -f %s" % node_xml)                        
 #        self.CM.rsh(node0, "rm -f %s" % node0_xml) 
         return passed
                 
     def store_remote_cib(self, node, target):
         combined = ""
         filename = "/tmp/ctsaudit.%s.xml" % node
 
         if not target:
             target = node
 
         (rc, lines) = self.CM.rsh(node, self.CM["CibQuery"], None)
         if rc != 0:
             self.CM.log("Could not retrieve configuration")
             return None
 
         self.CM.rsh("localhost", "rm -f %s" % filename)
         for line in lines:
             self.CM.rsh("localhost", "echo \'%s\' >> %s" % (line[:-1], filename), silent=True)
 
         if self.CM.rsh.cp(filename, "root@%s:%s" % (target, filename), silent=True) != 0:
             self.CM.log("Could not store configuration")
             return None
         return filename
 
     def name(self):
         return "CibAudit"
     
     def is_applicable(self):
         if self.CM["Name"] == "crm-lha":
             return 1
         if self.CM["Name"] == "crm-ais":
             return 1
         return 0
 
 
 class PartitionAudit(ClusterAudit):
     def __init__(self, cm):
         self.CM = cm
         self.Stats = {"calls":0
         ,        "success":0
         ,        "failure":0
         ,        "skipped":0
         ,        "auditfail":0}
         self.NodeEpoche = {}
         self.NodeState = {}
         self.NodeQuorum = {}
 
     def has_key(self, key):
         return self.Stats.has_key(key)
 
     def __setitem__(self, key, value):
         self.Stats[key] = value
         
     def __getitem__(self, key):
         return self.Stats[key]
     
     def incr(self, name):
         '''Increment (or initialize) the value associated with the given name'''
         if not self.Stats.has_key(name):
             self.Stats[name] = 0
         self.Stats[name] = self.Stats[name]+1
 
     def __call__(self):
         passed = 1
         ccm_partitions = self.CM.find_partitions()
 
         if ccm_partitions == None or len(ccm_partitions) == 0:
             return 1
 
         self.CM.cluster_stable(double_check=True)
 
         if len(ccm_partitions) != self.CM.partitions_expected:
             self.CM.log("ERROR: %d cluster partitions detected:" % len(ccm_partitions))
             passed = 0
             for partition in ccm_partitions:
                 self.CM.log("\t %s" % partition)
 
         for partition in ccm_partitions:
             partition_passed = 0
             if self.audit_partition(partition) == 0:
                 passed = 0
 
         return passed
 
     def trim_string(self, avalue):
         if not avalue:
             return None
         if len(avalue) > 1:
             return avalue[:-1]
 
     def trim2int(self, avalue):
         if not avalue:
             return None
         if len(avalue) > 1:
             return int(avalue[:-1])
 
     def audit_partition(self, partition):
         passed = 1
         dc_found = []
         dc_allowed_list = []
         lowest_epoche = None
         node_list = partition.split()
 
         self.debug("Auditing partition: %s" % (partition))
         for node in node_list:
             if self.CM.ShouldBeStatus[node] != "up":
                 self.CM.log("Warn: Node %s appeared out of nowhere" % (node))
                 self.CM.ShouldBeStatus[node] = "up"
                 # not in itself a reason to fail the audit (not what we're
                 #  checking for in this audit)
 
             self.NodeState[node]  = self.CM.rsh(node, self.CM["StatusCmd"] % node, 1)
             self.NodeEpoche[node] = self.CM.rsh(node, self.CM["EpocheCmd"], 1)
             self.NodeQuorum[node] = self.CM.rsh(node, self.CM["QuorumCmd"], 1)
             
             self.debug("Node %s: %s - %s - %s." % (node, self.NodeState[node], self.NodeEpoche[node], self.NodeQuorum[node]))
             self.NodeState[node]  = self.trim_string(self.NodeState[node])
             self.NodeEpoche[node] = self.trim2int(self.NodeEpoche[node])
             self.NodeQuorum[node] = self.trim_string(self.NodeQuorum[node])
 
             if not self.NodeEpoche[node]:
                 self.CM.log("Warn: Node %s dissappeared: cant determin epoche" % (node))
                 self.CM.ShouldBeStatus[node] = "down"
                 # not in itself a reason to fail the audit (not what we're
                 #  checking for in this audit)
             elif lowest_epoche == None or self.NodeEpoche[node] < lowest_epoche:
                 lowest_epoche = self.NodeEpoche[node]
                 
         if not lowest_epoche:
             self.CM.log("Lowest epoche not determined in %s" % (partition))
             passed = 0
 
         for node in node_list:
             if self.CM.ShouldBeStatus[node] == "up":
                 if self.CM.is_node_dc(node, self.NodeState[node]):
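                     # The DC should be the longest-standing member of the
                     # partition, i.e. the node with the lowest epoche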
                     dc_found.append(node)
                     if self.NodeEpoche[node] == lowest_epoche:
                         self.debug("%s: OK" % node)
                     elif not self.NodeEpoche[node]:
                         self.debug("Check on %s ignored: no node epoche" % node)
                     elif not lowest_epoche:
                         self.debug("Check on %s ignored: no lowest epoche" % node)
                     else:
                         self.CM.log("DC %s is not the oldest node (%d vs. %d)"
                             % (node, self.NodeEpoche[node], lowest_epoche))
                         passed = 0
 
         if len(dc_found) == 0:
             self.CM.log("DC not found on any of the %d allowed nodes: %s (of %s)"
                         % (len(dc_allowed_list), str(dc_allowed_list), str(node_list)))
 
         elif len(dc_found) > 1:
             self.CM.log("%d DCs (%s) found in cluster partition: %s"
                         % (len(dc_found), str(dc_found), str(node_list)))
             passed = 0
 
         if passed == 0:
             for node in node_list:
                 if self.CM.ShouldBeStatus[node] == "up":
                     self.CM.log("epoche %s : %s"  
                                 % (self.NodeEpoche[node], self.NodeState[node]))
 
         return passed
 
     def name(self):
         return "PartitionAudit"
     
     def is_applicable(self):
         if self.CM["Name"] == "crm-lha":
             return 1
         if self.CM["Name"] == "crm-ais":
             return 1
         return 0
 
 AllAuditClasses.append(DiskAudit)
 AllAuditClasses.append(FileAudit)
 AllAuditClasses.append(LogAudit)
 AllAuditClasses.append(CrmdStateAudit)
 AllAuditClasses.append(PartitionAudit)
 AllAuditClasses.append(PrimitiveAudit)
 AllAuditClasses.append(GroupAudit)
 AllAuditClasses.append(CloneAudit)
 AllAuditClasses.append(ColocationAudit)
 AllAuditClasses.append(CIBAudit)
 
 
 def AuditList(cm):
     result = []
     for auditclass in AllAuditClasses:
         a = auditclass(cm)
         if a.is_applicable():
             result.append(a)
     return result
diff --git a/cts/CTSlab.py b/cts/CTSlab.py
index a3494e1b4e..7bccb97981 100755
--- a/cts/CTSlab.py
+++ b/cts/CTSlab.py
@@ -1,158 +1,155 @@
 #!/usr/bin/python
 
 '''CTS: Cluster Testing System: Lab environment module
  '''
 
 __copyright__ = '''
 Copyright (C) 2001,2005 Alan Robertson <alanr@unix.sh>
 Licensed under the GNU GPL.
 '''
 
 #
 # This program is free software; you can redistribute it and/or
 # modify it under the terms of the GNU General Public License
 # as published by the Free Software Foundation; either version 2
 # of the License, or (at your option) any later version.
 #
 # This program is distributed in the hope that it will be useful,
 # but WITHOUT ANY WARRANTY; without even the implied warranty of
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 # GNU General Public License for more details.
 #
 # You should have received a copy of the GNU General Public License
 # along with this program; if not, write to the Free Software
 # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA.
 
-from UserDict import UserDict
-import sys, types, string, string, signal, os, socket
+import sys, signal, os
 
 pdir = os.path.dirname(sys.path[0])
 sys.path.insert(0, pdir) # So that things work from the source directory
 
 try:
     from cts.CTSvars      import *
     from cts.CM_ais       import *
     from cts.CM_lha       import crm_lha
     from cts.CTSaudits    import AuditList
     from cts.CTStests     import TestList
     from cts.CTSscenarios import *
     from cts.logging      import LogFactory
 except ImportError as e:
     sys.stderr.write("abort: %s\n" % e)
     sys.stderr.write("check your install and PYTHONPATH; couldn't find cts libraries in:\n%s\n" %
                      ' '.join(sys.path))
     sys.exit(1)
 
 # These are globals so they can be used by the signal handler.
 cm = None
 scenario = None
 LogFactory().add_stderr()
 
 
 def sig_handler(signum, frame) :
     LogFactory().log("Interrupted by signal %d"%signum)
     if scenario: scenario.summarize()
     if signum == 15:
         if scenario: scenario.TearDown()
         sys.exit(1)
 
 
 if __name__ == '__main__':
 
     Environment = CtsLab(sys.argv[1:])
     NumIter = Environment["iterations"]
     Tests = []
 
     # Set the signal handler
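     # (signal 15 is SIGTERM: summarize, tear down, and exit; 10 is typically SIGUSR1: just summarize)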
     signal.signal(15, sig_handler)
     signal.signal(10, sig_handler)
 
     # Create the Cluster Manager object
     if Environment["Stack"] == "heartbeat":
         cm = crm_lha(Environment)
 
-    elif Environment["Stack"] == "openais (whitetank)":
-        cm = crm_whitetank(Environment)
-        
     elif Environment["Stack"] == "corosync 2.x":
         cm = crm_mcp(Environment)
         
     elif Environment["Stack"] == "corosync (cman)":
         cm = crm_cman(Environment)
         
     elif Environment["Stack"] == "corosync (plugin v1)":
         cm = crm_cs_v1(Environment)
         
     elif Environment["Stack"] == "corosync (plugin v0)":
         cm = crm_cs_v0(Environment)
     else:
         LogFactory().log("Unknown stack: "+Environment["stack"])
         sys.exit(1)
 
     if Environment["TruncateLog"] == 1:
         if Environment["OutputFile"] is None:
             LogFactory().log("Ignoring truncate request because no output file specified")
         else:
             LogFactory().log("Truncating %s" % Environment["OutputFile"])
             with open(Environment["OutputFile"], "w") as outputfile:
                 outputfile.truncate(0)
 
     Audits = AuditList(cm)
 
     if Environment["ListTests"] == 1:
         Tests = TestList(cm, Audits)
         LogFactory().log("Total %d tests"%len(Tests))
         for test in Tests:
             LogFactory().log(str(test.name))
         sys.exit(0)
 
     elif len(Environment["tests"]) == 0:
         Tests = TestList(cm, Audits)
 
     else:
         Chosen = Environment["tests"]
         for TestCase in Chosen:
            match = None
 
            for test in TestList(cm, Audits):
                if test.name == TestCase:
                    match = test
 
            if not match:
-               usage("--choose: No applicable/valid tests chosen")
+               LogFactory().log("--choose: No applicable/valid tests chosen")
+               sys.exit(1)
            else:
                Tests.append(match)
 
     # Scenario selection
     if Environment["scenario"] == "basic-sanity":
         scenario = RandomTests(cm, [ BasicSanityCheck(Environment) ], Audits, Tests)
 
     elif Environment["scenario"] == "all-once":
         NumIter = len(Tests)
         scenario = AllOnce(
             cm, [ BootCluster(Environment), PacketLoss(Environment) ], Audits, Tests)
     elif Environment["scenario"] == "sequence":
         scenario = Sequence(
             cm, [ BootCluster(Environment), PacketLoss(Environment) ], Audits, Tests)
     elif Environment["scenario"] == "boot":
         scenario = Boot(cm, [ LeaveBooted(Environment)], Audits, [])
     else:
         scenario = RandomTests(
             cm, [ BootCluster(Environment), PacketLoss(Environment) ], Audits, Tests)
 
     LogFactory().log(">>>>>>>>>>>>>>>> BEGINNING " + repr(NumIter) + " TESTS ")
     LogFactory().log("Stack:                  %s (%s)" % (Environment["Stack"], Environment["Name"]))
     LogFactory().log("Schema:                 %s" % Environment["Schema"])
     LogFactory().log("Scenario:               %s" % scenario.__doc__)
     LogFactory().log("CTS Master:             %s" % Environment["cts-master"])
     LogFactory().log("CTS Logfile:            %s" % Environment["OutputFile"])
     LogFactory().log("Random Seed:            %s" % Environment["RandSeed"])
     LogFactory().log("Syslog variant:         %s" % Environment["syslogd"].strip())
     LogFactory().log("System log files:       %s" % Environment["LogFileName"])
     if Environment.has_key("IPBase"):
         LogFactory().log("Base IP for resources:  %s" % Environment["IPBase"])
     LogFactory().log("Cluster starts at boot: %d" % Environment["at-boot"])
 
     Environment.dump()
     rc = Environment.run(scenario, NumIter)
     sys.exit(rc)
diff --git a/cts/CTStests.py b/cts/CTStests.py
index d2b7668ac3..53a010f49f 100644
--- a/cts/CTStests.py
+++ b/cts/CTStests.py
@@ -1,3202 +1,3201 @@
 '''CTS: Cluster Testing System: Tests module
 
 There are a few things we want to do here:
 
  '''
 
 __copyright__ = '''
 Copyright (C) 2000, 2001 Alan Robertson <alanr@unix.sh>
 Licensed under the GNU GPL.
 
 Add ResourceRecover testcase Zhao Kai <zhaokai@cn.ibm.com>
 '''
 
 #
 # This program is free software; you can redistribute it and/or
 # modify it under the terms of the GNU General Public License
 # as published by the Free Software Foundation; either version 2
 # of the License, or (at your option) any later version.
 #
 # This program is distributed in the hope that it will be useful,
 # but WITHOUT ANY WARRANTY; without even the implied warranty of
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 # GNU General Public License for more details.
 #
 # You should have received a copy of the GNU General Public License
 # along with this program; if not, write to the Free Software
 # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA.
 
 #
 #        SPECIAL NOTE:
 #
 #        Tests may NOT implement any cluster-manager-specific code in them.
 #        EXTEND the ClusterManager object to provide the base capabilities
 #        the test needs if you need to do something that the current CM classes
 #        do not.  Otherwise you screw up the whole point of the object structure
 #        in CTS.
 #
 #                Thank you.
 #
 
-import time, os, re, types, string, tempfile, sys
+import time, os, re, string, tempfile
 from stat import *
 from cts import CTS
 from cts.CTSaudits import *
 from cts.CTSvars   import *
 from cts.patterns  import PatternSelector
 from cts.logging   import LogFactory
 from cts.remote    import RemoteFactory
 from cts.watcher   import LogWatcher
 from cts.environment import EnvFactory
 
 AllTestClasses = [ ]
 
 
 class CTSTest:
     '''
     A Cluster test.
     We implement the basic set of properties and behaviors for a generic
     cluster test.
 
     Cluster tests track their own statistics.
     We keep each of the kinds of counts we track as separate {name,value}
     pairs.
     '''
 
     def __init__(self, cm):
         #self.name="the unnamed test"
         self.Stats = {"calls":0
         ,        "success":0
         ,        "failure":0
         ,        "skipped":0
         ,        "auditfail":0}
 
 #        if not issubclass(cm.__class__, ClusterManager):
 #            raise ValueError("Must be a ClusterManager object")
         self.CM = cm
         self.Env = EnvFactory().getInstance()
         self.rsh = RemoteFactory().getInstance()
         self.logger = LogFactory()
         self.templates = PatternSelector(cm["Name"])
         self.Audits = []
         self.timeout = 120
         self.passed = 1
         self.is_loop = 0
         self.is_unsafe = 0
         self.is_docker_unsafe = 0
         self.is_experimental = 0
         self.is_container = 0
         self.is_valgrind = 0
         self.benchmark = 0  # which tests to benchmark
         self.timer = {}  # timers
 
     def log(self, args):
         self.logger.log(args)
 
     def debug(self, args):
         self.logger.debug(args)
 
     def has_key(self, key):
         return self.Stats.has_key(key)
 
     def __setitem__(self, key, value):
         self.Stats[key] = value
 
     def __getitem__(self, key):
         return self.Stats[key]
 
     def log_mark(self, msg):
         self.debug("MARK: test %s %s %d" % (self.name,msg,time.time()))
         return
 
     def get_timer(self,key = "test"):
         try: return self.timer[key]
         except KeyError: return 0
 
     def set_timer(self,key = "test"):
         self.timer[key] = time.time()
         return self.timer[key]
 
     def log_timer(self,key = "test"):
         elapsed = 0
         if key in self.timer:
             elapsed = time.time() - self.timer[key]
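             # pre-Python-2.5 conditional: use the bare test name for the default "test" timer, otherwise "<name>:<key>"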
             s = key == "test" and self.name or "%s:%s" % (self.name,key)
             self.debug("%s runtime: %.2f" % (s, elapsed))
             del self.timer[key]
         return elapsed
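     # Typical usage from within a test (see StonithdTest below):
     #     self.set_timer("fence")
     #     ... do the timed work ...
     #     self.log_timer("fence")   # debug-logs "<name>:fence runtime: <seconds>"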
 
     def incr(self, name):
         '''Increment (or initialize) the value associated with the given name'''
         if not self.Stats.has_key(name):
             self.Stats[name] = 0
         self.Stats[name] = self.Stats[name]+1
 
         # Reset the test passed boolean
         if name == "calls":
             self.passed = 1
 
     def failure(self, reason="none"):
         '''Increment the failure count'''
         self.passed = 0
         self.incr("failure")
         self.logger.log(("Test %s" % self.name).ljust(35) + " FAILED: %s" % reason)
         return None
 
     def success(self):
         '''Increment the success count'''
         self.incr("success")
         return 1
 
     def skipped(self):
         '''Increment the skipped count'''
         self.incr("skipped")
         return 1
 
     def __call__(self, node):
         '''Perform the given test'''
         raise ValueError("Abstract Class member (__call__)")
         self.incr("calls")
         return self.failure()
 
     def audit(self):
         passed = 1
         if len(self.Audits) > 0:
             for audit in self.Audits:
                 if not audit():
                     self.logger.log("Internal %s Audit %s FAILED." % (self.name, audit.name()))
                     self.incr("auditfail")
                     passed = 0
         return passed
 
     def setup(self, node):
         '''Setup the given test'''
         return self.success()
 
     def teardown(self, node):
         '''Tear down the given test'''
         return self.success()
 
     def create_watch(self, patterns, timeout, name=None):
         if not name:
             name = self.name
         return LogWatcher(self.Env["LogFileName"], patterns, name, timeout, kind=self.Env["LogWatcher"], hosts=self.Env["nodes"])
 
     def local_badnews(self, prefix, watch, local_ignore=[]):
         errcount = 0
         if not prefix:
             prefix = "LocalBadNews:"
 
         ignorelist = []
         ignorelist.append(" CTS: ")
         ignorelist.append(prefix)
         ignorelist.extend(local_ignore)
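         # Scan new log entries below, counting any that match none of the ignore patterns (capped at 100)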
 
         while errcount < 100:
             match = watch.look(0)
             if match:
                add_err = 1
                for ignore in ignorelist:
                    if add_err == 1 and re.search(ignore, match):
                        add_err = 0
                if add_err == 1:
                    self.logger.log(prefix + " " + match)
                    errcount = errcount + 1
             else:
               break
         else:
             self.logger.log("Too many errors!")
 
         watch.end()
         return errcount
 
     def is_applicable(self):
         return self.is_applicable_common()
 
     def is_applicable_common(self):
         '''Return TRUE if we are applicable in the current test configuration'''
         #raise ValueError("Abstract Class member (is_applicable)")
 
         if self.is_loop and not self.Env["loop-tests"]:
             return 0
         elif self.is_unsafe and not self.Env["unsafe-tests"]:
             return 0
         elif self.is_valgrind and not self.Env["valgrind-tests"]:
             return 0
         elif self.is_experimental and not self.Env["experimental-tests"]:
             return 0
         elif self.is_docker_unsafe and self.Env["docker"]:
             return 0
         elif self.is_container and not self.Env["container-tests"]:
             return 0
         elif self.Env["benchmark"] and self.benchmark == 0:
             return 0
 
         return 1
 
     def find_ocfs2_resources(self, node):
         self.r_o2cb = None
         self.r_ocfs2 = []
 
         (rc, lines) = self.rsh(node, "crm_resource -c", None)
         for line in lines:
             if re.search("^Resource", line):
                 r = AuditResource(self.CM, line)
                 if r.rtype == "o2cb" and r.parent != "NA":
                     self.debug("Found o2cb: %s" % self.r_o2cb)
                     self.r_o2cb = r.parent
             if re.search("^Constraint", line):
                 c = AuditConstraint(self.CM, line)
                 if c.type == "rsc_colocation" and c.target == self.r_o2cb:
                     self.r_ocfs2.append(c.rsc)
 
         self.debug("Found ocfs2 filesystems: %s" % repr(self.r_ocfs2))
         return len(self.r_ocfs2)
 
     def canrunnow(self, node):
         '''Return TRUE if we can meaningfully run right now'''
         return 1
 
     def errorstoignore(self):
         '''Return list of errors which are 'normal' and should be ignored'''
         return []
 
 
 class StopTest(CTSTest):
     '''Stop (deactivate) the cluster manager on a node'''
     def __init__(self, cm):
         CTSTest.__init__(self, cm)
         self.name = "Stop"
 
     def __call__(self, node):
         '''Perform the 'stop' test. '''
         self.incr("calls")
         if self.CM.ShouldBeStatus[node] != "up":
             return self.skipped()
 
         patterns = []
         # Technically we should always be able to notice ourselves stopping
         patterns.append(self.templates["Pat:We_stopped"] % node)
 
         #if self.Env["use_logd"]:
         #    patterns.append(self.templates["Pat:Logd_stopped"] % node)
 
         # Any active node needs to notice this one left
         # NOTE: This won't work if we have multiple partitions
         for other in self.Env["nodes"]:
             if self.CM.ShouldBeStatus[other] == "up" and other != node:
                 patterns.append(self.templates["Pat:They_stopped"] %(other, self.CM.key_for_node(node)))
                 #self.debug("Checking %s will notice %s left"%(other, node))
 
         watch = self.create_watch(patterns, self.Env["DeadTime"])
         watch.setwatch()
 
         if node == self.CM.OurNode:
             self.incr("us")
         else:
             if self.CM.upcount() <= 1:
                 self.incr("all")
             else:
                 self.incr("them")
 
         self.CM.StopaCM(node)
         watch_result = watch.lookforall()
 
         failreason = None
         UnmatchedList = "||"
         if watch.unmatched:
             (rc, output) = self.rsh(node, "/bin/ps axf", None)
             for line in output:
                 self.debug(line)
 
             (rc, output) = self.rsh(node, "/usr/sbin/dlm_tool dump", None)
             for line in output:
                 self.debug(line)
 
             for regex in watch.unmatched:
                 self.logger.log ("ERROR: Shutdown pattern not found: %s" % (regex))
                 UnmatchedList +=  regex + "||";
                 failreason = "Missing shutdown pattern"
 
         self.CM.cluster_stable(self.Env["DeadTime"])
 
         if not watch.unmatched or self.CM.upcount() == 0:
             return self.success()
 
         if len(watch.unmatched) >= self.CM.upcount():
             return self.failure("no match against (%s)" % UnmatchedList)
 
         if failreason is None:
             return self.success()
         else:
             return self.failure(failreason)
 #
 # We don't register StopTest because it's better when called by
 # another test...
 #
 
 
 class StartTest(CTSTest):
     '''Start (activate) the cluster manager on a node'''
     def __init__(self, cm, debug=None):
         CTSTest.__init__(self,cm)
         self.name = "start"
         self.debug = debug
 
     def __call__(self, node):
         '''Perform the 'start' test. '''
         self.incr("calls")
 
         if self.CM.upcount() == 0:
             self.incr("us")
         else:
             self.incr("them")
 
         if self.CM.ShouldBeStatus[node] != "down":
             return self.skipped()
         elif self.CM.StartaCM(node):
             return self.success()
         else:
             return self.failure("Startup %s on node %s failed"
                                 % (self.Env["Name"], node))
 
 #
 # We don't register StartTest because it's better when called by
 # another test...
 #
 
 
 class FlipTest(CTSTest):
     '''If it's running, stop it.  If it's stopped, start it.
        Overthrow the status quo...
     '''
     def __init__(self, cm):
         CTSTest.__init__(self,cm)
         self.name = "Flip"
         self.start = StartTest(cm)
         self.stop = StopTest(cm)
 
     def __call__(self, node):
         '''Perform the 'Flip' test. '''
         self.incr("calls")
         if self.CM.ShouldBeStatus[node] == "up":
             self.incr("stopped")
             ret = self.stop(node)
             type = "up->down"
             # Give the cluster time to recognize it's gone...
             time.sleep(self.Env["StableTime"])
         elif self.CM.ShouldBeStatus[node] == "down":
             self.incr("started")
             ret = self.start(node)
             type = "down->up"
         else:
             return self.skipped()
 
         self.incr(type)
         if ret:
             return self.success()
         else:
             return self.failure("%s failure" % type)
 
 #        Register FlipTest as a good test to run
 AllTestClasses.append(FlipTest)
 
 
 class RestartTest(CTSTest):
     '''Stop and restart a node'''
     def __init__(self, cm):
         CTSTest.__init__(self,cm)
         self.name = "Restart"
         self.start = StartTest(cm)
         self.stop = StopTest(cm)
         self.benchmark = 1
 
     def __call__(self, node):
         '''Perform the 'restart' test. '''
         self.incr("calls")
 
         self.incr("node:" + node)
 
         ret1 = 1
         if self.CM.StataCM(node):
             self.incr("WasStopped")
             if not self.start(node):
                 return self.failure("start (setup) failure: "+node)
 
         self.set_timer()
         if not self.stop(node):
             return self.failure("stop failure: "+node)
         if not self.start(node):
             return self.failure("start failure: "+node)
         return self.success()
 
 #        Register RestartTest as a good test to run
 AllTestClasses.append(RestartTest)
 
 
 class StonithdTest(CTSTest):
     def __init__(self, cm):
         CTSTest.__init__(self, cm)
         self.name = "Stonithd"
         self.startall = SimulStartLite(cm)
         self.benchmark = 1
 
     def __call__(self, node):
         self.incr("calls")
         if len(self.Env["nodes"]) < 2:
             return self.skipped()
 
         ret = self.startall(None)
         if not ret:
             return self.failure("Setup failed")
 
         is_dc = self.CM.is_node_dc(node)
 
         watchpats = []
         watchpats.append(self.templates["Pat:FenceOpOK"] % node)
         watchpats.append(self.templates["Pat:NodeFenced"] % node)
 
         if self.Env["at-boot"] == 0:
             self.debug("Expecting %s to stay down" % node)
             self.CM.ShouldBeStatus[node] = "down"
         else:
             self.debug("Expecting %s to come up again %d" % (node, self.Env["at-boot"]))
             watchpats.append("%s.* S_STARTING -> S_PENDING" % node)
             watchpats.append("%s.* S_PENDING -> S_NOT_DC" % node)
 
         watch = self.create_watch(watchpats, 30 + self.Env["DeadTime"] + self.Env["StableTime"] + self.Env["StartTime"])
         watch.setwatch()
 
         origin = self.Env.RandomGen.choice(self.Env["nodes"])
 
         rc = self.rsh(origin, "stonith_admin --reboot %s -VVVVVV" % node)
 
         if rc == 194:
             # 194 - 256 = -62 = Timer expired
             #
             # Look for the patterns, usually this means the required
             # device was running on the node to be fenced - or that
             # the required devices were in the process of being loaded
             # and/or moved
             #
             # Effectively the node committed suicide so there will be
             # no confirmation, but pacemaker should be watching and
             # fence the node again
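             # (on Linux, errno 62 is ETIME, "Timer expired")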
 
             self.logger.log("Fencing command on %s to fence %s timed out" % (origin, node))
 
         elif origin != node and rc != 0:
             self.debug("Waiting for the cluster to recover")
             self.CM.cluster_stable()
 
             self.debug("Waiting STONITHd node to come back up")
             self.CM.ns.WaitForAllNodesToComeUp(self.Env["nodes"], 600)
 
             self.logger.log("Fencing command on %s failed to fence %s (rc=%d)" % (origin, node, rc))
 
         elif origin == node and rc != 255:
             # 255 == broken pipe, i.e. the node was fenced as expected
             self.logger.log("Locally originated fencing returned %d" % rc)
 
         self.set_timer("fence")
         matched = watch.lookforall()
         self.log_timer("fence")
         self.set_timer("reform")
         if watch.unmatched:
             self.logger.log("Patterns not found: " + repr(watch.unmatched))
 
         self.debug("Waiting for the cluster to recover")
         self.CM.cluster_stable()
 
         self.debug("Waiting STONITHd node to come back up")
         self.CM.ns.WaitForAllNodesToComeUp(self.Env["nodes"], 600)
 
         self.debug("Waiting for the cluster to re-stabilize with all nodes")
         is_stable = self.CM.cluster_stable(self.Env["StartTime"])
 
         if not matched:
             return self.failure("Didn't find all expected patterns")
         elif not is_stable:
             return self.failure("Cluster did not become stable")
 
         self.log_timer("reform")
         return self.success()
 
     def errorstoignore(self):
         return [
             self.templates["Pat:Fencing_start"] % ".*",
             self.templates["Pat:Fencing_ok"] % ".*",
-            "error: native_create_actions: Resource .*stonith::.* is active on 2 nodes attempting recovery",
-            "error: remote_op_done: Operation reboot of .*by .* for stonith_admin.*: Timer expired",
-            ]
+            r"error.*: Resource .*stonith::.* is active on 2 nodes attempting recovery",
+            r"error.*: Operation reboot of .*by .* for stonith_admin.*: Timer expired",
+        ]
 
     def is_applicable(self):
         if not self.is_applicable_common():
             return 0
 
         if self.Env.has_key("DoFencing"):
             return self.Env["DoFencing"]
 
         return 1
 
 AllTestClasses.append(StonithdTest)
 
 
 class StartOnebyOne(CTSTest):
     '''Start all the nodes ~ one by one'''
     def __init__(self, cm):
         CTSTest.__init__(self,cm)
         self.name = "StartOnebyOne"
         self.stopall = SimulStopLite(cm)
         self.start = StartTest(cm)
         self.ns = CTS.NodeStatus(cm.Env)
 
     def __call__(self, dummy):
         '''Perform the 'StartOnebyOne' test. '''
         self.incr("calls")
 
         #        We ignore the "node" parameter...
 
         #        Shut down all the nodes...
         ret = self.stopall(None)
         if not ret:
             return self.failure("Test setup failed")
 
         failed = []
         self.set_timer()
         for node in self.Env["nodes"]:
             if not self.start(node):
                 failed.append(node)
 
         if len(failed) > 0:
             return self.failure("Some node failed to start: " + repr(failed))
 
         return self.success()
 
 #        Register StartOnebyOne as a good test to run
 AllTestClasses.append(StartOnebyOne)
 
 
 class SimulStart(CTSTest):
     '''Start all the nodes ~ simultaneously'''
     def __init__(self, cm):
         CTSTest.__init__(self,cm)
         self.name = "SimulStart"
         self.stopall = SimulStopLite(cm)
         self.startall = SimulStartLite(cm)
 
     def __call__(self, dummy):
         '''Perform the 'SimulStart' test. '''
         self.incr("calls")
 
         #        We ignore the "node" parameter...
 
         #        Shut down all the nodes...
         ret = self.stopall(None)
         if not ret:
             return self.failure("Setup failed")
 
         self.CM.clear_all_caches()
 
         if not self.startall(None):
             return self.failure("Startall failed")
 
         return self.success()
 
 #        Register SimulStart as a good test to run
 AllTestClasses.append(SimulStart)
 
 
 class SimulStop(CTSTest):
     '''Stop all the nodes ~ simultaneously'''
     def __init__(self, cm):
         CTSTest.__init__(self,cm)
         self.name = "SimulStop"
         self.startall = SimulStartLite(cm)
         self.stopall = SimulStopLite(cm)
 
     def __call__(self, dummy):
         '''Perform the 'SimulStop' test. '''
         self.incr("calls")
 
         #     We ignore the "node" parameter...
 
         #     Start up all the nodes...
         ret = self.startall(None)
         if not ret:
             return self.failure("Setup failed")
 
         if not self.stopall(None):
             return self.failure("Stopall failed")
 
         return self.success()
 
 #     Register SimulStop as a good test to run
 AllTestClasses.append(SimulStop)
 
 
 class StopOnebyOne(CTSTest):
     '''Stop all the nodes in order'''
     def __init__(self, cm):
         CTSTest.__init__(self,cm)
         self.name = "StopOnebyOne"
         self.startall = SimulStartLite(cm)
         self.stop = StopTest(cm)
 
     def __call__(self, dummy):
         '''Perform the 'StopOnebyOne' test. '''
         self.incr("calls")
 
         #     We ignore the "node" parameter...
 
         #     Start up all the nodes...
         ret = self.startall(None)
         if not ret:
             return self.failure("Setup failed")
 
         failed = []
         self.set_timer()
         for node in self.Env["nodes"]:
             if not self.stop(node):
                 failed.append(node)
 
         if len(failed) > 0:
             return self.failure("Some node failed to stop: " + repr(failed))
 
         self.CM.clear_all_caches()
         return self.success()
 
 #     Register StopOnebyOne as a good test to run
 AllTestClasses.append(StopOnebyOne)
 
 
 class RestartOnebyOne(CTSTest):
     '''Restart all the nodes in order'''
     def __init__(self, cm):
         CTSTest.__init__(self,cm)
         self.name = "RestartOnebyOne"
         self.startall = SimulStartLite(cm)
 
     def __call__(self, dummy):
         '''Perform the 'RestartOnebyOne' test. '''
         self.incr("calls")
 
         #     We ignore the "node" parameter...
 
         #     Start up all the nodes...
         ret = self.startall(None)
         if not ret:
             return self.failure("Setup failed")
 
         did_fail = []
         self.set_timer()
         self.restart = RestartTest(self.CM)
         for node in self.Env["nodes"]:
             if not self.restart(node):
                 did_fail.append(node)
 
         if did_fail:
             return self.failure("Could not restart %d nodes: %s"
                                 % (len(did_fail), repr(did_fail)))
         return self.success()
 
 #     Register RestartOnebyOne as a good test to run
 AllTestClasses.append(RestartOnebyOne)
 
 
 class PartialStart(CTSTest):
     '''Start a node - but tell it to stop before it finishes starting up'''
     def __init__(self, cm):
         CTSTest.__init__(self,cm)
         self.name = "PartialStart"
         self.startall = SimulStartLite(cm)
         self.stopall = SimulStopLite(cm)
         self.stop = StopTest(cm)
         #self.is_unsafe = 1
 
     def __call__(self, node):
         '''Perform the 'PartialStart' test. '''
         self.incr("calls")
 
         ret = self.stopall(None)
         if not ret:
             return self.failure("Setup failed")
 
 #   FIXME!  This should use the CM class to get the pattern
 #       then it would be applicable in general
         watchpats = []
         watchpats.append("crmd.*Connecting to cluster infrastructure")
         watch = self.create_watch(watchpats, self.Env["DeadTime"]+10)
         watch.setwatch()
 
         self.CM.StartaCMnoBlock(node)
         ret = watch.lookforall()
         if not ret:
             self.logger.log("Patterns not found: " + repr(watch.unmatched))
             return self.failure("Setup of %s failed" % node)
 
         ret = self.stop(node)
         if not ret:
             return self.failure("%s did not stop in time" % node)
 
         return self.success()
 
     def errorstoignore(self):
         '''Return list of errors which should be ignored'''
 
         # We might do some fencing in the 2-node case if we make it up far enough
-        return [ """Executing reboot fencing operation""" ]
+        return [
+            """Executing reboot fencing operation""",
+        ]
 
 #     Register PartialStart as a good test to run
 AllTestClasses.append(PartialStart)
 
 
 class StandbyTest(CTSTest):
     def __init__(self, cm):
         CTSTest.__init__(self,cm)
         self.name = "Standby"
         self.benchmark = 1
 
         self.start = StartTest(cm)
         self.startall = SimulStartLite(cm)
 
     # make sure the node is active
     # set the node to standby mode
     # check resources; no resources should be running on the node
     # set the node to active mode
     # check resources; resources should have been migrated back (SHOULD THEY?)
 
     def __call__(self, node):
 
         self.incr("calls")
         ret = self.startall(None)
         if not ret:
             return self.failure("Start all nodes failed")
 
         self.debug("Make sure node %s is active" % node)
         if self.CM.StandbyStatus(node) != "off":
             if not self.CM.SetStandbyMode(node, "off"):
                 return self.failure("can't set node %s to active mode" % node)
 
         self.CM.cluster_stable()
 
         status = self.CM.StandbyStatus(node)
         if status != "off":
             return self.failure("standby status of %s is [%s] but we expect [off]" % (node, status))
 
         self.debug("Getting resources running on node %s" % node)
         rsc_on_node = self.CM.active_resources(node)
 
         watchpats = []
-        watchpats.append("do_state_transition:.*-> S_POLICY_ENGINE")
+        watchpats.append(r"State transition .* -> S_POLICY_ENGINE")
         watch = self.create_watch(watchpats, self.Env["DeadTime"]+10)
         watch.setwatch()
 
         self.debug("Setting node %s to standby mode" % node)
         if not self.CM.SetStandbyMode(node, "on"):
             return self.failure("can't set node %s to standby mode" % node)
 
         self.set_timer("on")
 
         ret = watch.lookforall()
         if not ret:
             self.logger.log("Patterns not found: " + repr(watch.unmatched))
             self.CM.SetStandbyMode(node, "off")
             return self.failure("cluster didn't react to standby change on %s" % node)
 
         self.CM.cluster_stable()
 
         status = self.CM.StandbyStatus(node)
         if status != "on":
             return self.failure("standby status of %s is [%s] but we expect [on]" % (node, status))
         self.log_timer("on")
 
         self.debug("Checking resources")
         bad_run = self.CM.active_resources(node)
         if len(bad_run) > 0:
             rc = self.failure("%s set to standby, %s is still running on it" % (node, repr(bad_run)))
             self.debug("Setting node %s to active mode" % node)
             self.CM.SetStandbyMode(node, "off")
             return rc
 
         self.debug("Setting node %s to active mode" % node)
         if not self.CM.SetStandbyMode(node, "off"):
             return self.failure("can't set node %s to active mode" % node)
 
         self.set_timer("off")
         self.CM.cluster_stable()
 
         status = self.CM.StandbyStatus(node)
         if status != "off":
             return self.failure("standby status of %s is [%s] but we expect [off]" % (node, status))
         self.log_timer("off")
 
         return self.success()
 
 AllTestClasses.append(StandbyTest)
 
 
 class ValgrindTest(CTSTest):
     '''Check for memory leaks'''
     def __init__(self, cm):
         CTSTest.__init__(self,cm)
         self.name = "Valgrind"
         self.stopall = SimulStopLite(cm)
         self.startall = SimulStartLite(cm)
         self.is_valgrind = 1
         self.is_loop = 1
 
     def setup(self, node):
         self.incr("calls")
 
         ret = self.stopall(None)
         if not ret:
             return self.failure("Stop all nodes failed")
 
         # Enable valgrind
         self.logger.logPat = "/tmp/%s-*.valgrind" % self.name
 
         self.Env["valgrind-prefix"] = self.name
 
         self.rsh(node, "rm -f %s" % self.logger.logPat, None)
 
         ret = self.startall(None)
         if not ret:
             return self.failure("Start all nodes failed")
 
         for node in self.Env["nodes"]:
             (rc, output) = self.rsh(node, "ps u --ppid `pidofproc aisexec`", None)
             for line in output:
                 self.debug(line)
 
         return self.success()
 
     def teardown(self, node):
         # Disable valgrind
         self.Env["valgrind-prefix"] = None
 
         # Return all nodes to normal
         ret = self.stopall(None)
         if not ret:
             return self.failure("Stop all nodes failed")
 
         return self.success()
 
     def find_leaks(self):
         # Check for leaks
         leaked = []
         self.stop = StopTest(self.CM)
 
         for node in self.Env["nodes"]:
             (rc, ps_out) = self.rsh(node, "ps u --ppid `pidofproc aisexec`", None)
             rc = self.stop(node)
             if not rc:
                 self.failure("Couldn't shut down %s" % node)
 
             rc = self.rsh(node, "grep -e indirectly.*lost:.*[1-9] -e definitely.*lost:.*[1-9] -e (ERROR|error).*SUMMARY:.*[1-9].*errors %s" % self.logger.logPat, 0)
             if rc != 1:
                 leaked.append(node)
                 self.failure("Valgrind errors detected on %s" % node)
                 for line in ps_out:
                     self.logger.log(line)
                 (rc, output) = self.rsh(node, "grep -e lost: -e SUMMARY: %s" % self.logger.logPat, None)
                 for line in output:
                     self.logger.log(line)
                 (rc, output) = self.rsh(node, "cat %s" % self.logger.logPat, None)
                 for line in output:
                     self.debug(line)
 
         self.rsh(node, "rm -f %s" % self.logger.logPat, None)
         return leaked
 
     def __call__(self, node):
         leaked = self.find_leaks()
         if len(leaked) > 0:
             return self.failure("Nodes %s leaked" % repr(leaked))
 
         return self.success()
 
     def errorstoignore(self):
         '''Return list of errors which should be ignored'''
-        return [ """cib:.*readCibXmlFile:""", """HA_VALGRIND_ENABLED""" ]
+        return [
+            r"cib.*: \*\*\*\*\*\*\*\*\*\*\*\*\*",
+            r"cib.*: .* avoid confusing Valgrind",
+            r"HA_VALGRIND_ENABLED",
+        ]
 
 
 class StandbyLoopTest(ValgrindTest):
     '''Check for memory leaks by putting a node in and out of standby for an hour'''
     def __init__(self, cm):
         ValgrindTest.__init__(self,cm)
         self.name = "StandbyLoop"
 
     def __call__(self, node):
 
         lpc = 0
         delay = 2
         failed = 0
         done = time.time() + self.Env["loop-minutes"] * 60
         while time.time() <= done and not failed:
             lpc = lpc + 1
 
             time.sleep(delay)
             if not self.CM.SetStandbyMode(node, "on"):
                 self.failure("can't set node %s to standby mode" % node)
                 failed = lpc
 
             time.sleep(delay)
             if not self.CM.SetStandbyMode(node, "off"):
                 self.failure("can't set node %s to active mode" % node)
                 failed = lpc
 
         leaked = self.find_leaks()
         if failed:
             return self.failure("Iteration %d failed" % failed)
         elif len(leaked) > 0:
             return self.failure("Nodes %s leaked" % repr(leaked))
 
         return self.success()
 
 AllTestClasses.append(StandbyLoopTest)
 
 
 class BandwidthTest(CTSTest):
 #        Tests should not be cluster-manager-specific
 #        If you need to find out cluster manager configuration to do this, then
 #        it should be added to the generic cluster manager API.
     '''Test the bandwidth which heartbeat uses'''
     def __init__(self, cm):
         CTSTest.__init__(self, cm)
         self.name = "Bandwidth"
         self.start = StartTest(cm)
         self.__setitem__("min",0)
         self.__setitem__("max",0)
         self.__setitem__("totalbandwidth",0)
         self.tempfile = tempfile.mktemp(".cts")
         self.startall = SimulStartLite(cm)
 
     def __call__(self, node):
         '''Perform the Bandwidth test'''
         self.incr("calls")
 
         if self.CM.upcount() < 1:
             return self.skipped()
 
         Path = self.CM.InternalCommConfig()
         if "ip" not in Path["mediatype"]:
             return self.skipped()
 
         port = Path["port"][0]
         port = int(port)
 
         ret = self.startall(None)
         if not ret:
             return self.failure("Test setup failed")
         time.sleep(5)  # We get extra messages right after startup.
 
         fstmpfile = "/var/run/band_estimate"
         dumpcmd = "tcpdump -p -n -c 102 -i any udp port %d > %s 2>&1" \
                   % (port, fstmpfile)
 
         rc = self.rsh(node, dumpcmd)
         if rc == 0:
             farfile = "root@%s:%s" % (node, fstmpfile)
             self.rsh.cp(farfile, self.tempfile)
             Bandwidth = self.countbandwidth(self.tempfile)
             if not Bandwidth:
                 self.logger.log("Could not compute bandwidth.")
                 return self.success()
             intband = int(Bandwidth + 0.5)
             self.logger.log("...bandwidth: %d bits/sec" % intband)
             self.Stats["totalbandwidth"] = self.Stats["totalbandwidth"] + Bandwidth
             if self.Stats["min"] == 0:
                 self.Stats["min"] = Bandwidth
             if Bandwidth > self.Stats["max"]:
                 self.Stats["max"] = Bandwidth
             if Bandwidth < self.Stats["min"]:
                 self.Stats["min"] = Bandwidth
             self.rsh(node, "rm -f %s" % fstmpfile)
             os.unlink(self.tempfile)
             return self.success()
         else:
             return self.failure("no response from tcpdump command [%d]!" % rc)
 
     def countbandwidth(self, file):
         fp = open(file, "r")
         fp.seek(0)
         count = 0
         sum = 0
         while 1:
             line = fp.readline()
             if not line:
                 return None
             if re.search("udp",line) or re.search("UDP,", line):
                 count = count + 1
                 linesplit = string.split(line," ")
                 for j in range(len(linesplit)-1):
                     if linesplit[j] == "udp": break
                     if linesplit[j] == "length:": break
 
                 try:
                     sum = sum + int(linesplit[j+1])
                 except ValueError:
                     self.logger.log("Invalid tcpdump line: %s" % line)
                     return None
                 T1 = linesplit[0]
                 timesplit = string.split(T1,":")
                 time2split = string.split(timesplit[2],".")
                 time1 = (long(timesplit[0])*60+long(timesplit[1]))*60+long(time2split[0])+long(time2split[1])*0.000001
                 break
 
         while count < 100:
             line = fp.readline()
             if not line:
                 return None
             if re.search("udp",line) or re.search("UDP,", line):
                 count = count+1
                 linessplit = string.split(line," ")
                 for j in range(len(linessplit)-1):
                     if linessplit[j] == "udp": break
                     if linesplit[j] == "length:": break
                 try:
                     sum = int(linessplit[j+1]) + sum
                 except ValueError:
                     self.logger.log("Invalid tcpdump line: %s" % line)
                     return None
 
         T2 = linessplit[0]
         timesplit = string.split(T2,":")
         time2split = string.split(timesplit[2],".")
         time2 = (long(timesplit[0])*60+long(timesplit[1]))*60+long(time2split[0])+long(time2split[1])*0.000001
         time = time2-time1
         if (time <= 0):
             return 0
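         # sum holds the UDP payload bytes seen in the capture; convert to bits per second over the interval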
         return (sum*8)/time
 
     def is_applicable(self):
         '''BandwidthTest never applicable'''
         return 0
 
 AllTestClasses.append(BandwidthTest)
 
 
 ###################################################################
 class MaintenanceMode(CTSTest):
 ###################################################################
     def __init__(self, cm):
         CTSTest.__init__(self,cm)
         self.name = "MaintenanceMode"
         self.start = StartTest(cm)
         self.startall = SimulStartLite(cm)
         self.max = 30
         #self.is_unsafe = 1
         self.benchmark = 1
         self.action = "asyncmon"
         self.interval = 0
         self.rid = "maintenanceDummy"
 
     def toggleMaintenanceMode(self, node, action):
         pats = []
         pats.append(self.templates["Pat:DC_IDLE"])
 
         # fail the resource right after turning Maintenance mode on
         # verify it is not recovered until maintenance mode is turned off
         if action == "On":
             pats.append("Updating failcount for %s on .* after .* %s" % (self.rid, self.action))
         else:
             pats.append(self.templates["Pat:RscOpOK"] % (self.rid, "stop_0"))
             pats.append(self.templates["Pat:RscOpOK"] % (self.rid, "start_0"))
 
         watch = self.create_watch(pats, 60)
         watch.setwatch()
 
         self.debug("Turning maintenance mode %s" % action)
         self.rsh(node, self.templates["MaintenanceMode%s" % (action)])
         if (action == "On"):
             self.rsh(node, "crm_resource -V -F -r %s -H %s &>/dev/null" % (self.rid, node))
 
         self.set_timer("recover%s" % (action))
         watch.lookforall()
         self.log_timer("recover%s" % (action))
         if watch.unmatched:
             self.debug("Failed to find patterns when turning maintenance mode %s" % action)
             return repr(watch.unmatched)
 
         return ""
 
     def insertMaintenanceDummy(self, node):
         pats = []
         pats.append(("%s.*" % node) + (self.templates["Pat:RscOpOK"] % (self.rid, "start_0")))
 
         watch = self.create_watch(pats, 60)
         watch.setwatch()
 
         self.CM.AddDummyRsc(node, self.rid)
 
         self.set_timer("addDummy")
         watch.lookforall()
         self.log_timer("addDummy")
 
         if watch.unmatched:
             self.debug("Failed to find patterns when adding maintenance dummy resource")
             return repr(watch.unmatched)
         return ""
 
     def removeMaintenanceDummy(self, node):
         pats = []
         pats.append(self.templates["Pat:RscOpOK"] % (self.rid, "stop_0"))
 
         watch = self.create_watch(pats, 60)
         watch.setwatch()
         self.CM.RemoveDummyRsc(node, self.rid)
 
         self.set_timer("removeDummy")
         watch.lookforall()
         self.log_timer("removeDummy")
 
         if watch.unmatched:
             self.debug("Failed to find patterns when removing maintenance dummy resource")
             return repr(watch.unmatched)
         return ""
 
     def managedRscList(self, node):
         rscList = []
         (rc, lines) = self.rsh(node, "crm_resource -c", None)
         for line in lines:
             if re.search("^Resource", line):
                 tmp = AuditResource(self.CM, line)
                 if tmp.managed():
                     rscList.append(tmp.id)
 
         return rscList
 
     def verifyResources(self, node, rscList, managed):
         managedList = list(rscList)
         managed_str = "managed"
         if not managed:
             managed_str = "unmanaged"
 
         (rc, lines) = self.rsh(node, "crm_resource -c", None)
         for line in lines:
             if re.search("^Resource", line):
                 tmp = AuditResource(self.CM, line)
                 if managed and not tmp.managed():
                     continue
                 elif not managed and tmp.managed():
                     continue
                 elif managedList.count(tmp.id):
                     managedList.remove(tmp.id)
 
         if len(managedList) == 0:
             self.debug("Found all %s resources on %s" % (managed_str, node))
             return True
 
         self.logger.log("Could not find all %s resources on %s. %s" % (managed_str, node, managedList))
         return False
 
     def __call__(self, node):
         '''Perform the 'MaintenanceMode' test. '''
         self.incr("calls")
         verify_managed = False
         verify_unmanaged = False
         failPat = ""
 
         ret = self.startall(None)
         if not ret:
             return self.failure("Setup failed")
 
         # get a list of all the managed resources. We use this list
         # after enabling maintenance mode to verify all managed resources
         # become un-managed.  After maintenance mode is turned off, we use
         # this list to verify all the resources become managed again.
         managedResources = self.managedRscList(node)
         if len(managedResources) == 0:
             self.logger.log("No managed resources on %s" % node)
             return self.skipped()
 
         # insert a fake resource we can fail during maintenance mode
         # so we can verify recovery does not take place until after maintenance
         # mode is disabled.
         failPat = failPat + self.insertMaintenanceDummy(node)
 
         # toggle maintenance mode ON, then fail dummy resource.
         failPat = failPat + self.toggleMaintenanceMode(node, "On")
 
         # verify all the resources are now unmanaged
         if self.verifyResources(node, managedResources, False):
             verify_unmanaged = True
 
         # Toggle maintenance mode  OFF, verify dummy is recovered.
         failPat = failPat + self.toggleMaintenanceMode(node, "Off")
 
         # verify all the resources are now managed again
         if self.verifyResources(node, managedResources, True):
             verify_managed = True
 
         # Remove our maintenance dummy resource.
         failPat = failPat + self.removeMaintenanceDummy(node)
 
         self.CM.cluster_stable()
 
         if failPat != "":
             return self.failure("Unmatched patterns: %s" % (failPat))
         elif verify_unmanaged is False:
             return self.failure("Failed to verify resources became unmanaged during maintenance mode")
         elif verify_managed is False:
             return self.failure("Failed to verify resources switched back to managed after disabling maintenance mode")
 
         return self.success()
 
     def errorstoignore(self):
         '''Return list of errors which should be ignored'''
-        return [ """Updating failcount for %s""" % self.rid,
-                 """LogActions: Recover %s""" % self.rid,
-                 """Unknown operation: fail""",
-                 """(ERROR|error): sending stonithRA op to stonithd failed.""",
-                 self.templates["Pat:RscOpOK"] % (self.rid, ("%s_%d" % (self.action, self.interval))),
-                 """(ERROR|error): process_graph_event: Action %s_%s_%d .* initiated outside of a transition""" % (self.rid, self.action, self.interval),
-                ]
+        return [
+            r"Updating failcount for %s" % self.rid,
+            r"pengine.*: Recover %s\s*\(.*\)" % self.rid,
+            r"Unknown operation: fail",
+            r"(ERROR|error): sending stonithRA op to stonithd failed.",
+            self.templates["Pat:RscOpOK"] % (self.rid, ("%s_%d" % (self.action, self.interval))),
+            r"(ERROR|error).*: Action %s_%s_%d .* initiated outside of a transition" % (self.rid, self.action, self.interval),
+        ]
 
 AllTestClasses.append(MaintenanceMode)
 
 
 class ResourceRecover(CTSTest):
     def __init__(self, cm):
         CTSTest.__init__(self,cm)
         self.name = "ResourceRecover"
         self.start = StartTest(cm)
         self.startall = SimulStartLite(cm)
         self.max = 30
         self.rid = None
         self.rid_alt = None
         #self.is_unsafe = 1
         self.benchmark = 1
 
         # these are the values used for the new LRM API call
         self.action = "asyncmon"
         self.interval = 0
 
     def __call__(self, node):
         '''Perform the 'ResourceRecover' test. '''
         self.incr("calls")
 
         ret = self.startall(None)
         if not ret:
             return self.failure("Setup failed")
 
         resourcelist = self.CM.active_resources(node)
         # if there are no active resources, skip this test
         if len(resourcelist) == 0:
             self.logger.log("No active resources on %s" % node)
             return self.skipped()
 
         self.rid = self.Env.RandomGen.choice(resourcelist)
         self.rid_alt = self.rid
 
         rsc = None
         (rc, lines) = self.rsh(node, "crm_resource -c", None)
         for line in lines:
             if re.search("^Resource", line):
                 tmp = AuditResource(self.CM, line)
                 if tmp.id == self.rid:
                     rsc = tmp
                     # Handle anonymous clones that get renamed
                     self.rid = rsc.clone_id
                     break
 
         if not rsc:
             return self.failure("Could not find %s in the resource list" % self.rid)
 
         self.debug("Shooting %s aka. %s" % (rsc.clone_id, rsc.id))
 
         pats = []
         pats.append("Updating failcount for %s on .* after .* %s"
                     % (self.rid, self.action))
 
         if rsc.managed():
             pats.append(self.templates["Pat:RscOpOK"] % (self.rid, "stop_0"))
             if rsc.unique():
                 pats.append(self.templates["Pat:RscOpOK"] % (self.rid, "start_0"))
             else:
                 # Anonymous clones may get restarted with a different clone number
                 pats.append(self.templates["Pat:RscOpOK"] % (".*", "start_0"))
 
         watch = self.create_watch(pats, 60)
         watch.setwatch()
 
         self.rsh(node, "crm_resource -V -F -r %s -H %s &>/dev/null" % (self.rid, node))
 
         self.set_timer("recover")
         watch.lookforall()
         self.log_timer("recover")
 
         self.CM.cluster_stable()
         recovered = self.CM.ResourceLocation(self.rid)
 
         if watch.unmatched:
             return self.failure("Patterns not found: %s" % repr(watch.unmatched))
 
         elif rsc.unique() and len(recovered) > 1:
             return self.failure("%s is now active on more than one node: %s"%(self.rid, repr(recovered)))
 
         elif len(recovered) > 0:
             self.debug("%s is running on: %s" % (self.rid, repr(recovered)))
 
         elif rsc.managed():
             return self.failure("%s was not recovered and is inactive" % self.rid)
 
         return self.success()
 
     def errorstoignore(self):
         '''Return list of errors which should be ignored'''
-        return [ """Updating failcount for %s""" % self.rid,
-                 """LogActions: Recover %s""" % self.rid,
-                 """LogActions: Recover %s""" % self.rid_alt,
-                 """Unknown operation: fail""",
-                 """(ERROR|error): sending stonithRA op to stonithd failed.""",
-                 self.templates["Pat:RscOpOK"] % (self.rid, ("%s_%d" % (self.action, self.interval))),
-                 """(ERROR|error): process_graph_event: Action %s_%s_%d .* initiated outside of a transition""" % (self.rid, self.action, self.interval),
-                 ]
+        return [
+            r"Updating failcount for %s" % self.rid,
+            r"pengine.*: Recover (%s|%s)\s*\(.*\)" % (self.rid, self.rid_alt),
+            r"Unknown operation: fail",
+            r"(ERROR|error): sending stonithRA op to stonithd failed.",
+            self.templates["Pat:RscOpOK"] % (self.rid, ("%s_%d" % (self.action, self.interval))),
+            r"(ERROR|error).*: Action %s_%s_%d .* initiated outside of a transition" % (self.rid, self.action, self.interval),
+        ]
 
 AllTestClasses.append(ResourceRecover)
 
 
 class ComponentFail(CTSTest):
     def __init__(self, cm):
         CTSTest.__init__(self,cm)
         self.name = "ComponentFail"
         # TODO make this work correctly in docker.
         self.is_docker_unsafe = 1
         self.startall = SimulStartLite(cm)
         self.complist = cm.Components()
         self.patterns = []
         self.okerrpatterns = []
         self.is_unsafe = 1
 
     def __call__(self, node):
         '''Perform the 'ComponentFail' test. '''
         self.incr("calls")
         self.patterns = []
         self.okerrpatterns = []
 
         # start all nodes
         ret = self.startall(None)
         if not ret:
             return self.failure("Setup failed")
 
         if not self.CM.cluster_stable(self.Env["StableTime"]):
             return self.failure("Setup failed - unstable")
 
         node_is_dc = self.CM.is_node_dc(node, None)
 
         # select a component to kill
         chosen = self.Env.RandomGen.choice(self.complist)
         while chosen.dc_only == 1 and node_is_dc == 0:
             chosen = self.Env.RandomGen.choice(self.complist)
 
         self.debug("...component %s (dc=%d,boot=%d)" % (chosen.name, node_is_dc,chosen.triggersreboot))
         self.incr(chosen.name)
 
         if chosen.name != "aisexec" and chosen.name != "corosync":
             if self.Env["Name"] != "crm-lha" or chosen.name != "pengine":
                 self.patterns.append(self.templates["Pat:ChildKilled"] %(node, chosen.name))
                 self.patterns.append(self.templates["Pat:ChildRespawn"] %(node, chosen.name))
 
         self.patterns.extend(chosen.pats)
         if node_is_dc:
           self.patterns.extend(chosen.dc_pats)
 
         # In an ideal world, this next stuff should be in the "chosen" object as a member function
         if self.Env["Name"] == "crm-lha" and chosen.triggersreboot:
             # Make sure the node goes down and then comes back up if it should reboot...
             for other in self.Env["nodes"]:
                 if other != node:
                     self.patterns.append(self.templates["Pat:They_stopped"] %(other, self.CM.key_for_node(node)))
             self.patterns.append(self.templates["Pat:Slave_started"] % node)
             self.patterns.append(self.templates["Pat:Local_started"] % node)
 
             if chosen.dc_only:
                 # Sometimes these will be in the log, and sometimes they won't...
                 self.okerrpatterns.append("%s .*Process %s:.* exited" % (node, chosen.name))
                 self.okerrpatterns.append("%s .*I_ERROR.*crmdManagedChildDied" % node)
                 self.okerrpatterns.append("%s .*The %s subsystem terminated unexpectedly" % (node, chosen.name))
                 self.okerrpatterns.append("(ERROR|error): Client .* exited with return code")
             else:
                 # Sometimes this won't be in the log...
                 self.okerrpatterns.append(self.templates["Pat:ChildKilled"] %(node, chosen.name))
                 self.okerrpatterns.append(self.templates["Pat:ChildRespawn"] %(node, chosen.name))
                 self.okerrpatterns.append(self.templates["Pat:ChildExit"])
 
         if chosen.name == "stonith":
             # Ignore actions for STONITH resources
             (rc, lines) = self.rsh(node, "crm_resource -c", None)
             for line in lines:
                 if re.search("^Resource", line):
                     r = AuditResource(self.CM, line)
                     if r.rclass == "stonith":
                         self.okerrpatterns.append("LogActions: Recover.*%s" % r.id)
 
         # supply a copy so self.patterns doesn't end up empty
         tmpPats = []
         tmpPats.extend(self.patterns)
         self.patterns.extend(chosen.badnews_ignore)
 
         # Look for STONITH ops, depending on Env["at-boot"] we might need to change the nodes status
         stonithPats = []
         stonithPats.append(self.templates["Pat:Fencing_ok"] % node)
         stonith = self.create_watch(stonithPats, 0)
         stonith.setwatch()
 
         # set the watch for stable
         watch = self.create_watch(
             tmpPats, self.Env["DeadTime"] + self.Env["StableTime"] + self.Env["StartTime"])
         watch.setwatch()
 
         # kill the component
         chosen.kill(node)
 
         self.debug("Waiting for the cluster to recover")
         self.CM.cluster_stable()
 
         self.debug("Waiting for any STONITHd node to come back up")
         self.CM.ns.WaitForAllNodesToComeUp(self.Env["nodes"], 600)
 
         self.debug("Waiting for the cluster to re-stabilize with all nodes")
         self.CM.cluster_stable(self.Env["StartTime"])
 
         self.debug("Checking if %s was shot" % node)
         shot = stonith.look(60)
         if shot:
             self.debug("Found: " + repr(shot))
             self.okerrpatterns.append(self.templates["Pat:Fencing_start"] % node)
 
             if self.Env["at-boot"] == 0:
                 self.CM.ShouldBeStatus[node] = "down"
 
             # If fencing occurred, chances are many (if not all) the expected logs
             # will not be sent - or will be lost when the node reboots
             return self.success()
 
         # check for logs indicating a graceful recovery
         matched = watch.lookforall(allow_multiple_matches=1)
         if watch.unmatched:
             self.logger.log("Patterns not found: " + repr(watch.unmatched))
 
         self.debug("Waiting for the cluster to re-stabilize with all nodes")
         is_stable = self.CM.cluster_stable(self.Env["StartTime"])
 
         if not matched:
             return self.failure("Didn't find all expected %s patterns" % chosen.name)
         elif not is_stable:
             return self.failure("Cluster did not become stable after killing %s" % chosen.name)
 
         return self.success()
 
     def errorstoignore(self):
         '''Return list of errors which should be ignored'''
         # Note that okerrpatterns refers to the last time we ran this test
         # The good news is that this works fine for us...
         self.okerrpatterns.extend(self.patterns)
         return self.okerrpatterns
 
 AllTestClasses.append(ComponentFail)
 
 
 class SplitBrainTest(CTSTest):
     '''Test split-brain: when the path between the two nodes breaks,
        check whether both nodes take over the resources'''
     def __init__(self,cm):
         CTSTest.__init__(self,cm)
         self.name = "SplitBrain"
         self.start = StartTest(cm)
         self.startall = SimulStartLite(cm)
         self.is_experimental = 1
 
     def isolate_partition(self, partition):
         other_nodes = []
         other_nodes.extend(self.Env["nodes"])
 
         for node in partition:
             try:
                 other_nodes.remove(node)
             except ValueError:
                 self.logger.log("Node "+node+" not in " + repr(self.Env["nodes"]) + " from " +repr(partition))
 
         if len(other_nodes) == 0:
             return 1
 
         self.debug("Creating partition: " + repr(partition))
         self.debug("Everyone else: " + repr(other_nodes))
 
         for node in partition:
             if not self.CM.isolate_node(node, other_nodes):
                 self.logger.log("Could not isolate %s" % node)
                 return 0
 
         return 1
 
     def heal_partition(self, partition):
         other_nodes = []
         other_nodes.extend(self.Env["nodes"])
 
         for node in partition:
             try:
                 other_nodes.remove(node)
             except ValueError:
                 self.logger.log("Node "+node+" not in " + repr(self.Env["nodes"]))
 
         if len(other_nodes) == 0:
             return 1
 
         self.debug("Healing partition: " + repr(partition))
         self.debug("Everyone else: " + repr(other_nodes))
 
         for node in partition:
             self.CM.unisolate_node(node, other_nodes)
 
     def __call__(self, node):
         '''Perform split-brain test'''
         self.incr("calls")
         self.passed = 1
         partitions = {}
 
         ret = self.startall(None)
         if not ret:
             return self.failure("Setup failed")
 
         while 1:
             # Retry until we get multiple partitions
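             # Give each node a random partition number between 1 and the node
             # count; only accept the layout if it yields at least two partitions.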
             partitions = {}
             p_max = len(self.Env["nodes"])
             for node in self.Env["nodes"]:
                 p = self.Env.RandomGen.randint(1, p_max)
                 if not partitions.has_key(p):
                     partitions[p] = []
                 partitions[p].append(node)
             p_max = len(partitions.keys())
             if p_max > 1:
                 break
             # else, try again
 
         self.debug("Created %d partitions" % p_max)
         for key in partitions.keys():
             self.debug("Partition["+str(key)+"]:\t"+repr(partitions[key]))
 
         # Disabling STONITH to reduce test complexity for now
         self.rsh(node, "crm_attribute -V -n stonith-enabled -v false")
 
         for key in partitions.keys():
             self.isolate_partition(partitions[key])
 
         count = 30
         while count > 0:
             if len(self.CM.find_partitions()) != p_max:
                 time.sleep(10)
                 count -= 1
             else:
                 break
         else:
             self.failure("Expected partitions were not created")
 
         # Target number of partitions formed - wait for stability
         if not self.CM.cluster_stable():
             self.failure("Partitioned cluster not stable")
 
         # Now audit the cluster state
         self.CM.partitions_expected = p_max
         if not self.audit():
             self.failure("Audits failed")
         self.CM.partitions_expected = 1
 
         # And heal them again
         for key in partitions.keys():
             self.heal_partition(partitions[key])
 
         # Wait for a single partition to form
         count = 30
         while count > 0:
             if len(self.CM.find_partitions()) != 1:
                 time.sleep(10)
                 count -= 1
             else:
                 break
         else:
             self.failure("Cluster did not reform")
 
         # Wait for it to have the right number of members
         count = 30
         while count > 0:
             members = []
 
             partitions = self.CM.find_partitions()
             if len(partitions) > 0:
                 members = partitions[0].split()
 
             if len(members) != len(self.Env["nodes"]):
                 time.sleep(10)
                 count -= 1
             else:
                 break
         else:
             self.failure("Cluster did not completely reform")
 
         # Wait up to 20 minutes - the delay is preferable to trying
         # to continue in a messed-up state
         if not self.CM.cluster_stable(1200):
             self.failure("Reformed cluster not stable")
             answer = raw_input('Continue? [nY]')
             if answer and answer == "n":
                 raise ValueError("Reformed cluster not stable")
 
         # Turn fencing back on
         if self.Env["DoFencing"]:
             self.rsh(node, "crm_attribute -V -D -n stonith-enabled")
 
         self.CM.cluster_stable()
 
         if self.passed:
             return self.success()
         return self.failure("See previous errors")
 
     def errorstoignore(self):
         '''Return list of errors which are 'normal' and should be ignored'''
         return [
-            "Another DC detected:",
-            "(ERROR|error): attrd_cib_callback: .*Application of an update diff failed",
-            "crmd_ha_msg_callback:.*not in our membership list",
-            "CRIT:.*node.*returning after partition",
-            ]
+            r"Another DC detected:",
+            r"(ERROR|error).*: .*Application of an update diff failed",
+            r"crmd.*:.*not in our membership list",
+            r"CRIT:.*node.*returning after partition",
+        ]
 
     def is_applicable(self):
         if not self.is_applicable_common():
             return 0
         return len(self.Env["nodes"]) > 2
 
 AllTestClasses.append(SplitBrainTest)
 
 
 class Reattach(CTSTest):
     def __init__(self, cm):
         CTSTest.__init__(self,cm)
         self.name = "Reattach"
         self.startall = SimulStartLite(cm)
         self.restart1 = RestartTest(cm)
         self.stopall = SimulStopLite(cm)
         self.is_unsafe = 0 # Handled by canrunnow()
 
     def setup(self, node):
         attempt = 0
         if not self.startall(None):
             return None
 
         # Make sure we are really _really_ stable and that all
         # resources, including those that depend on transient node
         # attributes, are started
         while not self.CM.cluster_stable(double_check=True):
             if attempt < 5:
                 attempt += 1
                 self.debug("Not stable yet, re-testing")
             else:
                 self.logger.log("Cluster is not stable")
                 return None
 
         return 1
 
     def teardown(self, node):
 
         # Make sure 'node' is up
         start = StartTest(self.CM)
         start(node)
 
         is_managed = self.rsh(node, "crm_attribute -Q -G -t crm_config -n is-managed-default -d true", 1)
         is_managed = is_managed[:-1] # Strip off the newline
         if is_managed != "true":
             self.logger.log("Attempting to re-enable resource management on %s (%s)" % (node, is_managed))
             managed = self.create_watch(["is-managed-default"], 60)
             managed.setwatch()
 
             self.rsh(node, "crm_attribute -V -D -n is-managed-default")
 
             if not managed.lookforall():
                 self.logger.log("Patterns not found: " + repr(managed.unmatched))
                 self.logger.log("Could not re-enable resource management")
                 return 0
 
         return 1
 
     def canrunnow(self, node):
         '''Return TRUE if we can meaningfully run right now'''
         if self.find_ocfs2_resources(node):
             self.logger.log("Detach/Reattach scenarios are not possible with OCFS2 services present")
             return 0
         return 1
 
     def __call__(self, node):
         self.incr("calls")
 
         pats = []
         managed = self.create_watch(["is-managed-default"], 60)
         managed.setwatch()
 
         self.debug("Disable resource management")
         self.rsh(node, "crm_attribute -V -n is-managed-default -v false")
 
         if not managed.lookforall():
             self.logger.log("Patterns not found: " + repr(managed.unmatched))
             return self.failure("Resource management not disabled")
 
         pats = []
         pats.append(self.templates["Pat:RscOpOK"] % (".*", "start"))
         pats.append(self.templates["Pat:RscOpOK"] % (".*", "stop"))
         pats.append(self.templates["Pat:RscOpOK"] % (".*", "promote"))
         pats.append(self.templates["Pat:RscOpOK"] % (".*", "demote"))
         pats.append(self.templates["Pat:RscOpOK"] % (".*", "migrate"))
 
         watch = self.create_watch(pats, 60, "ShutdownActivity")
         watch.setwatch()
 
         self.debug("Shutting down the cluster")
         ret = self.stopall(None)
         if not ret:
             self.debug("Re-enable resource management")
             self.rsh(node, "crm_attribute -V -D -n is-managed-default")
             return self.failure("Couldn't shut down the cluster")
 
         self.debug("Bringing the cluster back up")
         ret = self.startall(None)
         time.sleep(5) # allow ping to update the CIB
         if not ret:
             self.debug("Re-enable resource management")
             self.rsh(node, "crm_attribute -V -D -n is-managed-default")
             return self.failure("Couldn't restart the cluster")
 
         if self.local_badnews("ResourceActivity:", watch):
             self.debug("Re-enable resource management")
             self.rsh(node, "crm_attribute -V -D -n is-managed-default")
             return self.failure("Resources stopped or started during cluster restart")
 
         watch = self.create_watch(pats, 60, "StartupActivity")
         watch.setwatch()
 
         managed = self.create_watch(["is-managed-default"], 60)
         managed.setwatch()
 
         self.debug("Re-enable resource management")
         self.rsh(node, "crm_attribute -V -D -n is-managed-default")
 
         if not managed.lookforall():
             self.logger.log("Patterns not found: " + repr(managed.unmatched))
             return self.failure("Resource management not enabled")
 
         self.CM.cluster_stable()
 
         # Ignore actions for STONITH resources
         ignore = []
         (rc, lines) = self.rsh(node, "crm_resource -c", None)
         for line in lines:
             if re.search("^Resource", line):
                 r = AuditResource(self.CM, line)
                 if r.rclass == "stonith":
 
                     self.debug("Ignoring start actions for %s" % r.id)
                     ignore.append(self.templates["Pat:RscOpOK"] % (r.id, "start_0"))
 
         if self.local_badnews("ResourceActivity:", watch, ignore):
             return self.failure("Resources stopped or started after resource management was re-enabled")
 
         return ret
 
     def errorstoignore(self):
         '''Return list of errors which should be ignored'''
         return [
-            "resources were active at shutdown",
-            "pingd: .*(ERROR|error): send_ipc_message:",
-            "pingd: .*(ERROR|error): send_update:",
-            "lrmd: .*(ERROR|error): notify_client:",
-            ]
+            r"resources were active at shutdown",
+        ]
 
     def is_applicable(self):
         if self.Env["Name"] == "crm-lha":
             return None
         return 1
 
 AllTestClasses.append(Reattach)
 
 
 class SpecialTest1(CTSTest):
     '''Set up a custom test to cause quorum failure issues for Andrew'''
     def __init__(self, cm):
         CTSTest.__init__(self,cm)
         self.name = "SpecialTest1"
         self.startall = SimulStartLite(cm)
         self.restart1 = RestartTest(cm)
         self.stopall = SimulStopLite(cm)
 
     def __call__(self, node):
         '''Perform the 'SpecialTest1' test for Andrew. '''
         self.incr("calls")
 
         #        Shut down all the nodes...
         ret = self.stopall(None)
         if not ret:
             return self.failure("Could not stop all nodes")
 
         # Test config recovery when the other nodes come up
         self.rsh(node, "rm -f "+CTSvars.CRM_CONFIG_DIR+"/cib*")
 
         #        Start the selected node
         ret = self.restart1(node)
         if not ret:
             return self.failure("Could not start "+node)
 
         #        Start all remaining nodes
         ret = self.startall(None)
         if not ret:
             return self.failure("Could not start the remaining nodes")
 
         return self.success()
 
     def errorstoignore(self):
         '''Return list of errors which should be ignored'''
         # Errors that occur as a result of the CIB being wiped
         return [
-            """warning: retrieveCib: Cluster configuration not found:""",
-            """error: cib_perform_op: v1 patchset error, patch failed to apply: Application of an update diff failed""",
-            """error: unpack_resources: Resource start-up disabled since no STONITH resources have been defined""",
-            """error: unpack_resources: Either configure some or disable STONITH with the stonith-enabled option""",
-            """error: unpack_resources: NOTE: Clusters with shared data need STONITH to ensure data integrity""",
+            r"error.*: v1 patchset error, patch failed to apply: Application of an update diff failed",
+            r"error.*: Resource start-up disabled since no STONITH resources have been defined",
+            r"error.*: Either configure some or disable STONITH with the stonith-enabled option",
+            r"error.*: NOTE: Clusters with shared data need STONITH to ensure data integrity",
         ]
 
 AllTestClasses.append(SpecialTest1)
 
 
 class HAETest(CTSTest):
     '''Base class for tests of the HA Extension (HAE) stack: DLM, O2CB and OCFS2 resources'''
     def __init__(self, cm):
         CTSTest.__init__(self,cm)
         self.name = "HAETest"
         self.stopall = SimulStopLite(cm)
         self.startall = SimulStartLite(cm)
         self.is_loop = 1
 
     def setup(self, node):
         #  Start all remaining nodes
         ret = self.startall(None)
         if not ret:
             return self.failure("Couldn't start all nodes")
         return self.success()
 
     def teardown(self, node):
         # Stop everything
         ret = self.stopall(None)
         if not ret:
             return self.failure("Couldn't stop all nodes")
         return self.success()
 
     def wait_on_state(self, node, resource, expected_clones, attempts=240):
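         # Poll crm_resource until 'resource' reports the expected number of
         # active instances; returns 1 on success, 0 on error or timeout.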
         while attempts > 0:
             active = 0
             (rc, lines) = self.rsh(node, "crm_resource -r %s -W -Q" % resource, stdout=None)
 
             # Hack until crm_resource does the right thing
             if rc == 0 and lines:
                 active = len(lines)
 
             if len(lines) == expected_clones:
                 return 1
 
             elif rc == 1:
                 self.debug("Resource %s is still inactive" % resource)
 
             elif rc == 234:
                 self.logger.log("Unknown resource %s" % resource)
                 return 0
 
             elif rc == 246:
                 self.logger.log("Cluster is inactive")
                 return 0
 
             elif rc != 0:
                 self.logger.log("Call to crm_resource failed, rc=%d" % rc)
                 return 0
 
             else:
                 self.debug("Resource %s is active on %d times instead of %d" % (resource, active, expected_clones))
 
             attempts -= 1
             time.sleep(1)
 
         return 0
 
     def find_dlm(self, node):
         self.r_dlm = None
 
         (rc, lines) = self.rsh(node, "crm_resource -c", None)
         for line in lines:
             if re.search("^Resource", line):
                 r = AuditResource(self.CM, line)
                 if r.rtype == "controld" and r.parent != "NA":
                     self.debug("Found dlm: %s" % self.r_dlm)
                     self.r_dlm = r.parent
                     return 1
         return 0
 
     def find_hae_resources(self, node):
         self.r_dlm = None
         self.r_o2cb = None
         self.r_ocfs2 = []
 
         if self.find_dlm(node):
             self.find_ocfs2_resources(node)
 
     def is_applicable(self):
         if not self.is_applicable_common():
             return 0
         if self.Env["Schema"] == "hae":
             return 1
         return None
 
 
 class HAERoleTest(HAETest):
     def __init__(self, cm):
         '''Lars' mount/unmount test for the HA extension. '''
         HAETest.__init__(self,cm)
         self.name = "HAERoleTest"
 
     def change_state(self, node, resource, target):
         rc = self.rsh(node, "crm_resource -V -r %s -p target-role -v %s  --meta" % (resource, target))
         return rc
 
     def __call__(self, node):
         self.incr("calls")
         lpc = 0
         failed = 0
         delay = 2
         done = time.time() + self.Env["loop-minutes"]*60
         self.find_hae_resources(node)
 
         clone_max = len(self.Env["nodes"])
         while time.time() <= done and not failed:
             lpc = lpc + 1
 
             self.change_state(node, self.r_dlm, "Stopped")
             if not self.wait_on_state(node, self.r_dlm, 0):
                 self.failure("%s did not go down correctly" % self.r_dlm)
                 failed = lpc
 
             self.change_state(node, self.r_dlm, "Started")
             if not self.wait_on_state(node, self.r_dlm, clone_max):
                 self.failure("%s did not come up correctly" % self.r_dlm)
                 failed = lpc
 
             if not self.wait_on_state(node, self.r_o2cb, clone_max):
                 self.failure("%s did not come up correctly" % self.r_o2cb)
                 failed = lpc
 
             for fs in self.r_ocfs2:
                 if not self.wait_on_state(node, fs, clone_max):
                     self.failure("%s did not come up correctly" % fs)
                     failed = lpc
 
         if failed:
             return self.failure("iteration %d failed" % failed)
         return self.success()
 
 AllTestClasses.append(HAERoleTest)
 
 
 class HAEStandbyTest(HAETest):
     '''Put a node in and out of standby and verify that the HAE resources stop and restart accordingly'''
     def __init__(self, cm):
         HAETest.__init__(self,cm)
         self.name = "HAEStandbyTest"
 
     def change_state(self, node, resource, target):
         rc = self.rsh(node, "crm_standby -V -l reboot -v %s" % (target))
         return rc
 
     def __call__(self, node):
         self.incr("calls")
 
         lpc = 0
         failed = 0
         done = time.time() + self.Env["loop-minutes"]*60
         self.find_hae_resources(node)
 
         clone_max = len(self.Env["nodes"])
         while time.time() <= done and not failed:
             lpc = lpc + 1
 
             self.change_state(node, self.r_dlm, "true")
             if not self.wait_on_state(node, self.r_dlm, clone_max-1):
                 self.failure("%s did not go down correctly" % self.r_dlm)
                 failed = lpc
 
             self.change_state(node, self.r_dlm, "false")
             if not self.wait_on_state(node, self.r_dlm, clone_max):
                 self.failure("%s did not come up correctly" % self.r_dlm)
                 failed = lpc
 
             if not self.wait_on_state(node, self.r_o2cb, clone_max):
                 self.failure("%s did not come up correctly" % self.r_o2cb)
                 failed = lpc
 
             for fs in self.r_ocfs2:
                 if not self.wait_on_state(node, fs, clone_max):
                     self.failure("%s did not come up correctly" % fs)
                     failed = lpc
 
         if failed:
             return self.failure("iteration %d failed" % failed)
         return self.success()
 
 AllTestClasses.append(HAEStandbyTest)
 
 
 class NearQuorumPointTest(CTSTest):
     '''
     This test brings larger clusters near the quorum point (50%).
     In addition, it will test doing starts and stops at the same time.
 
     Here is how I think it should work:
     - loop over the nodes and decide randomly which will be up and which
       will be down.  Use a 50% probability for each of up/down.
     - figure out what to do to get into that state from the current state
     - in parallel, bring up those going up and bring down those going down.
     '''
 
     def __init__(self, cm):
         CTSTest.__init__(self,cm)
         self.name = "NearQuorumPoint"
 
     def __call__(self, dummy):
         '''Perform the 'NearQuorumPoint' test. '''
         self.incr("calls")
         startset = []
         stopset = []
 
         stonith = self.CM.prepare_fencing_watcher("NearQuorumPoint")
         #decide what to do with each node
         for node in self.Env["nodes"]:
             action = self.Env.RandomGen.choice(["start","stop"])
             #action = self.Env.RandomGen.choice(["start","stop","no change"])
             if action == "start" :
                 startset.append(node)
             elif action == "stop" :
                 stopset.append(node)
 
         self.debug("start nodes:" + repr(startset))
         self.debug("stop nodes:" + repr(stopset))
 
         #add search patterns
         watchpats = [ ]
         for node in stopset:
             if self.CM.ShouldBeStatus[node] == "up":
                 watchpats.append(self.templates["Pat:We_stopped"] % node)
 
         for node in startset:
             if self.CM.ShouldBeStatus[node] == "down":
                 #watchpats.append(self.templates["Pat:Slave_started"] % node)
                 watchpats.append(self.templates["Pat:Local_started"] % node)
             else:
                 for stopping in stopset:
                     if self.CM.ShouldBeStatus[stopping] == "up":
                         watchpats.append(self.templates["Pat:They_stopped"] % (node, self.CM.key_for_node(stopping)))
 
         if len(watchpats) == 0:
             return self.skipped()
 
         if len(startset) != 0:
             watchpats.append(self.templates["Pat:DC_IDLE"])
 
         watch = self.create_watch(watchpats, self.Env["DeadTime"]+10)
 
         watch.setwatch()
 
         #begin actions
         for node in stopset:
             if self.CM.ShouldBeStatus[node] == "up":
                 self.CM.StopaCMnoBlock(node)
 
         for node in startset:
             if self.CM.ShouldBeStatus[node] == "down":
                 self.CM.StartaCMnoBlock(node)
 
         #get the result
         if watch.lookforall():
             self.CM.cluster_stable()
             self.CM.fencing_cleanup("NearQuorumPoint", stonith)
             return self.success()
 
         self.logger.log("Warn: Patterns not found: " + repr(watch.unmatched))
 
         #get the "bad" nodes
         upnodes = []
         for node in stopset:
             if self.CM.StataCM(node) == 1:
                 upnodes.append(node)
 
         downnodes = []
         for node in startset:
             if self.CM.StataCM(node) == 0:
                 downnodes.append(node)
 
         self.CM.fencing_cleanup("NearQuorumPoint", stonith)
         if upnodes == [] and downnodes == []:
             self.CM.cluster_stable()
 
             # Make sure they're completely down with no residue
             for node in stopset:
                 self.rsh(node, self.templates["StopCmd"])
 
             return self.success()
 
         if len(upnodes) > 0:
             self.logger.log("Warn: Unstoppable nodes: " + repr(upnodes))
 
         if len(downnodes) > 0:
             self.logger.log("Warn: Unstartable nodes: " + repr(downnodes))
 
         return self.failure()
 
     def is_applicable(self):
         if self.Env["Name"] == "crm-cman":
             return None
         return 1
 
 AllTestClasses.append(NearQuorumPointTest)
 
 
 class RollingUpgradeTest(CTSTest):
     '''Perform a rolling upgrade of the cluster'''
     def __init__(self, cm):
         CTSTest.__init__(self,cm)
         self.name = "RollingUpgrade"
         self.start = StartTest(cm)
         self.stop = StopTest(cm)
         self.stopall = SimulStopLite(cm)
         self.startall = SimulStartLite(cm)
 
     def setup(self, node):
         #  Stop all nodes so they can be downgraded
         ret = self.stopall(None)
         if not ret:
             return self.failure("Couldn't stop all nodes")
 
         for node in self.Env["nodes"]:
             if not self.downgrade(node, None):
                 return self.failure("Couldn't downgrade %s" % node)
 
         ret = self.startall(None)
         if not ret:
             return self.failure("Couldn't start all nodes")
         return self.success()
 
     def teardown(self, node):
         # Stop everything
         ret = self.stopall(None)
         if not ret:
             return self.failure("Couldn't stop all nodes")
 
         for node in self.Env["nodes"]:
             if not self.upgrade(node, None):
                 return self.failure("Couldn't upgrade %s" % node)
 
         return self.success()
 
     def install(self, node, version, start=1, flags="--force"):
 
         target_dir = "/tmp/rpm-%s" % version
         src_dir = "%s/%s" % (self.Env["rpm-dir"], version)
 
         self.logger.log("Installing %s on %s with %s" % (version, node, flags))
         if not self.stop(node):
             return self.failure("stop failure: "+node)
 
         rc = self.rsh(node, "mkdir -p %s" % target_dir)
         rc = self.rsh(node, "rm -f %s/*.rpm" % target_dir)
         (rc, lines) = self.rsh(node, "ls -1 %s/*.rpm" % src_dir, None)
         for line in lines:
             line = line[:-1]
             rc = self.rsh.cp("%s" % (line), "%s:%s/" % (node, target_dir))
         rc = self.rsh(node, "rpm -Uvh %s %s/*.rpm" % (flags, target_dir))
 
         if start and not self.start(node):
             return self.failure("start failure: "+node)
 
         return self.success()
 
     def upgrade(self, node, start=1):
         return self.install(node, self.Env["current-version"], start)
 
     def downgrade(self, node, start=1):
         return self.install(node, self.Env["previous-version"], start, "--force --nodeps")
 
     def __call__(self, node):
         '''Perform the 'Rolling Upgrade' test. '''
         self.incr("calls")
 
         for node in self.Env["nodes"]:
             if not self.upgrade(node):
                 return self.failure("Couldn't upgrade %s" % node)
 
             self.CM.cluster_stable()
 
         return self.success()
 
     def is_applicable(self):
         if not self.is_applicable_common():
             return None
 
         if not self.Env.has_key("rpm-dir"):
             return None
         if not self.Env.has_key("current-version"):
             return None
         if not self.Env.has_key("previous-version"):
             return None
 
         return 1
 
 #        Register RollingUpgradeTest as a good test to run
 AllTestClasses.append(RollingUpgradeTest)
 
 
 class BSC_AddResource(CTSTest):
     '''Add a resource to the cluster'''
     def __init__(self, cm):
         CTSTest.__init__(self, cm)
         self.name = "AddResource"
         self.resource_offset = 0
         self.cib_cmd = """cibadmin -C -o %s -X '%s' """
 
     def __call__(self, node):
         self.incr("calls")
         self.resource_offset = self.resource_offset + 1
 
         r_id = "bsc-rsc-%s-%d" % (node, self.resource_offset)
         start_pat = "crmd.*%s_start_0.*confirmed.*ok"
 
         patterns = []
         patterns.append(start_pat % r_id)
 
         watch = self.create_watch(patterns, self.Env["DeadTime"])
         watch.setwatch()
 
         ip = self.NextIP()
         if not self.make_ip_resource(node, r_id, "ocf", "IPaddr", ip):
             return self.failure("Make resource %s failed" % r_id)
 
         failed = 0
         watch_result = watch.lookforall()
         if watch.unmatched:
             for regex in watch.unmatched:
                 self.logger.log ("Warn: Pattern not found: %s" % (regex))
                 failed = 1
 
         if failed:
             return self.failure("Resource pattern(s) not found")
 
         if not self.CM.cluster_stable(self.Env["DeadTime"]):
             return self.failure("Unstable cluster")
 
         return self.success()
 
     def NextIP(self):
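         # Increment the last field of Env["IPBase"] (dotted quad or IPv6 hextet)
         # and return the new address, which also becomes the next base.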
         ip = self.Env["IPBase"]
         if ":" in ip:
             fields = ip.rpartition(":")
             fields[2] = str(hex(int(fields[2], 16)+1))
             print str(hex(int(f[2], 16)+1))
         else:
             fields = ip.rpartition('.')
             fields[2] = str(int(fields[2])+1)
 
         ip = fields[0] + fields[1] + fields[3];
         self.Env["IPBase"] = ip
         return ip.strip()
 
     def make_ip_resource(self, node, id, rclass, type, ip):
         self.logger.log("Creating %s::%s:%s (%s) on %s" % (rclass,type,id,ip,node))
         rsc_xml="""
 <primitive id="%s" class="%s" type="%s"  provider="heartbeat">
     <instance_attributes id="%s"><attributes>
         <nvpair id="%s" name="ip" value="%s"/>
     </attributes></instance_attributes>
 </primitive>""" % (id, rclass, type, id, id, ip)
 
         node_constraint = """
       <rsc_location id="run_%s" rsc="%s">
         <rule id="pref_run_%s" score="100">
           <expression id="%s_loc_expr" attribute="#uname" operation="eq" value="%s"/>
         </rule>
       </rsc_location>""" % (id, id, id, id, node)
 
         rc = 0
         (rc, lines) = self.rsh(node, self.cib_cmd % ("constraints", node_constraint), None)
         if rc != 0:
             self.logger.log("Constraint creation failed: %d" % rc)
             return None
 
         (rc, lines) = self.rsh(node, self.cib_cmd % ("resources", rsc_xml), None)
         if rc != 0:
             self.logger.log("Resource creation failed: %d" % rc)
             return None
 
         return 1
 
     def is_applicable(self):
         if self.Env["DoBSC"]:
             return 1
         return None
 
 AllTestClasses.append(BSC_AddResource)
 
 
 class SimulStopLite(CTSTest):
     '''Stop any active nodes ~ simultaneously'''
     def __init__(self, cm):
         CTSTest.__init__(self,cm)
         self.name = "SimulStopLite"
 
     def __call__(self, dummy):
         '''Perform the 'SimulStopLite' setup work. '''
         self.incr("calls")
 
         self.debug("Setup: " + self.name)
 
         #     We ignore the "node" parameter...
         watchpats = [ ]
 
         for node in self.Env["nodes"]:
             if self.CM.ShouldBeStatus[node] == "up":
                 self.incr("WasStarted")
                 watchpats.append(self.templates["Pat:We_stopped"] % node)
                 #if self.Env["use_logd"]:
                 #    watchpats.append(self.templates["Pat:Logd_stopped"] % node)
 
         if len(watchpats) == 0:
             self.CM.clear_all_caches()
             return self.success()
 
         #     Stop all the nodes - at about the same time...
         watch = self.create_watch(watchpats, self.Env["DeadTime"]+10)
 
         watch.setwatch()
         self.set_timer()
         for node in self.Env["nodes"]:
             if self.CM.ShouldBeStatus[node] == "up":
                 self.CM.StopaCMnoBlock(node)
         if watch.lookforall():
             self.CM.clear_all_caches()
 
             # Make sure they're completely down with no residue
             for node in self.Env["nodes"]:
                 self.rsh(node, self.templates["StopCmd"])
 
             return self.success()
 
         did_fail = 0
         up_nodes = []
         for node in self.Env["nodes"]:
             if self.CM.StataCM(node) == 1:
                 did_fail = 1
                 up_nodes.append(node)
 
         if did_fail:
             return self.failure("Active nodes exist: " + repr(up_nodes))
 
         self.logger.log("Warn: All nodes stopped but CTS didnt detect: "
                     + repr(watch.unmatched))
 
         self.CM.clear_all_caches()
         return self.failure("Missing log message: "+repr(watch.unmatched))
 
     def is_applicable(self):
         '''SimulStopLite is a setup test and never applicable'''
         return 0
 
 
 class SimulStartLite(CTSTest):
     '''Start any stopped nodes ~ simultaneously'''
     def __init__(self, cm):
         CTSTest.__init__(self,cm)
         self.name = "SimulStartLite"
 
     def __call__(self, dummy):
         '''Perform the 'SimulStartLite' setup work. '''
         self.incr("calls")
         self.debug("Setup: " + self.name)
 
         #        We ignore the "node" parameter...
         node_list = []
         for node in self.Env["nodes"]:
             if self.CM.ShouldBeStatus[node] == "down":
                 self.incr("WasStopped")
                 node_list.append(node)
 
         self.set_timer()
         while len(node_list) > 0:
             # Repeat until all nodes come up
             watchpats = [ ]
 
             uppat = self.templates["Pat:Slave_started"]
             if self.CM.upcount() == 0:
                 uppat = self.templates["Pat:Local_started"]
 
             watchpats.append(self.templates["Pat:DC_IDLE"])
             for node in node_list:
                 watchpats.append(uppat % node)
                 watchpats.append(self.templates["Pat:InfraUp"] % node)
                 watchpats.append(self.templates["Pat:PacemakerUp"] % node)
 
             #   Start all the nodes - at about the same time...
             watch = self.create_watch(watchpats, self.Env["DeadTime"]+10)
             watch.setwatch()
 
             stonith = self.CM.prepare_fencing_watcher(self.name)
 
             for node in node_list:
                 self.CM.StartaCMnoBlock(node)
 
             watch.lookforall()
 
             node_list = self.CM.fencing_cleanup(self.name, stonith)
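             # fencing_cleanup() returns the nodes that still need to be started
             # (e.g. because they were fenced); the outer loop will retry them.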
 
             # Remove node_list messages from watch.unmatched
             for node in node_list:
                 self.logger.debug("Dealing with stonith operations for %s" % repr(node_list))
                 if watch.unmatched:
                     try:
                         watch.unmatched.remove(uppat % node)
                     except ValueError:
                         self.debug("Already matched: %s" % (uppat % node))
                     try:
                         watch.unmatched.remove(self.templates["Pat:InfraUp"] % node)
                     except ValueError:
                         self.debug("Already matched: %s" % (self.templates["Pat:InfraUp"] % node))
                     try:
                         watch.unmatched.remove(self.templates["Pat:PacemakerUp"] % node)
                     except ValueError:
                         self.debug("Already matched: %s" % (self.templates["Pat:PacemakerUp"] % node))
 
             if watch.unmatched:
                 for regex in watch.unmatched:
                     self.logger.log ("Warn: Startup pattern not found: %s" %(regex))
 
             if not self.CM.cluster_stable():
                 return self.failure("Cluster did not stabilize")
 
         did_fail = 0
         unstable = []
         for node in self.Env["nodes"]:
             if self.CM.StataCM(node) == 0:
                 did_fail = 1
                 unstable.append(node)
 
         if did_fail:
             return self.failure("Unstarted nodes exist: " + repr(unstable))
 
         unstable = []
         for node in self.Env["nodes"]:
             if not self.CM.node_stable(node):
                 did_fail = 1
                 unstable.append(node)
 
         if did_fail:
             return self.failure("Unstable cluster nodes exist: " + repr(unstable))
 
         return self.success()
 
     def is_applicable(self):
         '''SimulStartLite is a setup test and never applicable'''
         return 0
 
 
 def TestList(cm, audits):
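     '''Return one instance of each applicable test class, with Audits attached'''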
     result = []
     for testclass in AllTestClasses:
         bound_test = testclass(cm)
         if bound_test.is_applicable():
             bound_test.Audits = audits
             result.append(bound_test)
     return result
 
 
 class RemoteLXC(CTSTest):
     def __init__(self, cm):
         CTSTest.__init__(self,cm)
         self.name = "RemoteLXC"
         self.start = StartTest(cm)
         self.startall = SimulStartLite(cm)
         self.num_containers = 2
         self.is_container = 1
         self.is_docker_unsafe = 1
         self.failed = 0
         self.fail_string = ""
 
     def start_lxc_simple(self, node):
 
         # restore any artifacts lying around from a previous test.
         self.rsh(node, "/usr/share/pacemaker/tests/cts/lxc_autogen.sh -R &>/dev/null")
 
         # generate the containers, put them in the config, add some resources to them
         pats = [ ]
         watch = self.create_watch(pats, 120)
         watch.setwatch()
         pats.append(self.templates["Pat:RscOpOK"] % ("lxc1", "start_0"))
         pats.append(self.templates["Pat:RscOpOK"] % ("lxc2", "start_0"))
         pats.append(self.templates["Pat:RscOpOK"] % ("lxc-ms", "start_0"))
         pats.append(self.templates["Pat:RscOpOK"] % ("lxc-ms", "promote_0"))
 
         self.rsh(node, "/usr/share/pacemaker/tests/cts/lxc_autogen.sh -g -a -m -s -c %d &>/dev/null" % self.num_containers)
         self.set_timer("remoteSimpleInit")
         watch.lookforall()
         self.log_timer("remoteSimpleInit")
         if watch.unmatched:
             self.fail_string = "Unmatched patterns: %s" % (repr(watch.unmatched))
             self.failed = 1
 
     def cleanup_lxc_simple(self, node):
 
         pats = [ ]
         # if the test failed, attempt to clean up the cib and libvirt environment
         # as best as possible 
         if self.failed == 1:
             # restore libvirt and cib
             self.rsh(node, "/usr/share/pacemaker/tests/cts/lxc_autogen.sh -R &>/dev/null")
             self.rsh(node, "crm_resource -C -r container1 &>/dev/null")
             self.rsh(node, "crm_resource -C -r container2 &>/dev/null")
             self.rsh(node, "crm_resource -C -r lxc1 &>/dev/null")
             self.rsh(node, "crm_resource -C -r lxc2 &>/dev/null")
             self.rsh(node, "crm_resource -C -r lxc-ms &>/dev/null")
             time.sleep(20)
             return
 
         watch = self.create_watch(pats, 120)
         watch.setwatch()
 
         pats.append(self.templates["Pat:RscOpOK"] % ("container1", "stop_0"))
         pats.append(self.templates["Pat:RscOpOK"] % ("container2", "stop_0"))
 
         self.rsh(node, "/usr/share/pacemaker/tests/cts/lxc_autogen.sh -p &>/dev/null")
         self.set_timer("remoteSimpleCleanup")
         watch.lookforall()
         self.log_timer("remoteSimpleCleanup")
 
         if watch.unmatched:
             self.fail_string = "Unmatched patterns: %s" % (repr(watch.unmatched))
             self.failed = 1
 
         # cleanup libvirt
         self.rsh(node, "/usr/share/pacemaker/tests/cts/lxc_autogen.sh -R &>/dev/null")
 
     def __call__(self, node):
         '''Perform the 'RemoteLXC' test. '''
         self.incr("calls")
 
         ret = self.startall(None)
         if not ret:
             return self.failure("Setup failed, start all nodes failed.")
 
         rc = self.rsh(node, "/usr/share/pacemaker/tests/cts/lxc_autogen.sh -v &>/dev/null")
         if rc == 1:
             self.log("Environment test for lxc support failed.")
             return self.skipped()
 
         self.start_lxc_simple(node)
         self.cleanup_lxc_simple(node)
 
         self.debug("Waiting for the cluster to recover")
         self.CM.cluster_stable()
 
         if self.failed == 1:
             return self.failure(self.fail_string)
 
         return self.success()
 
     def errorstoignore(self):
         '''Return list of errors which should be ignored'''
-        return [ """Updating failcount for ping""",
-                 """LogActions: Recover ping""",
-                 """LogActions: Recover lxc-ms""",
-                 """LogActions: Recover container""",
-                 # The orphaned lxc-ms resource causes an expected transition error
-                 # that is a result of the pengine not having knowledge that the 
-                 # ms resource used to be a clone.  As a result it looks like that 
-                 # resource is running in multiple locations when it shouldn't... But in
-                 # this instance we know why this error is occurring and that it is expected.
-                 """Calculated Transition .* /var/lib/pacemaker/pengine/pe-error""",
-                 """Resource lxc-ms .* is active on 2 nodes attempting recovery""",
-                 """Unknown operation: fail""",
-                 """notice: operation_finished: ping-""",
-                 """notice: operation_finished: container""",
-                 """notice: operation_finished: .*_monitor_0:.*:stderr""",
-                 """(ERROR|error): sending stonithRA op to stonithd failed.""",
-                ]
+        return [
+            r"Updating failcount for ping",
+            r"pengine.*: Recover (ping|lxc-ms|container)\s*\(.*\)",
+            # The orphaned lxc-ms resource causes an expected transition error
+            # that is a result of the pengine not having knowledge that the 
+            # ms resource used to be a clone.  As a result it looks like that 
+            # resource is running in multiple locations when it shouldn't... But in
+            # this instance we know why this error is occurring and that it is expected.
+            r"Calculated Transition .* /var/lib/pacemaker/pengine/pe-error",
+            r"Resource lxc-ms .* is active on 2 nodes attempting recovery",
+            r"Unknown operation: fail",
+            r"(ERROR|error): sending stonithRA op to stonithd failed.",
+        ]
 
 AllTestClasses.append(RemoteLXC)
 
 
 ###################################################################
 class RemoteDriver(CTSTest):
 ###################################################################
     def __init__(self, cm):
         CTSTest.__init__(self,cm)
         self.name = "RemoteDriver"
         self.is_docker_unsafe = 1
         self.start = StartTest(cm)
         self.startall = SimulStartLite(cm)
         self.stop = StopTest(cm)
         self.pcmk_started = 0
         self.failed = 0
         self.fail_string = ""
         self.remote_node_added = 0
         self.remote_rsc_added = 0
         self.remote_rsc = "remote-rsc"
         self.cib_cmd = """cibadmin -C -o %s -X '%s' """
 
     def del_rsc(self, node, rsc):
 
         for othernode in self.Env["nodes"]:
             if othernode == node:
                 # we don't want to try to use the CIB on the node we just shut down.
                 # find a cluster node that is not our soon-to-be remote node.
                 continue
             rc = self.rsh(othernode, "crm_resource -D -r %s -t primitive" % (rsc))
             if rc != 0:
                 self.fail_string = ("Removal of resource '%s' failed" % (rsc))
                 self.failed = 1
             return
 
     def add_rsc(self, node, rsc_xml):
         for othernode in self.CM.Env["nodes"]:
             if othernode == node:
                 # we don't want to try to use the CIB on the node we just shut down.
                 # find a cluster node that is not our soon-to-be remote node.
                 continue
             rc = self.rsh(othernode, self.cib_cmd % ("resources", rsc_xml))
             if rc != 0:
                 self.fail_string = "resource creation failed"
                 self.failed = 1
             return
 
     def add_primitive_rsc(self, node):
         rsc_xml = """
 <primitive class="ocf" id="%s" provider="heartbeat" type="Dummy">
     <operations>
       <op id="remote-rsc-monitor-interval-10s" interval="10s" name="monitor"/>
     </operations>
     <meta_attributes id="remote-meta_attributes"/>
 </primitive>""" % (self.remote_rsc)
         self.add_rsc(node, rsc_xml)
         if self.failed == 0:
             self.remote_rsc_added = 1
 
     def add_connection_rsc(self, node):
         rsc_xml = """
 <primitive class="ocf" id="%s" provider="pacemaker" type="remote">
     <instance_attributes id="remote-instance_attributes"/>
         <instance_attributes id="remote-instance_attributes">
           <nvpair id="remote-instance_attributes-server" name="server" value="%s"/>
         </instance_attributes>
     <operations>
       <op id="remote-monitor-interval-60s" interval="60s" name="monitor"/>
       <op id="remote-name-start-interval-0-timeout-120" interval="0" name="start" timeout="120"/>
     </operations>
 </primitive>""" % (self.remote_node, node)
         self.add_rsc(node, rsc_xml)
         if self.failed == 0:
             self.remote_node_added = 1
 
     def stop_pcmk_remote(self, node):
         # disable pcmk remote
         for i in range(10):
             rc = self.rsh(node, "service pacemaker_remote stop")
             if rc != 0:
                 time.sleep(6)
             else:
                 break
 
     def start_pcmk_remote(self, node):
         for i in range(10):
             rc = self.rsh(node, "service pacemaker_remote start")
             if rc != 0:
                 time.sleep(6)
             else:
                 self.pcmk_started = 1
                 break
 
     def start_metal(self, node):
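         # Turn 'node' into a Pacemaker Remote node: stop the cluster stack on
         # it, start pacemaker_remote, then add the remote connection resource
         # from another cluster node and wait for it to start.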
         pcmk_started = 0
 
         # make sure the resource doesn't already exist for some reason
         self.rsh(node, "crm_resource -D -r %s -t primitive" % (self.remote_rsc))
         self.rsh(node, "crm_resource -D -r %s -t primitive" % (self.remote_node))
 
         if not self.stop(node):
             self.failed = 1
             self.fail_string = "Failed to shutdown cluster node %s" % (node)
             return
 
         self.start_pcmk_remote(node)
 
         if self.pcmk_started == 0:
             self.failed = 1
             self.fail_string = "Failed to start pacemaker_remote on node %s" % (node)
             return
 
         # convert the node to a baremetal remote node now that it has shut down the cluster stack
         pats = [ ]
         watch = self.create_watch(pats, 120)
         watch.setwatch()
         pats.append(self.templates["Pat:RscOpOK"] % (self.remote_node, "start"))
         pats.append(self.templates["Pat:DC_IDLE"])
 
         self.add_connection_rsc(node)
 
         self.set_timer("remoteMetalInit")
         watch.lookforall()
         self.log_timer("remoteMetalInit")
         if watch.unmatched:
             self.fail_string = "Unmatched patterns: %s" % (repr(watch.unmatched))
             self.failed = 1
 
     def migrate_connection(self, node):
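         # Move the remote connection resource to another cluster node and
         # watch for the migrate_to/migrate_from operations to complete.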
         if self.failed == 1:
             return
 
         pats = [ ]
         pats.append(self.templates["Pat:RscOpOK"] % (self.remote_node, "migrate_to"))
         pats.append(self.templates["Pat:RscOpOK"] % (self.remote_node, "migrate_from"))
         pats.append(self.templates["Pat:DC_IDLE"])
         watch = self.create_watch(pats, 120)
         watch.setwatch()
 
         (rc, lines) = self.rsh(node, "crm_resource -M -r %s" % (self.remote_node), None)
         if rc != 0:
             self.fail_string = "failed to move remote node connection resource"
             self.logger.log(self.fail_string)
             self.failed = 1
             return
 
         self.set_timer("remoteMetalMigrate")
         watch.lookforall()
         self.log_timer("remoteMetalMigrate")
 
         if watch.unmatched:
             self.fail_string = "Unmatched patterns: %s" % (repr(watch.unmatched))
             self.logger.log(self.fail_string)
             self.failed = 1
             return
 
     def fail_rsc(self, node):
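         # Remove the Dummy resource's state files so its monitor fails, then
         # watch for it to be stopped and restarted on the remote node.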
         if self.failed == 1:
             return
 
         watchpats = [ ]
         watchpats.append(self.templates["Pat:RscRemoteOpOK"] % (self.remote_rsc, "stop", self.remote_node))
         watchpats.append(self.templates["Pat:RscRemoteOpOK"] % (self.remote_rsc, "start", self.remote_node))
         watchpats.append(self.templates["Pat:DC_IDLE"])
 
         watch = self.create_watch(watchpats, 120)
         watch.setwatch()
 
         self.debug("causing dummy rsc to fail.")
 
         rc = self.rsh(node, "rm -f /var/run/resource-agents/Dummy*")
 
         self.set_timer("remoteRscFail")
         watch.lookforall()
         self.log_timer("remoteRscFail")
         if watch.unmatched:
             self.fail_string = "Unmatched patterns during rsc fail: %s" % (repr(watch.unmatched))
             self.logger.log(self.fail_string)
             self.failed = 1
 
     def fail_connection(self, node):
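         # Kill pacemaker_remote on the remote node, which should get the node
         # fenced, then bring it back and wait for it to rejoin the cluster.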
         if self.failed == 1:
             return
 
         watchpats = [ ]
         watchpats.append(self.templates["Pat:FenceOpOK"] % self.remote_node)
         watchpats.append(self.templates["Pat:NodeFenced"] % self.remote_node)
 
         watch = self.create_watch(watchpats, 120)
         watch.setwatch()
 
         # force stop the pcmk remote daemon. this will result in fencing
         self.debug("Force stopped active remote node")
         self.stop_pcmk_remote(node)
 
         self.debug("Waiting for remote node to be fenced.")
         self.set_timer("remoteMetalFence")
         watch.lookforall()
         self.log_timer("remoteMetalFence")
         if watch.unmatched:
             self.fail_string = "Unmatched patterns: %s" % (repr(watch.unmatched))
             self.logger.log(self.fail_string)
             self.failed = 1
             return
 
         self.debug("Waiting for the remote node to come back up")
         self.CM.ns.WaitForNodeToComeUp(node, 120)
 
         pats = [ ]
         watch = self.create_watch(pats, 120)
         watch.setwatch()
         pats.append(self.templates["Pat:RscOpOK"] % (self.remote_node, "start"))
         if self.remote_rsc_added == 1:
             pats.append(self.templates["Pat:RscOpOK"] % (self.remote_rsc, "monitor"))
 
         # start the remote node again watch it integrate back into cluster.
         self.start_pcmk_remote(node)
         if self.pcmk_started == 0:
             self.failed = 1
             self.fail_string = "Failed to start pacemaker_remote on node %s" % (node)
             self.logger.log(self.fail_string)
             return
 
         self.debug("Waiting for remote node to rejoin cluster after being fenced.")
         self.set_timer("remoteMetalRestart")
         watch.lookforall()
         self.log_timer("remoteMetalRestart")
         if watch.unmatched:
             self.fail_string = "Unmatched patterns: %s" % (repr(watch.unmatched))
             self.failed = 1
             self.logger.log(self.fail_string)
             return
 
     def add_dummy_rsc(self, node):
         if self.failed == 1:
             return
 
         # verify we can put a resource on the remote node
         pats = [ ]
         watch = self.create_watch(pats, 120)
         watch.setwatch()
         pats.append(self.templates["Pat:RscRemoteOpOK"] % (self.remote_rsc, "start", self.remote_node))
         pats.append(self.templates["Pat:DC_IDLE"])
 
         # Add a resource that must live on remote-node
         self.add_primitive_rsc(node)
 
         # force that rsc to prefer the remote node. 
         (rc, line) = self.CM.rsh(node, "crm_resource -M -r %s -N %s -f" % (self.remote_rsc, self.remote_node), None)
         if rc != 0:
             self.fail_string = "Failed to place remote resource on remote node."
             self.failed = 1
             return
 
         self.set_timer("remoteMetalRsc")
         watch.lookforall()
         self.log_timer("remoteMetalRsc")
         if watch.unmatched:
             self.fail_string = "Unmatched patterns: %s" % (repr(watch.unmatched))
             self.failed = 1
 
     def test_attributes(self, node):
         if self.failed == 1:
             return
 
         # This verifies permanent attributes can be set on a remote node.  It also
         # verifies the remote node can edit its own CIB node section remotely.
         (rc, line) = self.CM.rsh(node, "crm_attribute -l forever -n testattr -v testval -N %s" % (self.remote_node), None)
         if rc != 0:
             self.fail_string = "Failed to set remote-node attribute. rc:%s output:%s" % (rc, line)
             self.failed = 1
             return
 
         (rc, line) = self.CM.rsh(node, "crm_attribute -l forever -n testattr -Q -N %s" % (self.remote_node), None)
         if rc != 0:
             self.fail_string = "Failed to get remote-node attribute"
             self.failed = 1
             return
 
         (rc, line) = self.CM.rsh(node, "crm_attribute -l forever -n testattr -D -N %s" % (self.remote_node), None)
         if rc != 0:
             self.fail_string = "Failed to delete remote-node attribute"
             self.failed = 1
             return
 
     def cleanup_metal(self, node):
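         # Undo start_metal()/add_dummy_rsc(): clear any move constraints and
         # delete the resources we added, then stop pacemaker_remote on the node.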
         if self.pcmk_started == 0:
             return
 
         pats = [ ]
 
         watch = self.create_watch(pats, 120)
         watch.setwatch()
 
         if self.remote_rsc_added == 1:
             pats.append(self.templates["Pat:RscOpOK"] % (self.remote_rsc, "stop"))
         if self.remote_node_added == 1:
             pats.append(self.templates["Pat:RscOpOK"] % (self.remote_node, "stop"))
 
         self.set_timer("remoteMetalCleanup")
         if self.remote_rsc_added == 1:
             self.rsh(node, "crm_resource -U -r %s -N %s" % (self.remote_rsc, self.remote_node))
             self.del_rsc(node, self.remote_rsc)
         if self.remote_node_added == 1:
             self.rsh(node, "crm_resource -U -r %s" % (self.remote_node))
             self.del_rsc(node, self.remote_node)
         watch.lookforall()
         self.log_timer("remoteMetalCleanup")
 
         if watch.unmatched:
             self.fail_string = "Unmatched patterns: %s" % (repr(watch.unmatched))
             self.failed = 1
 
         self.stop_pcmk_remote(node)
 
     def setup_env(self, node):
 
         self.remote_node = "remote_%s" % (node)
         sync_key = 0
 
         # we are assuming if all nodes have a key, that it is
         # the right key... If any node doesn't have a remote
         # key, we regenerate it everywhere.
         for node in self.Env["nodes"]:
             rc = self.rsh(node, "ls /etc/pacemaker/authkey")
             if rc != 0:
                 sync_key = 1
                 break
 
         if sync_key == 0:
             return
 
         # create key locally
         os.system("/usr/share/pacemaker/tests/cts/lxc_autogen.sh -k &> /dev/null")
 
         # sync key throughout the cluster
         for node in self.Env["nodes"]:
             rc = self.rsh(node, "mkdir /etc/pacemaker")
             self.rsh.cp("/etc/pacemaker/authkey", "%s:/etc/pacemaker/authkey" % (node))
 
     def is_applicable(self):
         if not self.is_applicable_common():
             return False
 
         for node in self.Env["nodes"]:
             rc = self.rsh(node, "type pacemaker_remoted >/dev/null 2>&1")
             if rc != 0:
                 return False
         return True
 
     def __call__(self, node):
         '''Perform the 'RemoteBaremetal' test. '''
         self.incr("calls")
 
         ret = self.startall(None)
         if not ret:
             return self.failure("Setup failed, start all nodes failed.")
 
         self.setup_env(node)
         self.start_metal(node)
         self.add_dummy_rsc(node)
         self.test_attributes(node)
         self.cleanup_metal(node)
 
         self.debug("Waiting for the cluster to recover")
         self.CM.cluster_stable()
         if self.failed == 1:
             return self.failure(self.fail_string)
 
         return self.success()
 
     def errorstoignore(self):
         '''Return list of errors which should be ignored'''
         return [ """is running on remote.*which isn't allowed""",
                  """Connection terminated""",
                  """Failed to send remote""",
                 ]
 
 # Remote driver is called by other tests.
 
 ###################################################################
 class RemoteBasic(CTSTest):
 ###################################################################
     def __init__(self, cm):
         CTSTest.__init__(self,cm)
         self.name = "RemoteBasic"
         self.start = StartTest(cm)
         self.startall = SimulStartLite(cm)
         self.driver = RemoteDriver(cm)
         self.is_docker_unsafe = 1
 
     def __call__(self, node):
         '''Perform the 'RemoteBasic' test. '''
         self.incr("calls")
 
         ret = self.startall(None)
         if not ret:
             return self.failure("Setup failed, start all nodes failed.")
 
         self.driver.setup_env(node)
         self.driver.start_metal(node)
         self.driver.add_dummy_rsc(node)
         self.driver.test_attributes(node)
         self.driver.cleanup_metal(node)
 
         self.debug("Waiting for the cluster to recover")
         self.CM.cluster_stable()
         if self.driver.failed == 1:
             return self.failure(self.driver.fail_string)
 
         return self.success()
 
     def is_applicable(self):
         return self.driver.is_applicable()
 
     def errorstoignore(self):
         return self.driver.errorstoignore()
 
 AllTestClasses.append(RemoteBasic)
 
 ###################################################################
 class RemoteStonithd(CTSTest):
 ###################################################################
     def __init__(self, cm):
         CTSTest.__init__(self,cm)
         self.name = "RemoteStonithd"
         self.start = StartTest(cm)
         self.startall = SimulStartLite(cm)
         self.driver = RemoteDriver(cm)
         self.is_docker_unsafe = 1
 
     def __call__(self, node):
         '''Perform the 'RemoteStonithd' test. '''
         self.incr("calls")
 
         ret = self.startall(None)
         if not ret:
             return self.failure("Setup failed, start all nodes failed.")
 
         self.driver.setup_env(node)
         self.driver.start_metal(node)
         self.driver.add_dummy_rsc(node)
 
         self.driver.fail_connection(node)
         self.driver.cleanup_metal(node)
 
         self.debug("Waiting for the cluster to recover")
         self.CM.cluster_stable()
         if self.driver.failed == 1:
             return self.failure(self.driver.fail_string)
 
         return self.success()
 
     def is_applicable(self):
         if not self.driver.is_applicable():
             return False
 
         if self.Env.has_key("DoFencing"):
             return self.Env["DoFencing"]
 
         return True
 
     def errorstoignore(self):
         ignore_pats = [
-            """Unexpected disconnect on remote-node""",
-            """error: process_lrm_event: Operation remote_.*_monitor""",
-            """LogActions: Recover remote_""",
-            """Calculated Transition .* /var/lib/pacemaker/pengine/pe-error""",
-            """error: native_create_actions: Resource .*ocf::.* is active on 2 nodes attempting recovery""",
+            r"Unexpected disconnect on remote-node",
+            r"crmd.*: error.*: Operation remote_.*_monitor",
+            r"pengine.*: Recover remote_.*\s*\(.*\)",
+            r"Calculated Transition .* /var/lib/pacemaker/pengine/pe-error",
+            r"error.*: Resource .*ocf::.* is active on 2 nodes attempting recovery",
         ]
 
         ignore_pats.extend(self.driver.errorstoignore())
         return ignore_pats
 
 AllTestClasses.append(RemoteStonithd)
 
 ###################################################################
 class RemoteMigrate(CTSTest):
 ###################################################################
     def __init__(self, cm):
         CTSTest.__init__(self,cm)
         self.name = "RemoteMigrate"
         self.start = StartTest(cm)
         self.startall = SimulStartLite(cm)
         self.driver = RemoteDriver(cm)
         self.is_docker_unsafe = 1
 
     def __call__(self, node):
         '''Perform the 'RemoteMigrate' test. '''
         self.incr("calls")
 
         ret = self.startall(None)
         if not ret:
             return self.failure("Setup failed, start all nodes failed.")
 
         self.driver.setup_env(node)
         self.driver.start_metal(node)
         self.driver.add_dummy_rsc(node)
         self.driver.migrate_connection(node)
         self.driver.cleanup_metal(node)
 
         self.debug("Waiting for the cluster to recover")
         self.CM.cluster_stable()
         if self.driver.failed == 1:
             return self.failure(self.driver.fail_string)
 
         return self.success()
 
     def is_applicable(self):
         return self.driver.is_applicable()
 
     def errorstoignore(self):
         return self.driver.errorstoignore()
 
 AllTestClasses.append(RemoteMigrate)
 
 
 ###################################################################
 class RemoteRscFailure(CTSTest):
 ###################################################################
     def __init__(self, cm):
 
         # Fail a resource on a remote node and verify recovery.
         CTSTest.__init__(self,cm)
         self.name = "RemoteRscFailure"
         self.start = StartTest(cm)
         self.startall = SimulStartLite(cm)
         self.driver = RemoteDriver(cm)
         self.is_docker_unsafe = 1
 
     def __call__(self, node):
         '''Perform the 'RemoteRscFailure' test. '''
         self.incr("calls")
 
         ret = self.startall(None)
         if not ret:
             return self.failure("Setup failed, start all nodes failed.")
 
         self.driver.setup_env(node)
         self.driver.start_metal(node)
         self.driver.add_dummy_rsc(node)
 
         # This is an important step. We are migrating the connection
         # before failing the resource. This verifies that the migration
         # has properly maintained control over the remote-node.
         self.driver.migrate_connection(node)
 
         self.driver.fail_rsc(node)
         self.driver.cleanup_metal(node)
 
         self.debug("Waiting for the cluster to recover")
         self.CM.cluster_stable()
         if self.driver.failed == 1:
             return self.failure(self.driver.fail_string)
 
         return self.success()
 
     def is_applicable(self):
         return self.driver.is_applicable()
 
     def errorstoignore(self):
         ignore_pats = [
-            """LogActions: Recover remote-rsc""",
+            r"pengine.*: Recover remote-rsc\s*\(.*\)",
         ]
 
         ignore_pats.extend(self.driver.errorstoignore())
         return ignore_pats
 
 AllTestClasses.append(RemoteRscFailure)
 
 # vim:ts=4:sw=4:et:
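
All of the remote tests above share the same shape: they delegate the work to RemoteDriver and differ only in which driver steps run between add_dummy_rsc() and cleanup_metal(). A minimal sketch of what another driver-based test could look like, using only the interface shown above (the class name RemoteExample is hypothetical and not part of this patch):

###################################################################
class RemoteExample(CTSTest):
###################################################################
    def __init__(self, cm):
        CTSTest.__init__(self, cm)
        self.name = "RemoteExample"
        self.startall = SimulStartLite(cm)
        self.driver = RemoteDriver(cm)
        self.is_docker_unsafe = 1

    def __call__(self, node):
        '''Perform the hypothetical 'RemoteExample' test. '''
        self.incr("calls")

        if not self.startall(None):
            return self.failure("Setup failed, start all nodes failed.")

        # Same skeleton as RemoteBasic; extra driver steps (migrate_connection,
        # fail_rsc, fail_connection, ...) would be inserted here.
        self.driver.setup_env(node)
        self.driver.start_metal(node)
        self.driver.add_dummy_rsc(node)
        self.driver.cleanup_metal(node)

        self.debug("Waiting for the cluster to recover")
        self.CM.cluster_stable()
        if self.driver.failed == 1:
            return self.failure(self.driver.fail_string)
        return self.success()

    def is_applicable(self):
        return self.driver.is_applicable()

    def errorstoignore(self):
        return self.driver.errorstoignore()

# A real test would also be registered: AllTestClasses.append(RemoteExample)
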
diff --git a/cts/OCFIPraTest.py b/cts/OCFIPraTest.py
index 73a76e214b..9900a620f2 100755
--- a/cts/OCFIPraTest.py
+++ b/cts/OCFIPraTest.py
@@ -1,180 +1,180 @@
 #!/usr/bin/python
 
 '''OCF IPaddr/IPaddr2 Resource Agent Test'''
 
 __copyright__ = '''
 Author: Huang Zhen <zhenhltc@cn.ibm.com>
 Copyright (C) 2004 International Business Machines
 Licensed under the GNU GPL.
 '''
 
 #
 # This program is free software; you can redistribute it and/or
 # modify it under the terms of the GNU General Public License
 # as published by the Free Software Foundation; either version 2
 # of the License, or (at your option) any later version.
 #
 # This program is distributed in the hope that it will be useful,
 # but WITHOUT ANY WARRANTY; without even the implied warranty of
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 # GNU General Public License for more details.
 #
 # You should have received a copy of the GNU General Public License
 # along with this program; if not, write to the Free Software
 # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA.
 
-import string,sys,struct,os,random,time,syslog
+import string, sys, struct, os, random, time, syslog
 from cts.CTSvars import *
 
 
 def usage():
     print "usage: " + sys.argv[0]  \
     +  " [-2]"\
     +  " [--ipbase|-i first-test-ip]"\
     +  " [--ipnum|-n test-ip-num]"\
     +  " [--help|-h]"\
     +  " [--perform|-p op]"\
     +  " [number-of-iterations]"
     sys.exit(1)
 
 
 def perform_op(ra, ip, op):
     os.environ["OCF_RA_VERSION_MAJOR"]    = "1"
     os.environ["OCF_RA_VERSION_MINOR"]    = "0"
     os.environ["OCF_ROOT"]                = CTSvars.OCF_ROOT_DIR
     os.environ["OCF_RESOURCE_INSTANCE"]   = ip
     os.environ["OCF_RESOURCE_TYPE"]       = ra
     os.environ["OCF_RESKEY_ip"]           = ip
     os.environ["HA_LOGFILE"]              = "/dev/null"
     os.environ["HA_LOGFACILITY"]          = "local7"
     path = CTSvars.OCF_ROOT_DIR + "/resource.d/heartbeat/" + ra
     return os.spawnvpe(os.P_WAIT, path, [ra, op], os.environ)
 
 
 def audit(ra, iplist, ipstatus, summary):
     passed = 1
     for ip in iplist:
         ret = perform_op(ra, ip, "monitor")
         if ret != ipstatus[ip]:
             passed = 0
             log("audit: status of %s should be %d but it is %d\t [failure]" %
                 (ip,ipstatus[ip],ret))
             ipstatus[ip] = ret    
     summary["audit"]["called"] += 1;
     if passed :
         summary["audit"]["success"] += 1
     else :
         summary["audit"]["failure"] += 1
         
 
 def log(towrite):
     t = time.strftime("%Y/%m/%d_%H:%M:%S\t", time.localtime(time.time()))  
     logstr = t + " "+str(towrite)
     syslog.syslog(logstr)
     print logstr
 
 if __name__ == '__main__': 
     ra = "IPaddr"
     ipbase = "127.0.0.10"
     ipnum = 1
     itnum = 50
     perform = None
     summary = {
         "start":{"called":0,"success":0,"failure":0},
         "stop" :{"called":0,"success":0,"failure":0},
         "audit":{"called":0,"success":0,"failure":0}
     }
     syslog.openlog(sys.argv[0], 0, syslog.LOG_LOCAL7)
     
     # Process arguments...
     skipthis = None
     args = sys.argv[1:]
     for i in range(0, len(args)) :
        if skipthis :
            skipthis = None
            continue
        elif args[i] == "-2" :
            ra = "IPaddr2"
        elif args[i] == "--ip" or args[i] == "-i" :
            skipthis = 1
            ipbase = args[i+1]
        elif args[i] == "--ipnum" or args[i] == "-n" :
            skipthis = 1
            ipnum = int(args[i+1])
        elif args[i] == "--perform" or args[i] == "-p" :
            skipthis = 1
            perform = args[i+1]
        elif args[i] == "--help" or args[i] == "-h" :
            usage()
        else:
            itnum = int(args[i])
 
     log("Begin OCF IPaddr/IPaddr2 Test")
     
     # Generate the test IPs
     iplist = []
     ipstatus = {}
     fields = string.split(ipbase, '.')
     for i in range(0, ipnum) :
         ip = string.join(fields, '.')
         iplist.append(ip)
         ipstatus[ip] = perform_op(ra,ip,"monitor")
         fields[3] = str(int(fields[3])+1)
     log("Test ip:" + str(iplist))
     
     # If the user asked us to perform a single operation, do it and exit
     if perform != None:
         log("Performing operation %s" % perform)
         for ip in iplist:
             perform_op(ra, ip, perform)
         log("Done")
         sys.exit()    
     
     log("RA Type:" + ra)
     log("Test Count:" + str(itnum))
         
     # Prepare Random
     f = open("/dev/urandom", "r")
     seed = struct.unpack("BBB", f.read(3))
     f.close()
     #seed=(123,321,231)
     rand = random.Random()
     rand.seed(seed[0]) 
     log("Test Random Seed:" + str(seed))
     
     #
     # Begin Tests
     
     log(">>>>>>>>>>>>>>>>>>>>>>>>")
     for i in range(0, itnum):
         ip = rand.choice(iplist)
         if ipstatus[ip] == 0:
             op = "stop"
         elif ipstatus[ip] == 7:
             op = "start"
         else :
             op = rand.choice(["start","stop"])
             
         ret = perform_op(ra, ip, op)
         # update status
         if op == "start" and ret == 0:
             ipstatus[ip] = 0
         elif op == "stop" and ret == 0:
             ipstatus[ip] = 7
         else :
             ipstatus[ip] = 1
         result = ""
         if ret == 0:
             result = "success"
         else :
             result = "failure"
         summary[op]["called"] += 1
         summary[op][result] += 1
         log( "%d:%s %s \t[%s]"%(i, op, ip, result))
         audit(ra, iplist, ipstatus, summary)
         
     log("<<<<<<<<<<<<<<<<<<<<<<<<")
     log("start:\t" + str(summary["start"]))
     log("stop: \t" + str(summary["stop"]))
     log("audit:\t" + str(summary["audit"]))
     
diff --git a/cts/cib_xml.py b/cts/cib_xml.py
index d74d45275d..0bd963bfe6 100644
--- a/cts/cib_xml.py
+++ b/cts/cib_xml.py
@@ -1,251 +1,249 @@
 '''CTS: Cluster Testing System: CIB generator
 '''
 __copyright__ = '''
 Author: Andrew Beekhof <abeekhof@suse.de>
 Copyright (C) 2008 Andrew Beekhof
 '''
 
-from UserDict import UserDict
-import sys, time, types, syslog, os, struct, string, signal, traceback, warnings, socket
+import sys
 
 from cts.CTSvars import *
-from cts.CTS     import ClusterManager
 from cts.CIB     import CibBase
 
 
 class XmlBase(CibBase):
     def __init__(self, Factory, tag, _id, **kwargs):
         CibBase.__init__(self, Factory, tag, _id, **kwargs)
 
     def show(self):
         text = '''<%s''' % self.tag
         if self.name:
             text += ''' id="%s"''' % (self.name)
         for k in self.kwargs.keys():
             text += ''' %s="%s"''' % (k, self.kwargs[k])
 
         if not self.children:
             text += '''/>'''
             return text
 
         text += '''>'''
 
         for c in self.children:
             text += c.show()
 
         text += '''</%s>''' % self.tag
         return text
 
     def _run(self, operation, xml, section="all", options=""):
         self.Factory.debug("Writing out %s" % self.name)
         fixed  = "HOME=/root CIB_file="+self.Factory.tmpfile
         fixed += " cibadmin --%s --scope %s %s --xml-text '%s'" % (operation, section, options, xml)
         rc = self.Factory.rsh(self.Factory.target, fixed)
         if rc != 0:
             self.Factory.log("Configure call failed: "+fixed)
             sys.exit(1)
 
 
 class FencingTopology(XmlBase):
     def __init__(self, Factory):
         XmlBase.__init__(self, Factory, "fencing-topology", None)
 
     def level(self, index, node, devices):
         self.add_child(XmlBase(self.Factory, "fencing-level", "cts-%s.%d" % (node, index), target=node, index=index, devices=devices))
 
     def commit(self):
         self._run("create", self.show(), "configuration", "--allow-create")
 
 
 class Option(XmlBase):
     def __init__(self, Factory, name=None, value=None, section="cib-bootstrap-options"):
         XmlBase.__init__(self, Factory, "cluster_property_set", section)
         if name and value:
             self.add_child(XmlBase(Factory, "nvpair", "cts-%s" % name, name=name, value=value))
 
     def __setitem__(self, key, value):
         self.add_child(XmlBase(self.Factory, "nvpair", "cts-%s" % key, name=key, value=value))
 
     def commit(self):
         self._run("modify", self.show(), "crm_config", "--allow-create")
 
 
 class Expression(XmlBase):
     def __init__(self, Factory, name, attr, op, value=None):
         XmlBase.__init__(self, Factory, "expression", name, attribute=attr, operation=op)
         if value:
             self["value"] = value
 
 
 class Rule(XmlBase):
     def __init__(self, Factory, name, score, op="and", expr=None):
         XmlBase.__init__(self, Factory, "rule", "%s" % name)
         self["boolean-op"] = op
         self["score"] = score
         if expr:
             self.add_child(expr)
 
 
 class Resource(XmlBase):
     def __init__(self, Factory, name, rtype, standard, provider=None):
         XmlBase.__init__(self, Factory, "native", name)
 
         self.rtype = rtype
         self.standard = standard
         self.provider = provider
 
         self.op = []
         self.meta = {}
         self.param = {}
 
         self.scores = {}
         self.needs = {}
         self.coloc = {}
 
         if self.standard == "ocf" and not provider:
             self.provider = "heartbeat"
         elif self.standard == "lsb":
             self.provider = None
 
     def __setitem__(self, key, value):
         self.add_param(key, value)
 
     def add_op(self, name, interval, **kwargs):
         self.op.append(
             XmlBase(self.Factory, "op", "%s-%s" % (name, interval), name=name, interval=interval, **kwargs))
 
     def add_param(self, name, value):
         self.param[name] = value
 
     def add_meta(self, name, value):
         self.meta[name] = value
 
     def prefer(self, node, score="INFINITY", rule=None):
         if not rule:
             rule = Rule(self.Factory, "prefer-%s-r" % node, score,
                         expr=Expression(self.Factory, "prefer-%s-e" % node, "#uname", "eq", node))
         self.scores[node] = rule
 
     def after(self, resource, kind="Mandatory", first="start", then="start", **kwargs):
         kargs = kwargs.copy()
         kargs["kind"] = kind
         if then:
             kargs["first-action"] = "start"
             kargs["then-action"] = then
 
         if first:
             kargs["first-action"] = first
 
         self.needs[resource] = kargs
 
     def colocate(self, resource, score="INFINITY", role=None, withrole=None, **kwargs):
         kargs = kwargs.copy()
         kargs["score"] = score
         if role:
             kargs["rsc-role"] = role
         if withrole:
             kargs["with-rsc-role"] = withrole
 
         self.coloc[resource] = kargs
 
     def constraints(self):
         text = "<constraints>"
 
         for k in self.scores.keys():
             text += '''<rsc_location id="prefer-%s" rsc="%s">''' % (k, self.name)
             text += self.scores[k].show()
             text += '''</rsc_location>'''
 
         for k in self.needs.keys():
             text += '''<rsc_order id="%s-after-%s" first="%s" then="%s"''' % (self.name, k, k, self.name)
             kargs = self.needs[k]
             for kw in kargs.keys():
                 text += ''' %s="%s"''' % (kw, kargs[kw])
             text += '''/>'''
 
         for k in self.coloc.keys():
             text += '''<rsc_colocation id="%s-with-%s" rsc="%s" with-rsc="%s"''' % (self.name, k, self.name, k)
             kargs = self.coloc[k]
             for kw in kargs.keys():
                 text += ''' %s="%s"''' % (kw, kargs[kw])
             text += '''/>'''
 
         text += "</constraints>"
         return text
 
     def show(self):
         text = '''<primitive id="%s" class="%s" type="%s"''' % (self.name, self.standard, self.rtype)
         if self.provider:
             text += ''' provider="%s"''' % (self.provider)
         text += '''>'''
 
         if len(self.meta) > 0:
             text += '''<meta_attributes id="%s-meta">''' % self.name
             for p in self.meta.keys():
                 text += '''<nvpair id="%s-%s" name="%s" value="%s"/>''' % (self.name, p, p, self.meta[p])
             text += '''</meta_attributes>'''
 
         if len(self.param) > 0:
             text += '''<instance_attributes id="%s-params">''' % self.name
             for p in self.param.keys():
                 text += '''<nvpair id="%s-%s" name="%s" value="%s"/>''' % (self.name, p, p, self.param[p])
             text += '''</instance_attributes>'''
 
         if len(self.op) > 0:
             text += '''<operations>'''
             for o in self.op:
                 key = o.name
                 o.name = "%s-%s" % (self.name, key)
                 text += o.show()
                 o.name = key
             text += '''</operations>'''
 
         text += '''</primitive>'''
         return text
 
     def commit(self):
         self._run("create", self.show(), "resources")
         self._run("modify", self.constraints())
 
 
 class Group(Resource):
     def __init__(self, Factory, name):
         Resource.__init__(self, Factory, name, None, None)
         self.tag = "group"
 
     def __setitem__(self, key, value):
         self.add_meta(key, value)
 
     def show(self):
         text = '''<%s id="%s">''' % (self.tag, self.name)
 
         if len(self.meta) > 0:
             text += '''<meta_attributes id="%s-meta">''' % self.name
             for p in self.meta.keys():
                 text += '''<nvpair id="%s-%s" name="%s" value="%s"/>''' % (self.name, p, p, self.meta[p])
             text += '''</meta_attributes>'''
 
         for c in self.children:
             text += c.show()
         text += '''</%s>''' % self.tag
         return text
 
 
 class Clone(Group):
     def __init__(self, Factory, name, child=None):
         Group.__init__(self, Factory, name)
         self.tag = "clone"
         if child:
             self.add_child(child)
 
     def add_child(self, resource):
         if not self.children:
             self.children.append(resource)
         else:
             self.Factory.log("Clones can only have a single child. Ignoring %s" % resource.name)
 
 
 class Master(Clone):
     def __init__(self, Factory, name, child=None):
         Clone.__init__(self, Factory, name, child)
         self.tag = "master"
diff --git a/cts/environment.py b/cts/environment.py
index e76c36d29e..6edf331f09 100644
--- a/cts/environment.py
+++ b/cts/environment.py
@@ -1,680 +1,674 @@
 '''
 Classes related to producing and searching logs
 '''
 
 __copyright__='''
 Copyright (C) 2014 Andrew Beekhof <andrew@beekhof.net>
 Licensed under the GNU GPL.
 '''
 
 #
 # This program is free software; you can redistribute it and/or
 # modify it under the terms of the GNU General Public License
 # as published by the Free Software Foundation; either version 2
 # of the License, or (at your option) any later version.
 #
 # This program is distributed in the hope that it will be useful,
 # but WITHOUT ANY WARRANTY; without even the implied warranty of
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 # GNU General Public License for more details.
 #
 # You should have received a copy of the GNU General Public License
 # along with this program; if not, write to the Free Software
 # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA.
 
-import types, string, select, sys, time, re, os, struct, signal, socket
-import time, syslog, random, traceback, base64, pickle, binascii, fcntl
+import sys, time, os, socket, random
 
 from cts.remote import *
 
 class Environment:
 
     def __init__(self, args):
         self.data = {}
         self.Nodes = []
 
         self["DeadTime"] = 300
         self["StartTime"] = 300
         self["StableTime"] = 30
         self["tests"] = []
         self["IPagent"] = "IPaddr2"
         self["DoStandby"] = 1
         self["DoFencing"] = 1
         self["XmitLoss"] = "0.0"
         self["RecvLoss"] = "0.0"
         self["ClobberCIB"] = 0
         self["CIBfilename"] = None
         self["CIBResource"] = 0
         self["DoBSC"]    = 0
         self["use_logd"] = 0
         self["oprofile"] = []
         self["warn-inactive"] = 0
         self["ListTests"] = 0
         self["benchmark"] = 0
         self["LogWatcher"] = "any"
         self["SyslogFacility"] = "daemon"
         self["LogFileName"] = "/var/log/messages"
         self["Schema"] = "pacemaker-2.0"
         self["Stack"] = "corosync"
         self["stonith-type"] = "external/ssh"
         self["stonith-params"] = "hostlist=all,livedangerously=yes"
         self["loop-minutes"] = 60
         self["valgrind-prefix"] = None
         self["valgrind-procs"] = "cib crmd attrd pengine stonith-ng"
         self["valgrind-opts"] = """--leak-check=full --show-reachable=yes --trace-children=no --num-callers=25 --gen-suppressions=all --suppressions="""+CTSvars.CTS_home+"""/cts.supp"""
 
         self["experimental-tests"] = 0
         self["container-tests"] = 0
         self["valgrind-tests"] = 0
         self["unsafe-tests"] = 1
         self["loop-tests"] = 1
         self["scenario"] = "random"
         self["stats"] = 0
         self["docker"] = 0
 
         self.RandomGen = random.Random()
         self.logger = LogFactory()
 
         self.SeedRandom()
         self.rsh = RemoteFactory().getInstance()
 
         self.target = "localhost"
 
         self.parse_args(args)
         self.discover()
         self.validate()
 
     def SeedRandom(self, seed=None):
         if not seed:
             seed = int(time.time())
 
-        if self.has_key("RandSeed"):
-            self.logger.log("New random seed is: " + str(seed))
-        else:
-            self.logger.log("Random seed is: " + str(seed))
-
         self["RandSeed"] = seed
         self.RandomGen.seed(str(seed))
 
     def dump(self):
         keys = []
         for key in self.data.keys():
             keys.append(key)
 
         keys.sort()
         for key in keys:
             self.logger.debug("Environment["+key+"]:\t"+str(self[key]))
 
     def keys(self):
         return self.data.keys()
 
     def has_key(self, key):
         if key == "nodes":
             return True
 
         return self.data.has_key(key)
 
     def __getitem__(self, key):
         if key == "nodes":
             return self.Nodes
 
         elif key == "Name":
             return self.get_stack_short()
 
         elif self.data.has_key(key):
             return self.data[key]
 
         else:
             return None
 
     def __setitem__(self, key, value):
         if key == "Stack":
             self.set_stack(value)
 
         elif key == "node-limit":
             self.data[key] = value
             self.filter_nodes()
 
         elif key == "nodes":
             self.Nodes = []
             for node in value:
                 # I don't think I need the IP address, etc. but this validates
                 # the node name against /etc/hosts and/or DNS, so it's a
                 # GoodThing(tm).
                 try:
                     n = node.strip()
                     if self.data["docker"] == 0:
-                        gethostbyname_ex(n)
+                        socket.gethostbyname_ex(n)
 
                     self.Nodes.append(n) 
                 except:
                     self.logger.log(node+" not found in DNS... aborting")
                     raise
 
             self.filter_nodes()
 
         else:
             self.data[key] = value
 
     def RandomNode(self):
         '''Choose a random node from the cluster'''
         return self.RandomGen.choice(self["nodes"])
 
     def set_stack(self, name):
         # Normalize stack names
         if name == "heartbeat" or name == "lha":
             self.data["Stack"] = "heartbeat"
 
         elif name == "openais" or name == "ais"  or name == "whitetank":
             self.data["Stack"] = "openais (whitetank)"
 
         elif name == "corosync" or name == "cs" or name == "mcp":
             self.data["Stack"] = "corosync 2.x"
 
         elif name == "cman":
             self.data["Stack"] = "corosync (cman)"
 
         elif name == "v1":
             self.data["Stack"] = "corosync (plugin v1)"
 
         elif name == "v0":
             self.data["Stack"] = "corosync (plugin v0)"
 
         else:
             print "Unknown stack: "+name
             sys.exit(1)
 
     def get_stack_short(self):
         # Create the Cluster Manager object
         if not self.data.has_key("Stack"):
             return "unknown"
 
         elif self.data["Stack"] == "heartbeat":
             return "crm-lha"
 
         elif self.data["Stack"] == "corosync 2.x":
             if self["docker"]:
                 return "crm-mcp-docker"
             else:
                 return "crm-mcp"
 
         elif self.data["Stack"] == "corosync (cman)":
             return "crm-cman"
         
         elif self.data["Stack"] == "corosync (plugin v1)":
             return "crm-plugin-v1"
         
         elif self.data["Stack"] == "corosync (plugin v0)":
             return "crm-plugin-v0"
 
         else:
             LogFactory().log("Unknown stack: "+self.data["stack"])
             sys.exit(1)
 
     def detect_syslog(self):
         # Detect syslog variant
         if not self.has_key("syslogd"):
             if self["have_systemd"]:
                 # Systemd
                 self["syslogd"] = self.rsh(self.target, "systemctl list-units | grep syslog.*\.service.*active.*running | sed 's:.service.*::'", stdout=1).strip()
             else:
                 # SYS-V
                 self["syslogd"] = self.rsh(self.target, "chkconfig --list | grep syslog.*on | awk '{print $1}' | head -n 1", stdout=1).strip()
 
             if not self.has_key("syslogd") or not self["syslogd"]:
                 # default
                 self["syslogd"] = "rsyslog"
 
     def detect_at_boot(self):
         # Detect if the cluster starts at boot
         if not self.has_key("at-boot"):
             atboot = 0
 
             if self["have_systemd"]:
                 # Systemd
                 atboot = atboot or not self.rsh(self.target, "systemctl is-enabled heartbeat.service")
                 atboot = atboot or not self.rsh(self.target, "systemctl is-enabled corosync.service")
                 atboot = atboot or not self.rsh(self.target, "systemctl is-enabled pacemaker.service")
             else:
                 # SYS-V
                 atboot = atboot or not self.rsh(self.target, "chkconfig --list | grep -e corosync.*on -e heartbeat.*on -e pacemaker.*on")
 
             self["at-boot"] = atboot
 
     def detect_ip_offset(self):
         # Try to determine an offset for IPaddr resources
         if self["CIBResource"] and not self.has_key("IPBase"):
             network=self.rsh(self.target, "ip addr | grep inet | grep -v -e link -e inet6 -e '/32' -e ' lo' | awk '{print $2}'", stdout=1).strip()
             self["IPBase"] = self.rsh(self.target, "nmap -sn -n %s | grep 'scan report' | awk '{print $NF}' | sed 's:(::' | sed 's:)::' | sort -V | tail -n 1" % network, stdout=1).strip()
             if not self["IPBase"]:
                 self["IPBase"] = " fe80::1234:56:7890:1000"
                 self.logger.log("Could not determine an offset for IPaddr resources.  Perhaps nmap is not installed on the nodes.")
                 self.logger.log("Defaulting to '%s', use --test-ip-base to override" % self["IPBase"])
 
             elif int(self["IPBase"].split('.')[3]) >= 240:
                 self.logger.log("Could not determine an offset for IPaddr resources. Upper bound is too high: %s %s"
                                 % (self["IPBase"], self["IPBase"].split('.')[3]))
                 self["IPBase"] = " fe80::1234:56:7890:1000"
                 self.logger.log("Defaulting to '%s', use --test-ip-base to override" % self["IPBase"])
 
     def filter_nodes(self):
         if self["node-limit"] > 0:
             if len(self["nodes"]) > self["node-limit"]:
                 self.logger.log("Limiting the number of nodes configured=%d (max=%d)"
                                 %(len(self["nodes"]), self["node-limit"]))
                 while len(self["nodes"]) > self["node-limit"]:
                     self["nodes"].pop(len(self["nodes"])-1)
 
     def validate(self):
         if len(self["nodes"]) < 1:
             print "No nodes specified!"
             sys.exit(1)
 
     def discover(self):
         self.target = random.Random().choice(self["nodes"])
 
         master = socket.gethostname()
 
         # Use the IP where possible to avoid name lookup failures
         for ip in socket.gethostbyname_ex(master)[2]:
             if ip != "127.0.0.1":
                 master = ip
                 break
         self["cts-master"] = master
 
         if not self.has_key("have_systemd"):
             self["have_systemd"] = not self.rsh(self.target, "systemctl list-units")
         
         self.detect_syslog()
         self.detect_at_boot()
         self.detect_ip_offset()
 
         self.validate()
 
     def parse_args(self, args):
         skipthis=None
 
         if not args:
             args=sys.argv[1:]
 
         for i in range(0, len(args)):
             if skipthis:
                 skipthis=None
                 continue
 
             elif args[i] == "-l" or args[i] == "--limit-nodes":
                 skipthis=1
                 self["node-limit"] = int(args[i+1])
 
             elif args[i] == "-r" or args[i] == "--populate-resources":
                 self["CIBResource"] = 1
                 self["ClobberCIB"] = 1
 
             elif args[i] == "--outputfile":
                 skipthis=1
                 self["OutputFile"] = args[i+1]
                 LogFactory().add_file(self["OutputFile"])
 
             elif args[i] == "-L" or args[i] == "--logfile":
                 skipthis=1
                 self["LogWatcher"] = "remote"
                 self["LogAuditDisabled"] = 1
                 self["LogFileName"] = args[i+1]
 
             elif args[i] == "--ip" or args[i] == "--test-ip-base":
                 skipthis=1
                 self["IPBase"] = args[i+1]
                 self["CIBResource"] = 1
                 self["ClobberCIB"] = 1
 
             elif args[i] == "--oprofile":
                 skipthis=1
                 self["oprofile"] = args[i+1].split(' ')
 
             elif args[i] == "--trunc":
                 self["TruncateLog"]=1
 
             elif args[i] == "--list-tests" or args[i] == "--list" :
                 self["ListTests"]=1
 
             elif args[i] == "--benchmark":
                 self["benchmark"]=1
 
             elif args[i] == "--bsc":
                 self["DoBSC"] = 1
                 self["scenario"] = "basic-sanity"
 
             elif args[i] == "--qarsh":
                 RemoteFactory().enable_qarsh()
 
             elif args[i] == "--docker":
                 self["docker"] = 1
                 RemoteFactory().enable_docker()
 
             elif args[i] == "--stonith" or args[i] == "--fencing":
                 skipthis=1
                 if args[i+1] == "1" or args[i+1] == "yes":
                     self["DoFencing"]=1
                 elif args[i+1] == "0" or args[i+1] == "no":
                     self["DoFencing"]=0
                 elif args[i+1] == "rhcs" or args[i+1] == "xvm" or args[i+1] == "virt":
                     self["DoStonith"]=1
                     self["stonith-type"] = "fence_xvm"
                     self["stonith-params"] = "pcmk_arg_map=domain:uname,delay=0"
                 elif args[i+1] == "docker":
                     self["DoStonith"]=1
                     self["stonith-type"] = "fence_docker_cts"
                 elif args[i+1] == "scsi":
                     self["DoStonith"]=1
                     self["stonith-type"] = "fence_scsi"
                     self["stonith-params"] = "delay=0"
                 elif args[i+1] == "ssh" or args[i+1] == "lha":
                     self["DoStonith"]=1
                     self["stonith-type"] = "external/ssh"
                     self["stonith-params"] = "hostlist=all,livedangerously=yes"
                 elif args[i+1] == "north":
                     self["DoStonith"]=1
                     self["stonith-type"] = "fence_apc"
                     self["stonith-params"] = "ipaddr=north-apc,login=apc,passwd=apc,pcmk_host_map=north-01:2;north-02:3;north-03:4;north-04:5;north-05:6;north-06:7;north-07:9;north-08:10;north-09:11;north-10:12;north-11:13;north-12:14;north-13:15;north-14:18;north-15:17;north-16:19;"
                 elif args[i+1] == "south":
                     self["DoStonith"]=1
                     self["stonith-type"] = "fence_apc"
                     self["stonith-params"] = "ipaddr=south-apc,login=apc,passwd=apc,pcmk_host_map=south-01:2;south-02:3;south-03:4;south-04:5;south-05:6;south-06:7;south-07:9;south-08:10;south-09:11;south-10:12;south-11:13;south-12:14;south-13:15;south-14:18;south-15:17;south-16:19;"
                 elif args[i+1] == "east":
                     self["DoStonith"]=1
                     self["stonith-type"] = "fence_apc"
                     self["stonith-params"] = "ipaddr=east-apc,login=apc,passwd=apc,pcmk_host_map=east-01:2;east-02:3;east-03:4;east-04:5;east-05:6;east-06:7;east-07:9;east-08:10;east-09:11;east-10:12;east-11:13;east-12:14;east-13:15;east-14:18;east-15:17;east-16:19;"
                 elif args[i+1] == "west":
                     self["DoStonith"]=1
                     self["stonith-type"] = "fence_apc"
                     self["stonith-params"] = "ipaddr=west-apc,login=apc,passwd=apc,pcmk_host_map=west-01:2;west-02:3;west-03:4;west-04:5;west-05:6;west-06:7;west-07:9;west-08:10;west-09:11;west-10:12;west-11:13;west-12:14;west-13:15;west-14:18;west-15:17;west-16:19;"
                 elif args[i+1] == "openstack":
                     self["DoStonith"]=1
                     self["stonith-type"] = "fence_openstack"
                     
                     print "Obtaining OpenStack credentials from the current environment"
                     self["stonith-params"] = "region=%s,tenant=%s,auth=%s,user=%s,password=%s" % (
                         os.environ['OS_REGION_NAME'],
                         os.environ['OS_TENANT_NAME'],
                         os.environ['OS_AUTH_URL'],
                         os.environ['OS_USERNAME'],
                         os.environ['OS_PASSWORD']
                     )
                     
                 elif args[i+1] == "rhevm":
                     self["DoStonith"]=1
                     self["stonith-type"] = "fence_rhevm"
                     
                     print "Obtaining RHEV-M credentials from the current environment"
                     self["stonith-params"] = "login=%s,passwd=%s,ipaddr=%s,ipport=%s,ssl=1,shell_timeout=10" % (
                         os.environ['RHEVM_USERNAME'],
                         os.environ['RHEVM_PASSWORD'],
                         os.environ['RHEVM_SERVER'],
                         os.environ['RHEVM_PORT'],
                     )
                     
                 else:
                     self.usage(args[i+1])
 
             elif args[i] == "--stonith-type":
                 self["stonith-type"] = args[i+1]
                 skipthis=1
 
             elif args[i] == "--stonith-args":
                 self["stonith-params"] = args[i+1]
                 skipthis=1
 
             elif args[i] == "--standby":
                 skipthis=1
                 if args[i+1] == "1" or args[i+1] == "yes":
                     self["DoStandby"] = 1
                 elif args[i+1] == "0" or args[i+1] == "no":
                     self["DoStandby"] = 0
                 else:
                     self.usage(args[i+1])
 
             elif args[i] == "--clobber-cib" or args[i] == "-c":
                 self["ClobberCIB"] = 1
                 
             elif args[i] == "--cib-filename":
                 skipthis=1
                 self["CIBfilename"] = args[i+1]
 
             elif args[i] == "--xmit-loss":
                 try:
                     float(args[i+1])
                 except ValueError:
                     print ("--xmit-loss parameter should be float")
                     self.usage(args[i+1])
                 skipthis=1
                 self["XmitLoss"] = args[i+1]
 
             elif args[i] == "--recv-loss":
                 try:
                     float(args[i+1])
                 except ValueError:
                     print ("--recv-loss parameter should be float")
                     self.usage(args[i+1])
                 skipthis=1
                 self["RecvLoss"] = args[i+1]
 
             elif args[i] == "--choose":
                 skipthis=1
                 self["tests"].append(args[i+1])
                 self["scenario"] = "sequence"
 
             elif args[i] == "--nodes":
                 skipthis=1
                 self["nodes"] = args[i+1].split(' ')
 
             elif args[i] == "-g" or args[i] == "--group" or args[i] == "--dsh-group":
                 skipthis=1
                 self["OutputFile"] = "%s/cluster-%s.log" % (os.environ['HOME'], args[i+1])
                 LogFactory().add_file(self["OutputFile"], "CTS")
 
                 dsh_file = "%s/.dsh/group/%s" % (os.environ['HOME'], args[i+1])
 
                 # Hacks to make my life easier
                 if args[i+1] == "r6":
                     self["Stack"] = "cman"
                     self["DoStonith"]=1
                     self["stonith-type"] = "fence_xvm"
                     self["stonith-params"] = "delay=0"
                     self["IPBase"] = " fe80::1234:56:7890:4000"
 
                 elif args[i+1] == "virt1":
                     self["Stack"] = "corosync"
                     self["DoStonith"]=1
                     self["stonith-type"] = "fence_xvm"
                     self["stonith-params"] = "delay=0"
                     self["IPBase"] = " fe80::1234:56:7890:1000"
 
                 elif args[i+1] == "east16" or args[i+1] == "nsew":
                     self["Stack"] = "corosync"
                     self["DoStonith"]=1
                     self["stonith-type"] = "fence_apc"
                     self["stonith-params"] = "ipaddr=east-apc,login=apc,passwd=apc,pcmk_host_map=east-01:2;east-02:3;east-03:4;east-04:5;east-05:6;east-06:7;east-07:9;east-08:10;east-09:11;east-10:12;east-11:13;east-12:14;east-13:15;east-14:18;east-15:17;east-16:19;"
                     self["IPBase"] = " fe80::1234:56:7890:2000"
 
                     if args[i+1] == "east16":
                         # Requires newer python than available via nsew
                         self["IPagent"] = "Dummy"
 
                 elif args[i+1] == "corosync8":
                     self["Stack"] = "corosync"
                     self["DoStonith"]=1
                     self["stonith-type"] = "fence_rhevm"
 
                     print "Obtaining RHEV-M credentials from the current environment"
                     self["stonith-params"] = "login=%s,passwd=%s,ipaddr=%s,ipport=%s,ssl=1,shell_timeout=10" % (
                         os.environ['RHEVM_USERNAME'],
                         os.environ['RHEVM_PASSWORD'],
                         os.environ['RHEVM_SERVER'],
                         os.environ['RHEVM_PORT'],
                    )
                     self["IPBase"] = " fe80::1234:56:7890:3000"
 
                 if os.path.isfile(dsh_file):
                     self["nodes"] = []
                     f = open(dsh_file, 'r')
                     for line in f:
                         l = line.strip().rstrip()
                         if not l.startswith('#'):
                             self["nodes"].append(l)
                     f.close()
 
                 else:
                     print("Unknown DSH group: %s" % args[i+1])
 
             elif args[i] == "--syslog-facility" or args[i] == "--facility":
                 skipthis=1
                 self["SyslogFacility"] = args[i+1]
                 
             elif args[i] == "--seed":
                 skipthis=1
                 self.SeedRandom(args[i+1])
 
             elif args[i] == "--warn-inactive":
                 self["warn-inactive"] = 1
 
             elif args[i] == "--schema":
                 skipthis=1
                 self["Schema"] = args[i+1]
 
             elif args[i] == "--ais":
                 self["Stack"] = "openais"
 
             elif args[i] == "--at-boot" or args[i] == "--cluster-starts-at-boot":
                 skipthis=1
                 if args[i+1] == "1" or args[i+1] == "yes":
                     self["at-boot"] = 1
                 elif args[i+1] == "0" or args[i+1] == "no":
                     self["at-boot"] = 0
                 else:
                     self.usage(args[i+1])
 
             elif args[i] == "--heartbeat" or args[i] == "--lha":
                 self["Stack"] = "heartbeat"
 
             elif args[i] == "--hae":
                 self["Stack"] = "openais"
                 self["Schema"] = "hae"
 
             elif args[i] == "--stack":
                 if args[i+1] == "fedora" or args[i+1] == "fedora-17" or args[i+1] == "fedora-18":
                     self["Stack"] = "corosync"
                 elif args[i+1] == "rhel-6":
                     self["Stack"] = "cman"
                 elif args[i+1] == "rhel-7":
                     self["Stack"] = "corosync"
                 else:
                     self["Stack"] = args[i+1]
                 skipthis=1
 
             elif args[i] == "--once":
                 self["scenario"] = "all-once"
 
             elif args[i] == "--boot":
                 self["scenario"] = "boot"
 
             elif args[i] == "--valgrind-tests":
                 self["valgrind-tests"] = 1
 
             elif args[i] == "--no-loop-tests":
                 self["loop-tests"] = 0
 
             elif args[i] == "--loop-minutes":
                 skipthis=1
                 try:
                     self["loop-minutes"]=int(args[i+1])
                 except ValueError:
                     self.usage(args[i])
 
             elif args[i] == "--no-unsafe-tests":
                 self["unsafe-tests"] = 0
 
             elif args[i] == "--experimental-tests":
                 self["experimental-tests"] = 1
 
             elif args[i] == "--container-tests":
                 self["container-tests"] = 1
 
             elif args[i] == "--set":
                 skipthis=1
                 (name, value) = args[i+1].split('=')
                 self[name] = value
                 print "Setting %s = %s" % (name, value)
                 
             elif args[i] == "--help":
                 self.usage(args[i], 0)
 
             elif args[i] == "--":
                 break
 
             else:
                 try:
                     NumIter=int(args[i])
                     self["iterations"] = NumIter
                 except ValueError:
                     self.usage(args[i])
 
     def usage(self, arg, status=1):
         if status:
             print "Illegal argument %s" % arg
         print "usage: " + sys.argv[0] +" [options] number-of-iterations"
         print "\nCommon options: "
         print "\t [--nodes 'node list']        list of cluster nodes separated by whitespace"
         print "\t [--group | -g 'name']        use the nodes listed in the named DSH group (~/.dsh/groups/$name)"
         print "\t [--limit-nodes max]          only use the first 'max' cluster nodes supplied with --nodes"
         print "\t [--stack (v0|v1|cman|corosync|heartbeat|openais)]    which cluster stack is installed"
         print "\t [--list-tests]               list the valid tests"
         print "\t [--benchmark]                add the timing information"
         print "\t "
         print "Options that CTS will usually auto-detect correctly: "
         print "\t [--logfile path]             where should the test software look for logs from cluster nodes"
         print "\t [--syslog-facility name]     which syslog facility should the test software log to"
         print "\t [--at-boot (1|0)]            does the cluster software start at boot time"
         print "\t [--test-ip-base ip]          offset for generated IP address resources"
         print "\t "
         print "Options for release testing: "
         print "\t [--populate-resources | -r]  generate a sample configuration"
         print "\t [--choose name]              run only the named test"
         print "\t [--stonith (1 | 0 | yes | no | rhcs | ssh)]"
         print "\t [--once]                     run all valid tests once"
         print "\t "
         print "Additional (less common) options: "
         print "\t [--clobber-cib | -c ]        erase any existing configuration"
         print "\t [--outputfile path]          optional location for the test software to write logs to"
         print "\t [--trunc]                    truncate logfile before starting"
         print "\t [--xmit-loss lost-rate(0.0-1.0)]"
         print "\t [--recv-loss lost-rate(0.0-1.0)]"
         print "\t [--standby (1 | 0 | yes | no)]"
         print "\t [--fencing (1 | 0 | yes | no | rhcs | lha | openstack )]"
         print "\t [--stonith-type type]"
         print "\t [--stonith-args name=value]"
         print "\t [--bsc]"
         print "\t [--no-loop-tests]            dont run looping/time-based tests"
         print "\t [--no-unsafe-tests]          dont run tests that are unsafe for use with ocfs2/drbd"
         print "\t [--valgrind-tests]           include tests using valgrind"
         print "\t [--experimental-tests]       include experimental tests"
         print "\t [--container-tests]          include pacemaker_remote tests that run in lxc container resources"
         print "\t [--oprofile 'node list']     list of cluster nodes to run oprofile on]"
         print "\t [--qarsh]                    use the QARSH backdoor to access nodes instead of SSH"
         print "\t [--docker]                   Indicates nodes are docker nodes."
         print "\t [--seed random_seed]"
         print "\t [--set option=value]"
         print "\t "
         print "\t Example: "
         print "\t    python sys.argv[0] -g virt1 --stack cs -r --stonith ssh --schema pacemaker-1.0 500"
 
         sys.exit(status)
 
 class EnvFactory:
     instance = None
     def __init__(self):
         pass
 
     def getInstance(self, args=None):
         if not EnvFactory.instance:
             EnvFactory.instance = Environment(args)
         return EnvFactory.instance
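
Environment behaves like a dictionary with a few derived keys ("nodes", "Name") and is normally obtained through the EnvFactory singleton. A minimal sketch of how a caller could construct it (node names and options are placeholders; note that construction also runs discover(), which probes the nodes over the remote shell):

    env = EnvFactory().getInstance(["--nodes", "node1 node2 node3",
                                    "--stack", "corosync", "--once", "500"])
    env.dump()                             # log every Environment[...] value
    print "nodes: " + repr(env["nodes"])   # parsed from --nodes
    print "stack: " + env["Stack"]         # normalized to "corosync 2.x"
    print "short: " + env["Name"]          # e.g. "crm-mcp"
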
diff --git a/cts/logging.py b/cts/logging.py
index dce709f407..8afa6111ca 100644
--- a/cts/logging.py
+++ b/cts/logging.py
@@ -1,113 +1,112 @@
 '''
 Classes related to producing logs
 '''
 
 __copyright__='''
 Copyright (C) 2014 Andrew Beekhof <andrew@beekhof.net>
 Licensed under the GNU GPL.
 '''
 
 #
 # This program is free software; you can redistribute it and/or
 # modify it under the terms of the GNU General Public License
 # as published by the Free Software Foundation; either version 2
 # of the License, or (at your option) any later version.
 #
 # This program is distributed in the hope that it will be useful,
 # but WITHOUT ANY WARRANTY; without even the implied warranty of
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 # GNU General Public License for more details.
 #
 # You should have received a copy of the GNU General Public License
 # along with this program; if not, write to the Free Software
 # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA.
 
-import types, string, sys, time, re, os, syslog
+import types, string, sys, time, os
 
 class Logger:
     TimeFormat = "%b %d %H:%M:%S\t"
 
     def __call__(self, lines):
         raise ValueError("Abstract class member (__call__)")
     def write(self, line):
         return self(line.rstrip())
     def writelines(self, lines):
         for s in lines:
             self.write(s)
         return 1
     def flush(self):
         return 1
     def isatty(self):
         return None
 
 class StdErrLog(Logger):
 
     def __init__(self, filename, tag):
         pass
 
     def __call__(self, lines):
         t = time.strftime(Logger.TimeFormat, time.localtime(time.time()))
         if isinstance(lines, types.StringType):
             sys.__stderr__.writelines([t, lines, "\n"])
         else:
             for line in lines:
                 sys.__stderr__.writelines([t, line, "\n"])
         sys.__stderr__.flush()
 
     def name(self):
         return "StdErrLog"
 
 class FileLog(Logger):
     def __init__(self, filename, tag):
         self.logfile=filename
-        import os
         self.hostname = os.uname()[1]+" "
 
         self.source = ""
         if tag:
             self.source = tag+": "
 
     def __call__(self, lines):
 
         fd = open(self.logfile, "a")
         t = time.strftime(Logger.TimeFormat, time.localtime(time.time()))
 
         if isinstance(lines, types.StringType):
             fd.writelines([t, self.hostname, self.source, lines, "\n"])
         else:
             for line in lines:
                 fd.writelines([t, self.hostname, self.source, line, "\n"])
         fd.close()
 
     def name(self):
         return "FileLog"
 
 class LogFactory:
 
     log_methods=[]
     have_stderr = False
 
     def __init__(self):
         pass
 
     def add_file(self, filename, tag=None):
         if filename:
             LogFactory.log_methods.append(FileLog(filename, tag))
 
     def add_stderr(self):
         if not LogFactory.have_stderr:
             LogFactory.have_stderr = True
             LogFactory.log_methods.append(StdErrLog(None, None))
 
     def log(self, args):
         for logfn in LogFactory.log_methods:
             logfn(string.strip(args))
 
     def debug(self, args):
         for logfn in LogFactory.log_methods:
             if logfn.name() != "StdErrLog":
                 logfn("debug: %s" % string.strip(args))
 
     def traceback(self, traceback):
         for logfn in LogFactory.log_methods:
             traceback.print_exc(50, logfn)
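
LogFactory keeps its sinks in class-level state, so every instance shares the same list of log methods. A minimal sketch of how it is typically wired up (the file path is only an example):

    output = LogFactory()
    output.add_stderr()                     # timestamped lines to stderr
    output.add_file("/tmp/cts.log", "CTS")  # same lines, tagged "CTS", to a file
    output.log("starting test run")         # sent to every registered sink
    output.debug("verbose detail")          # file sinks only; StdErrLog is skipped
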
diff --git a/cts/patterns.py b/cts/patterns.py
index fe5299a5fd..a0403b14a7 100644
--- a/cts/patterns.py
+++ b/cts/patterns.py
@@ -1,532 +1,529 @@
-from UserDict import UserDict
-import sys, time, types, syslog, os, struct, string, signal, traceback, warnings, socket
+import sys, os
 
 from cts.CTSvars import *
 
 patternvariants = {}
 class BasePatterns:
     def __init__(self, name):
         self.name = name
         patternvariants[name] = self
         self.ignore = []
         self.BadNews = []
         self.components = {}
         self.commands = {
             "StatusCmd"      : "crmadmin -t 60000 -S %s 2>/dev/null",
             "CibQuery"       : "cibadmin -Ql",
             "CibAddXml"      : "cibadmin --modify -c --xml-text %s",
             "CibDelXpath"    : "cibadmin --delete --xpath %s",
             # 300,000 == 5 minutes
             "RscRunning"     : CTSvars.CRM_DAEMON_DIR + "/lrmd_test -R -r %s",
             "CIBfile"        : "%s:"+CTSvars.CRM_CONFIG_DIR+"/cib.xml",
             "TmpDir"         : "/tmp",
 
             "BreakCommCmd"   : "iptables -A INPUT -s %s -j DROP >/dev/null 2>&1",
             "FixCommCmd"     : "iptables -D INPUT -s %s -j DROP >/dev/null 2>&1",
 
 # tc qdisc add dev lo root handle 1: cbq avpkt 1000 bandwidth 1000mbit
 # tc class add dev lo parent 1: classid 1:1 cbq rate "$RATE"kbps allot 17000 prio 5 bounded isolated
 # tc filter add dev lo parent 1: protocol ip prio 16 u32 match ip dst 127.0.0.1 match ip sport $PORT 0xFFFF flowid 1:1
 # tc qdisc add dev lo parent 1: netem delay "$LATENCY"msec "$(($LATENCY/4))"msec 10% 2> /dev/null > /dev/null
             "ReduceCommCmd"  : "",
             "RestoreCommCmd" : "tc qdisc del dev lo root",
 
             "UUIDQueryCmd"    : "crmadmin -N",
 
             "MaintenanceModeOn"    : "cibadmin --modify -c --xml-text '<cluster_property_set id=\"cib-bootstrap-options\"><nvpair id=\"cts-maintenance-mode-setting\" name=\"maintenance-mode\" value=\"true\"/></cluster_property_set>'",
             "MaintenanceModeOff"    : "cibadmin --delete --xpath \"//nvpair[@name='maintenance-mode']\"",
 
             "StandbyCmd"      : "crm_attribute -VQ  -U %s -n standby -l forever -v %s 2>/dev/null",
             "StandbyQueryCmd" : "crm_attribute -QG -U %s -n standby -l forever -d off 2>/dev/null",
         }
         self.search = {
             "Pat:DC_IDLE"      : "crmd.*State transition.*-> S_IDLE",
             
             # This won't work if we have multiple partitions
             "Pat:Local_started" : "%s\W.*The local CRM is operational",
             "Pat:Slave_started" : "%s\W.*State transition.*-> S_NOT_DC",
             "Pat:Master_started": "%s\W.*State transition.*-> S_IDLE",
             "Pat:We_stopped"    : "heartbeat.*%s.*Heartbeat shutdown complete",
             "Pat:Logd_stopped"  : "%s\W.*logd:.*Exiting write process",
             "Pat:They_stopped"  : "%s\W.*LOST:.* %s ",
             "Pat:They_dead"     : "node %s.*: is dead",
             "Pat:TransitionComplete" : "Transition status: Complete: complete",
 
             "Pat:Fencing_start" : "Initiating remote operation .* for %s",
-            "Pat:Fencing_ok"    : "stonith.*remote_op_done:.*Operation .* of %s by .*: OK",
+            "Pat:Fencing_ok"    : r"stonith.*:\s*Operation .* of %s by .* for .*@.*: OK",
 
-            "Pat:RscOpOK"       : "process_lrm_event:.*Operation %s_%s.*ok.*confirmed",
-            "Pat:RscRemoteOpOK" : "process_lrm_event:.*Operation %s_%s.*ok.*node=%s, .*confirmed.*true",
-            "Pat:NodeFenced"    : "tengine_stonith_notify:.*Peer %s was terminated .*: OK",
+            "Pat:RscOpOK"       : r"crmd.*:\s*Operation %s_%s.*:\s*ok \(.*confirmed=\S+\)",
+            "Pat:RscRemoteOpOK" : r"crmd.*:\s*Operation %s_%s.*:\s*ok \(node=%s,.*,\s*confirmed=true\)",
+            "Pat:NodeFenced"    : r"crmd.*:\s*Peer\s+%s\s+was\s+terminated\s+\(.*\)\s+by\s+.*\s+for\s+.*:\s+OK",
             "Pat:FenceOpOK"     : "Operation .* for host '%s' with device .* returned: 0",
         }
 
     def get_component(self, key):
         if self.components.has_key(key):
             return self.components[key]
         print "Unknown component '%s' for %s" % (key, self.name)
         return []
 
     def get_patterns(self, key):
         if key == "BadNews":
             return self.BadNews
         elif key == "BadNewsIgnore":
             return self.ignore
         elif key == "Commands":
             return self.commands
         elif key == "Search":
             return self.search
         elif key == "Components":
             return self.components
 
     def __getitem__(self, key):
         if key == "Name":
             return self.name
         elif self.commands.has_key(key):
             return self.commands[key]
         elif self.search.has_key(key):
             return self.search[key]
         else:
             print "Unknown template '%s' for %s" % (key, self.name)
             return None
 
 class crm_lha(BasePatterns):
     def __init__(self, name):
         BasePatterns.__init__(self, name)
 
         self.commands.update({
             "StartCmd"       : "service heartbeat start > /dev/null 2>&1",
             "StopCmd"        : "service heartbeat stop  > /dev/null 2>&1",
             "EpocheCmd"      : "crm_node -H -e",
             "QuorumCmd"      : "crm_node -H -q",
             "ParitionCmd"    : "crm_node -H -p",
         })
 
         self.search.update({
             # Patterns to look for in the log files for various occasions...
             "Pat:ChildKilled"  : "%s\W.*heartbeat.*%s.*killed by signal 9",
             "Pat:ChildRespawn" : "%s\W.*heartbeat.*Respawning client.*%s",
             "Pat:ChildExit"    : "(ERROR|error): Client .* exited with return code",            
         })
         self.BadNews = [
                 r"error:",
                 r"crit:",
                 r"ERROR:",
                 r"CRIT:",
                 r"Shutting down...NOW",
                 r"Timer I_TERMINATE just popped",
                 r"input=I_ERROR",
                 r"input=I_FAIL",
                 r"input=I_INTEGRATED cause=C_TIMER_POPPED",
                 r"input=I_FINALIZED cause=C_TIMER_POPPED",
                 r"input=I_ERROR",
                 r", exiting\.",
                 r"WARN.*Ignoring HA message.*vote.*not in our membership list",
                 r"pengine.*Attempting recovery of resource",
                 r"is taking more than 2x its timeout",
                 r"Confirm not received from",
                 r"Welcome reply not received from",
                 r"Attempting to schedule .* after a stop",
                 r"Resource .* was active at shutdown",
                 r"duplicate entries for call_id",
                 r"Search terminated:",
                 r"No need to invoke the TE",
                 r"global_timer_callback:",
                 r"Faking parameter digest creation",
                 r"Parameters to .* action changed:",
                 r"Parameters to .* changed",
             ]
 
         self.ignore = [
-                "(ERROR|error): crm_abort:.*crm_glib_handler: ",
+                r"(ERROR|error):.*\s+assert\s+at\s+crm_glib_handler:"
                 "(ERROR|error): Message hist queue is filling up",
                 "stonithd.*CRIT: external_hostlist:.*'vmware gethosts' returned an empty hostlist",
                 "stonithd.*(ERROR|error): Could not list nodes for stonith RA external/vmware.",
                 "pengine.*Preventing .* from re-starting",
                 ]
 
 class crm_cs_v0(BasePatterns):
     def __init__(self, name):
         BasePatterns.__init__(self, name)
 
         self.commands.update({
             "EpocheCmd"      : "crm_node -e --openais",
             "QuorumCmd"      : "crm_node -q --openais",
             "ParitionCmd"    : "crm_node -p --openais",
             "StartCmd"       : "service corosync start",
             "StopCmd"        : "service corosync stop",
         })
 
         self.search.update({
 # The next pattern is too early
 #            "Pat:We_stopped"   : "%s.*Service engine unloaded: Pacemaker Cluster Manager",
 # The next pattern would be preferred, but it doesn't always come out
 #            "Pat:We_stopped"   : "%s.*Corosync Cluster Engine exiting with status",
             "Pat:We_stopped"   : "%s\W.*Service engine unloaded: corosync cluster quorum service",
             "Pat:They_stopped" : "%s\W.*crmd.*Node %s\[.*state is now lost",
             "Pat:They_dead"    : "corosync:.*Node %s is now: lost",
 
             "Pat:ChildExit"    : "Child process .* exited",
             "Pat:ChildKilled"  : "%s\W.*corosync.*Child process %s terminated with signal 9",
             "Pat:ChildRespawn" : "%s\W.*corosync.*Respawning failed child process: %s",
 
             "Pat:InfraUp"      : "%s\W.*corosync.*Initializing transport",
             "Pat:PacemakerUp"  : "%s\W.*pacemakerd.*Starting Pacemaker",
         })
 
         self.ignore = [
             r"crm_mon:",
             r"crmadmin:",
             r"update_trace_data",
             r"async_notify:.*strange, client not found",
             r"Parse error: Ignoring unknown option .*nodename",
             r"error: log_operation:.*Operation 'reboot' .* with device 'FencingFail' returned:",
             r"Child process .* terminated with signal 9",
             r"getinfo response error: 1$",
             "sbd.* error: inquisitor_child: DEBUG MODE IS ACTIVE",
-            "sbd.* pcmk:    error: crm_ipc_read: Connection to cib_ro failed",
-            "sbd.* pcmk:    error: mainloop_gio_callback: Connection to cib_ro.* closed .I/O condition=17",
+            r"sbd.* pcmk:\s*error:.*Connection to cib_ro failed",
+            r"sbd.* pcmk:\s*error:.*Connection to cib_ro.* closed .I/O condition=17",
         ]
 
         self.BadNews = [
             r"error:",
             r"crit:",
             r"ERROR:",
             r"CRIT:",
             r"Shutting down...NOW",
             r"Timer I_TERMINATE just popped",
             r"input=I_ERROR",
             r"input=I_FAIL",
             r"input=I_INTEGRATED cause=C_TIMER_POPPED",
             r"input=I_FINALIZED cause=C_TIMER_POPPED",
             r"input=I_ERROR",
             r", exiting\.",
             r"(WARN|warn).*Ignoring HA message.*vote.*not in our membership list",
             r"pengine.*Attempting recovery of resource",
             r"is taking more than 2x its timeout",
             r"Confirm not received from",
             r"Welcome reply not received from",
             r"Attempting to schedule .* after a stop",
             r"Resource .* was active at shutdown",
             r"duplicate entries for call_id",
             r"Search terminated:",
             r":global_timer_callback",
             r"Faking parameter digest creation",
             r"Parameters to .* action changed:",
             r"Parameters to .* changed",
             r"The .* process .* terminated with signal",
             r"Child process .* terminated with signal",
-            r"LogActions:.*Recover",
+            r"pengine:.*Recover .*\(.* -\> .*\)",
             r"rsyslogd.* imuxsock lost .* messages from pid .* due to rate-limiting",
             r"Peer is not part of our cluster",
             r"We appear to be in an election loop",
             r"Unknown node -> we will not deliver message",
-            r"crm_write_blackbox",
+            r"(Blackbox dump requested|Problem detected)",
             r"pacemakerd.*Could not connect to Cluster Configuration Database API",
             r"Receiving messages from a node we think is dead",
             r"share the same cluster nodeid",
             r"share the same name",
 
             #r"crm_ipc_send:.*Request .* failed",
             #r"crm_ipc_send:.*Sending to .* is disabled until pending reply is received",
 
                 # Not inherently bad, but worth tracking
             #r"No need to invoke the TE",
             #r"ping.*: DEBUG: Updated connected = 0",
             #r"Digest mis-match:",
-            r"te_graph_trigger:.*Transition failed: terminated",
-            r"process_ping_reply",
-            r"warn.*:retrieveCib",
+            r"crmd:.*Transition failed: terminated",
+            r"Local CIB .* differs from .*:",
+            r"warn.*:\s*Continuing but .* will NOT be used",
+            r"warn.*:\s*Cluster configuration file .* is corrupt",
             #r"Executing .* fencing operation",
             #r"fence_pcmk.* Call to fence",
             #r"fence_pcmk",
             r"cman killed by node",
             r"Election storm",
             r"stalled the FSA with pending inputs",
         ]
 
 
         self.components["common-ignore"] = [
                     "Pending action:",
                     "error: crm_log_message_adv:",
                     "resources were active at shutdown",
                     "pending LRM operations at shutdown",
                     "Lost connection to the CIB service",
                     "Connection to the CIB terminated...",
                     "Sending message to CIB service FAILED",
                     "apply_xml_diff:.*Diff application failed!",
-                    "crmd.*Action A_RECOVER .* not supported",
+                    r"crmd.*:\s*Action A_RECOVER .* not supported",
                     "unconfirmed_actions:.*Waiting on .* unconfirmed actions",
                     "cib_native_msgready:.*Message pending on command channel",
-                    "crmd.*do_exit:.*Performing A_EXIT_1 - forcefully exiting the CRMd",
+                    r"crmd.*:\s*Performing A_EXIT_1 - forcefully exiting the CRMd",
                     "verify_stopped:.*Resource .* was active at shutdown.  You may ignore this error if it is unmanaged.",
                     "error: attrd_connection_destroy:.*Lost connection to attrd",
-                    "info: te_fence_node:.*Executing .* fencing operation",
-                    "crm_write_blackbox:",
+                    r".*:\s*Executing .* fencing operation \(.*\) on ",
+                    r"(Blackbox dump requested|Problem detected)",
 #                    "error: native_create_actions: Resource .*stonith::.* is active on 2 nodes attempting recovery",
 #                    "error: process_pe_message: Transition .* ERRORs found during PE processing",
             ]
         
         self.components["corosync-ignore"] = [
-            r"error: pcmk_cpg_dispatch:.*Connection to the CPG API failed: Library error",
+            r"error:.*Connection to the CPG API failed: Library error",
             r"The .* process .* exited",
-            r"pacemakerd.*error: pcmk_child_exit:.*Child process .* exited",
-            r"cib.*error: cib_cs_destroy:.*Corosync connection lost",
-            r"stonith-ng.*error: stonith_peer_cs_destroy:.*Corosync connection terminated",
+            r"pacemakerd.*error:.*Child process .* exited",
+            r"cib.*error:.*Corosync connection lost",
+            r"stonith-ng.*error:.*Corosync connection terminated",
             r"The cib process .* exited: Invalid argument",
             r"The attrd process .* exited: Transport endpoint is not connected",
             r"The crmd process .* exited: Link has been severed",
-            r"error: pcmk_child_exit:.*Child process cib .* exited: Invalid argument",
-            r"error: pcmk_child_exit:.*Child process attrd .* exited: Transport endpoint is not connected",
-            r"error: pcmk_child_exit:.*Child process crmd .* exited: Link has been severed",
-            r"lrmd.*error: crm_ipc_read:.*Connection to stonith-ng failed",
-            r"lrmd.*error: mainloop_gio_callback:.*Connection to stonith-ng.* closed",
-            r"lrmd.*error: stonith_connection_destroy_cb:.*LRMD lost STONITH connection",
-            r"crmd.*do_state_transition:.*State transition .* S_RECOVERY",
-            r"crmd.*error: do_log:.*FSA: Input I_ERROR",
-            r"crmd.*error: do_log:.*FSA: Input I_TERMINATE",
-            r"crmd.*error: pcmk_cman_dispatch:.*Connection to cman failed",
-            r"crmd.*error: crmd_fast_exit:.*Could not recover from internal error",
-            r"error: crm_ipc_read:.*Connection to cib_shm failed",
-            r"error: mainloop_gio_callback:.*Connection to cib_shm.* closed",
-            r"error: stonith_connection_failed:.*STONITH connection failed",
+            r"error:.*Child process cib .* exited: Invalid argument",
+            r"error:.*Child process attrd .* exited: Transport endpoint is not connected",
+            r"error:.*Child process crmd .* exited: Link has been severed",
+            r"lrmd.*error:.*Connection to stonith-ng failed",
+            r"lrmd.*error:.*Connection to stonith-ng.* closed",
+            r"lrmd.*error:.*LRMD lost STONITH connection",
+            r"crmd.*State transition .* S_RECOVERY",
+            r"crmd.*error:.*FSA: Input I_ERROR",
+            r"crmd.*error:.*FSA: Input I_TERMINATE",
+            r"crmd.*error:.*Connection to cman failed",
+            r"crmd.*error:.*Could not recover from internal error",
+            r"error:.*Connection to cib_shm failed",
+            r"error:.*Connection to cib_shm.* closed",
+            r"error:.*STONITH connection failed",
             ]
 
         self.components["corosync"] = [
-            r"pacemakerd.*error: cfg_connection_destroy:.*Connection destroyed",
-            r"pacemakerd.*error: mcp_cpg_destroy:.*Connection destroyed",
-            r"crit: attrd_(cs|cpg)_destroy:.*Lost connection to Corosync service",
-            r"stonith_peer_cs_destroy:.*Corosync connection terminated",
-            r"cib_cs_destroy:.*Corosync connection lost!  Exiting.",
-            r"crmd_(cs|quorum)_destroy:.*connection terminated",
+            r"pacemakerd.*error:.*Connection destroyed",
+            r"attrd.*:\s*crit:.*Lost connection to Corosync service",
+            r"stonith.*:\s*Corosync connection terminated",
+            r"cib.*:\s*Corosync connection lost!\s+Exiting.",
+            r"crmd.*:\s*connection terminated",
             r"pengine.*Scheduling Node .* for STONITH",
-            r"tengine_stonith_notify:.*Peer .* was terminated .*: OK",
+            r"crmd.*:\s*Peer %s was terminated \(.*\) by .* for .*:\s*OK",
         ]
 
         self.components["cib-ignore"] = [
             "lrmd.*Connection to stonith-ng failed",
             "lrmd.*Connection to stonith-ng.* closed",
             "lrmd.*LRMD lost STONITH connection",
             "lrmd.*STONITH connection failed, finalizing .* pending operations",
             ]
 
         self.components["cib"] = [
                     "State transition .* S_RECOVERY",
                     "Respawning .* crmd",
                     "Respawning .* attrd",
                     "Connection to cib_.* failed",
                     "Connection to cib_.* closed",
                     "Connection to the CIB terminated...",
                     "(Child process|The) crmd .* exited: Generic Pacemaker error",
                     "(Child process|The) attrd .* exited: (Connection reset by peer|Transport endpoint is not connected)",
                     "Lost connection to CIB service",
                     "crmd.*Input I_TERMINATE from do_recover",
                     "crmd.*I_ERROR.*crmd_cib_connection_destroy",
                     "crmd.*Could not recover from internal error",
                     ]
 
         self.components["lrmd"] = [
                     "State transition .* S_RECOVERY",
                     "LRM Connection failed",
                     "Respawning .* crmd",
                     "Connection to lrmd failed",
                     "Connection to lrmd.* closed",
                     "crmd.*I_ERROR.*lrm_connection_destroy",
                     "(Child process|The) crmd .* exited: Generic Pacemaker error",
                     "crmd.*Input I_TERMINATE from do_recover",
                     "crmd.*Could not recover from internal error",
                     ]
         self.components["lrmd-ignore"] = []
 
         self.components["crmd"] = [
 #                    "WARN: determine_online_status: Node .* is unclean",
 #                    "Scheduling Node .* for STONITH",
 #                    "Executing .* fencing operation",
 # Only if the node wasn't the DC:  "State transition S_IDLE",
                     "State transition .* -> S_IDLE",
                     ]
         self.components["crmd-ignore"] = []
 
         self.components["attrd"] = []
         self.components["attrd-ignore"] = []
 
         self.components["pengine"] = [
                     "State transition .* S_RECOVERY",
                     "Respawning .* crmd",
                     "(The|Child process) crmd .* exited: Generic Pacemaker error",
                     "Connection to pengine failed",
                     "Connection to pengine.* closed",
                     "Connection to the Policy Engine failed",
                     "crmd.*I_ERROR.*save_cib_contents",
                     "crmd.*Input I_TERMINATE from do_recover",
                     "crmd.*Could not recover from internal error",
                     ]
         self.components["pengine-ignore"] = []
 
         self.components["stonith"] = [
             "Connection to stonith-ng failed",
             "LRMD lost STONITH connection",
             "Connection to stonith-ng.* closed",
             "Fencing daemon connection failed",
-            "crmd.*stonith_api_add_notification:.*Callback already present",
+            r"crmd.*:\s*warn.*:\s*Callback already present",
         ]
         self.components["stonith-ignore"] = [
             "LogActions: Recover Fencing",
             "Updating failcount for Fencing",
-            "error: crm_ipc_read: Connection to stonith-ng failed",
-            "error: mainloop_gio_callback: Connection to stonith-ng.*closed (I/O condition=17)",
-            "crit: tengine_stonith_connection_destroy: Fencing daemon connection failed",
-            "error: te_connect_stonith:.*Sign-in failed: triggered a retry",
+            r"error:.*Connection to stonith-ng failed",
+            r"error:.*Connection to stonith-ng.*closed \(I/O condition=17\)",
+            r"crit:.*Fencing daemon connection failed",
+            r"error:.*Sign-in failed: triggered a retry",
             "STONITH connection failed, finalizing .* pending operations.",
-            "process_lrm_event:.*Operation Fencing.* Error",
+            r"crmd.*:\s*Operation Fencing.* Error",
         ]
         self.components["stonith-ignore"].extend(self.components["common-ignore"])
 
 class crm_mcp(crm_cs_v0):
     '''
     The crm version 4 cluster manager class.
     It implements the things we need to talk to and manipulate
     crm clusters running on top of native corosync (no plugins)
     '''
     def __init__(self, name):
         crm_cs_v0.__init__(self, name)
 
         self.commands.update({
             "StartCmd"       : "service corosync start && service pacemaker start",
             "StopCmd"        : "service pacemaker stop; service pacemaker_remote stop; service corosync stop",
 
             "EpocheCmd"      : "crm_node -e",
             "QuorumCmd"      : "crm_node -q",
             "ParitionCmd"    : "crm_node -p",
         })
 
         self.search.update({
             # Close enough... "Corosync Cluster Engine exiting normally" isn't printed
             #   reliably and there's little interest in doing anything about it
             "Pat:We_stopped"   : "%s\W.*Unloading all Corosync service engines",
             "Pat:They_stopped" : "%s\W.*crmd.*Node %s\[.*state is now lost",
             "Pat:They_dead"    : "crmd.*Node %s\[.*state is now lost",
 
             "Pat:ChildExit"    : "The .* process exited",
             "Pat:ChildKilled"  : "%s\W.*pacemakerd.*The %s process .* terminated with signal 9",
             "Pat:ChildRespawn" : "%s\W.*pacemakerd.*Respawning failed child process: %s",
         })
 
 #        if self.Env["have_systemd"]:
 #            self.update({
 #                # When systemd is in use, we can look for this instead
 #                "Pat:We_stopped"   : "%s.*Stopped Corosync Cluster Engine",
 #            })
 
 class crm_mcp_docker(crm_mcp):
     '''
     The crm version 4 cluster manager class.
     It implements the things we need to talk to and manipulate
     crm clusters running on top of native corosync (no plugins)
     '''
     def __init__(self, name):
         crm_mcp.__init__(self, name)
 
         self.commands.update({
             "StartCmd"       : "pcmk_start",
             "StopCmd"        : "pcmk_stop",
         })
 
 class crm_cman(crm_cs_v0):
     '''
     The crm version 3 cluster manager class.
     It implements the things we need to talk to and manipulate
     crm clusters running on top of openais
     '''
     def __init__(self, name):
         crm_cs_v0.__init__(self, name)
 
         self.commands.update({
             "StartCmd"       : "service pacemaker start",
             "StopCmd"        : "service pacemaker stop; service pacemaker_remote stop",
 
             "EpocheCmd"      : "crm_node -e --cman",
             "QuorumCmd"      : "crm_node -q --cman",
             "ParitionCmd"    : "crm_node -p --cman",
 
             "Pat:We_stopped"   : "%s.*Unloading all Corosync service engines",
             "Pat:They_stopped" : "%s\W.*crmd.*Node %s\[.*state is now lost",
             "Pat:They_dead"    : "crmd.*Node %s\[.*state is now lost",
 
             "Pat:ChildKilled"  : "%s\W.*pacemakerd.*The %s process .* terminated with signal 9",
             "Pat:ChildRespawn" : "%s\W.*pacemakerd.*Respawning failed child process: %s",
         })
 
 
 class PatternSelector:
 
     def __init__(self, name=None):
         self.name = name
         self.base = BasePatterns("crm-base")
 
         if not name:
             crm_cs_v0("crm-plugin-v0")
             crm_cman("crm-cman")
             crm_mcp("crm-mcp")
             crm_lha("crm-lha")
         elif name == "crm-lha":
             crm_lha(name)
         elif name == "crm-plugin-v0":
             crm_cs_v0(name)
         elif name == "crm-cman":
             crm_cman(name)
         elif name == "crm-mcp":
             crm_mcp(name)
         elif name == "crm-mcp-docker":
             crm_mcp_docker(name)
 
     def get_variant(self, variant):
         if patternvariants.has_key(variant):
             return patternvariants[variant]
         print "defaulting to crm-base for %s" % variant
         return self.base
 
     def get_patterns(self, variant, kind):
         return self.get_variant(variant).get_patterns(kind)
 
     def get_template(self, variant, key):
         v = self.get_variant(variant)
         return v[key]
 
     def get_component(self, variant, kind):
         return self.get_variant(variant).get_component(kind)
 
     def __getitem__(self, key):
         return self.get_template(self.name, key)
 
 # python cts/patterns.py -k crm-mcp -t StartCmd
 if __name__ == '__main__':
 
     pdir=os.path.dirname(sys.path[0])
     sys.path.insert(0, pdir) # So that things work from the source directory
 
-    from cts.CTSvars   import *
-
     kind=None
     template=None
 
     skipthis=None
     args=sys.argv[1:]
     for i in range(0, len(args)):
        if skipthis:
            skipthis=None
            continue
 
        elif args[i] == "-k" or args[i] == "--kind":
            skipthis=1
            kind = args[i+1]
 
        elif args[i] == "-t" or args[i] == "--template":
            skipthis=1
            template = args[i+1]
 
        else:
            print "Illegal argument " + args[i]
 
 
     print PatternSelector(kind)[template]
diff --git a/cts/remote.py b/cts/remote.py
index 7920fc9756..b32b028e3f 100644
--- a/cts/remote.py
+++ b/cts/remote.py
@@ -1,276 +1,271 @@
 '''
 Classes related to running commands remotely
 '''
 
 __copyright__='''
 Copyright (C) 2014 Andrew Beekhof <andrew@beekhof.net>
 Licensed under the GNU GPL.
 '''
 
 #
 # This program is free software; you can redistribute it and/or
 # modify it under the terms of the GNU General Public License
 # as published by the Free Software Foundation; either version 2
 # of the License, or (at your option) any later version.
 #
 # This program is distributed in the hope that it will be useful,
 # but WITHOUT ANY WARRANTY; without even the implied warranty of
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 # GNU General Public License for more details.
 #
 # You should have received a copy of the GNU General Public License
 # along with this program; if not, write to the Free Software
 # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA.
 
-import types, string, select, sys, time, re, os, struct, signal
-import time, syslog, random, traceback, base64, pickle, binascii, fcntl
+import string, sys, re, os
 
-from cts.logging import LogFactory 
-
-from socket import gethostbyname_ex
-from UserDict import UserDict
 from subprocess import Popen,PIPE
+from threading import Thread
 
 pdir=os.path.dirname(sys.path[0])
 sys.path.insert(0, pdir) # So that things work from the source directory
 
 from cts.CTSvars import *
 from cts.logging import *
-from threading import Thread
 
 trace_rsh=None
 trace_lw=None
 
 class AsyncWaitProc(Thread):
     def __init__(self, proc, node, command, completionDelegate=None):
         self.proc = proc
         self.node = node
         self.command = command
         self.logger = LogFactory()
         self.delegate = completionDelegate;
         Thread.__init__(self)
 
     def run(self):
         outLines = None
         errLines = None
         self.logger.debug("cmd: async: target=%s, pid=%d: %s" % (self.node, self.proc.pid, self.command))
 
         self.proc.wait()
         self.logger.debug("cmd: pid %d returned %d" % (self.proc.pid, self.proc.returncode))
 
         if self.proc.stderr:
             errLines = self.proc.stderr.readlines()
             self.proc.stderr.close()
             for line in errLines:
                 self.logger.debug("cmd: stderr[%d]: %s" % (self.proc.pid, line))
 
         if self.proc.stdout:
             outLines = self.proc.stdout.readlines()
             self.proc.stdout.close()
 #            for line in outLines:
 #                self.logger.debug("cmd: stdout[%d]: %s" % (self.proc.pid, line))
 
         if self.delegate:
             self.delegate.async_complete(self.proc.pid, self.proc.returncode, outLines, errLines)
 
 class AsyncRemoteCmd(Thread):
     def __init__(self, node, command, completionDelegate=None):
         self.proc = None
         self.node = node
         self.command = command
         self.logger = LogFactory()
         self.delegate = completionDelegate;
         Thread.__init__(self)
 
     def run(self):
         outLines = None
         errLines = None
 
         self.proc = Popen(self.command, stdout = PIPE, stderr = PIPE, close_fds = True, shell = True)
 
         self.logger.debug("cmd: async: target=%s, pid=%d: %s" % (self.node, self.proc.pid, self.command))
         self.proc.wait()
         self.logger.debug("cmd: pid %d returned %d to %s" % (self.proc.pid, self.proc.returncode, repr(self.delegate)))
 
         if self.proc.stderr:
             errLines = self.proc.stderr.readlines()
             self.proc.stderr.close()
             for line in errLines:
                 self.logger.debug("cmd: stderr[%d]: %s" % (self.proc.pid, line))
 
         if self.proc.stdout:
             outLines = self.proc.stdout.readlines()
             self.proc.stdout.close()
 #            for line in outLines:
 #                self.logger.log("cmd: stdout[%d]: %s" % (self.proc.pid, line))
 
         if self.delegate:
             self.delegate.async_complete(self.proc.pid, self.proc.returncode, outLines, errLines)
 
 class RemotePrimitives:
     def __init__(self, Command=None, CpCommand=None):
         if CpCommand:
             self.CpCommand = CpCommand
         else:
             #        -B: batch mode, -q: no stats (quiet)
             self.CpCommand = "scp -B -q"
 
         if Command:
             self.Command = Command
         else:
             #   -n: no stdin, -x: no X11,
             #   -o ServerAliveInterval=5 disconnect after 3*5s if the server stops responding
             self.Command = "ssh -l root -n -x -o ServerAliveInterval=5 -o ConnectTimeout=10 -o TCPKeepAlive=yes -o ServerAliveCountMax=3 "
 
 class RemoteExec:
     '''This is an abstract remote execution class.  It runs a command on another
        machine - somehow.  The somehow is up to us.  This particular
        class uses ssh.
        Most of the work is done by fork/exec of ssh or scp.
     '''
 
     def __init__(self, rsh, silent=False):
-        print repr(self)
         self.async = []
         self.rsh = rsh
         self.silent = silent
         self.logger = LogFactory()
 
         if trace_rsh:
             self.silent = False
 
         self.OurNode=string.lower(os.uname()[1])
 
     def _fixcmd(self, cmd):
         return re.sub("\'", "'\\''", cmd)
 
     def _cmd(self, *args):
 
         '''Compute the string that will run the given command on the
         given remote system'''
 
         args= args[0]
         sysname = args[0]
         command = args[1]
 
         #print "sysname: %s, us: %s" % (sysname, self.OurNode)
         if sysname == None or string.lower(sysname) == self.OurNode or sysname == "localhost":
             ret = command
         else:
             ret = self.rsh.Command + " " + sysname + " '" + self._fixcmd(command) + "'"
         #print ("About to run %s\n" % ret)
         return ret
 
     def log(self, args):
         if not self.silent:
             self.logger.log(args)
 
     def debug(self, args):
         if not self.silent:
             self.logger.debug(args)
 
     def call_async(self, node, command, completionDelegate=None):
         #if completionDelegate: print "Waiting for %d on %s: %s" % (proc.pid, node, command)
         aproc = AsyncRemoteCmd(node, self._cmd([node, command]), completionDelegate=completionDelegate)
         aproc.start()
         return aproc
 
 
     def __call__(self, node, command, stdout=0, synchronous=1, silent=False, blocking=True, completionDelegate=None):
         '''Run the given command on the given remote system
         If you call this class like a function, this is the function that gets
         called.  It just runs it roughly as though it were a system() call
         on the remote machine.  The first argument is name of the machine to
         run it on.
         '''
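         # Usage sketch (node name and command are illustrative; the stdout values
         # mirror the return logic further down in this method):
         #   rsh = RemoteFactory().getInstance()
         #   rc = rsh("node1", "crm_node -q")                     # stdout=0: exit code only
         #   first = rsh("node1", "crm_node -q", stdout=1)        # first line of output
         #   (rc, lines) = rsh("node1", "crm_node -q", stdout=2)  # exit code plus all output lines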
 
         if trace_rsh:
             silent = False
 
         rc = 0
         result = None
         proc = Popen(self._cmd([node, command]),
                      stdout = PIPE, stderr = PIPE, close_fds = True, shell = True)
 
         #if completionDelegate: print "Waiting for %d on %s: %s" % (proc.pid, node, command)
         if not synchronous and proc.pid > 0 and not self.silent:
             aproc = AsyncWaitProc(proc, node, command, completionDelegate=completionDelegate)
             aproc.start()
             return 0
 
         #if not blocking:
+        #    import fcntl
         #    fcntl.fcntl(proc.stdout.fileno(), fcntl.F_SETFL, os.O_NONBLOCK)
 
         if proc.stdout:
             if stdout == 1:
                 result = proc.stdout.readline()
             else:
                 result = proc.stdout.readlines()
             proc.stdout.close()
         else:
             self.log("No stdout stream")
 
         rc = proc.wait()
 
         if not silent: self.debug("cmd: target=%s, rc=%d: %s" % (node, rc, command))
         if stdout == 1:
             return result
 
         if proc.stderr:
             errors = proc.stderr.readlines()
             proc.stderr.close()
 
         if completionDelegate:
             completionDelegate.async_complete(proc.pid, proc.returncode, result, errors)
 
         if not silent:
             for err in errors:
                 if stdout == 3:
                     result.append("error: "+err)
                 else:
                     self.debug("cmd: stderr: %s" % err)
 
         if stdout == 0:
             if not silent and result:
                 for line in result:
                     self.debug("cmd: stdout: %s" % line)
             return rc
 
         return (rc, result)
 
     def cp(self, source, target, silent=False):
         '''Perform a remote copy'''
         cpstring = self.rsh.CpCommand  + " \'" + source + "\'"  + " \'" + target + "\'"
         rc = os.system(cpstring)
         if trace_rsh:
             silent = False
         if not silent: self.debug("cmd: rc=%d: %s" % (rc, cpstring))
 
         return rc
 
 class RemoteFactory:
     # Class variables
     rsh = RemotePrimitives()
     instance = None
 
     def getInstance(self):
         if not RemoteFactory.instance:
             RemoteFactory.instance = RemoteExec(RemoteFactory.rsh, False)
         return RemoteFactory.instance
 
     def new(self, silent=False):
         return RemoteExec(RemoteFactory.rsh, silent)
 
     def enable_docker(self):
         print "Using DOCKER backend for connections to cluster nodes"
 
         RemoteFactory.rsh.Command = "/usr/libexec/phd/docker/phd_docker_remote_cmd "
         RemoteFactory.rsh.CpCommand = "/usr/libexec/phd/docker/phd_docker_cp"
 
     def enable_qarsh(self):
         # http://nstraz.wordpress.com/2008/12/03/introducing-qarsh/
         print "Using QARSH for connections to cluster nodes"
 
         RemoteFactory.rsh.Command = "qarsh -t 300 -l root"
         RemoteFactory.rsh.CpCommand = "qacp -q"
 
diff --git a/cts/watcher.py b/cts/watcher.py
index 41e4100cbd..c4ea1b02fe 100644
--- a/cts/watcher.py
+++ b/cts/watcher.py
@@ -1,549 +1,548 @@
 '''
 Classes related to searching logs
 '''
 
 __copyright__='''
 Copyright (C) 2014 Andrew Beekhof <andrew@beekhof.net>
 Licensed under the GNU GPL.
 '''
 
 #
 # This program is free software; you can redistribute it and/or
 # modify it under the terms of the GNU General Public License
 # as published by the Free Software Foundation; either version 2
 # of the License, or (at your option) any later version.
 #
 # This program is distributed in the hope that it will be useful,
 # but WITHOUT ANY WARRANTY; without even the implied warranty of
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 # GNU General Public License for more details.
 #
 # You should have received a copy of the GNU General Public License
 # along with this program; if not, write to the Free Software
 # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA.
 
-import types, string, select, sys, time, re, os, struct, signal
-import time, syslog, random, traceback, base64, pickle, binascii, fcntl
-import threading
-
+import time, re, os, threading
 
 from cts.remote import *
 from cts.logging import *
 
 has_log_watcher = {}
 log_watcher_file = "cts_log_watcher.py"
 log_watcher_bin = CTSvars.CRM_DAEMON_DIR + "/" + log_watcher_file
 log_watcher = """
 import sys, os, fcntl
 
 '''
 Remote logfile reader for CTS
 Reads a specified number of lines from the supplied offset
 Returns the current offset
 
 Contains logic for handling truncation
 '''
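 # Example invocation (path and argument values are illustrative; the real command
 # line is built by FileObj.harvest_async further down in watcher.py; an offset of
 # EOF simply reports the current end of the file):
 #   python cts_log_watcher.py -t node1 -p CTSwatcher: -l 200 -f /var/log/messages -o EOF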
 
 limit    = 0
 offset   = 0
 prefix   = ''
 filename = '/var/log/messages'
 
 skipthis=None
 args=sys.argv[1:]
 for i in range(0, len(args)):
     if skipthis:
         skipthis=None
         continue
 
     elif args[i] == '-l' or args[i] == '--limit':
         skipthis=1
         limit = int(args[i+1])
 
     elif args[i] == '-f' or args[i] == '--filename':
         skipthis=1
         filename = args[i+1]
 
     elif args[i] == '-o' or args[i] == '--offset':
         skipthis=1
         offset = args[i+1]
 
     elif args[i] == '-p' or args[i] == '--prefix':
         skipthis=1
         prefix = args[i+1]
 
     elif args[i] == '-t' or args[i] == '--tag':
         skipthis=1
 
 if not os.access(filename, os.R_OK):
     print prefix + 'Last read: %d, limit=%d, count=%d - unreadable' % (0, limit, 0)
     sys.exit(1)
 
 logfile=open(filename, 'r')
 logfile.seek(0, os.SEEK_END)
 newsize=logfile.tell()
 
 if offset != 'EOF':
     offset = int(offset)
     if newsize >= offset:
         logfile.seek(offset)
     else:
         print prefix + ('File truncated from %d to %d' % (offset, newsize))
         if (newsize*1.05) < offset:
             logfile.seek(0)
         # else: we probably just lost a few logs after a fencing op
         #       continue from the new end
         # TODO: accept a timestamp and discard all messages older than it
 
 # Don't block when we reach EOF
 fcntl.fcntl(logfile.fileno(), fcntl.F_SETFL, os.O_NONBLOCK)
 
 count = 0
 while True:
     if logfile.tell() >= newsize:   break
     elif limit and count >= limit: break
 
     line = logfile.readline()
     if not line: break
 
     print line.strip()
     count += 1
 
 print prefix + 'Last read: %d, limit=%d, count=%d' % (logfile.tell(), limit, count)
 logfile.close()
 """
 
 class SearchObj:
     def __init__(self, filename, host=None, name=None):
 
         self.limit = None
         self.cache = []
         self.logger = LogFactory()
         self.host = host
         self.name = name
         self.filename = filename
         self.rsh = RemoteFactory().getInstance()
 
         self.offset = "EOF"
 
         if host == None:
             host = "localhost"
 
     def __str__(self):
         if self.host:
             return "%s:%s" % (self.host, self.filename)
         return self.filename
 
     def log(self, args):
         message = "lw: %s: %s" % (self, args)
         self.logger.log(message)
 
     def debug(self, args):
         message = "lw: %s: %s" % (self, args)
         self.logger.debug(message)
 
     def harvest(self, delegate=None):
         async = self.harvest_async(delegate)
         async.join()
 
     def harvest_async(self, delegate=None):
         self.log("Not implemented")
         raise
 
     def end(self):
         self.debug("Unsetting the limit")
         # Unset the limit
         self.limit = None
 
 class FileObj(SearchObj):
     def __init__(self, filename, host=None, name=None):
         global has_log_watcher
         SearchObj.__init__(self, filename, host, name)
 
-        if not has_log_watcher.has_key(host):
+        if host is not None:
+            if not has_log_watcher.has_key(host):
 
-            global log_watcher
-            global log_watcher_bin
+                global log_watcher
+                global log_watcher_bin
 
-            self.debug("Installing %s on %s" % (log_watcher_file, host))
+                self.debug("Installing %s on %s" % (log_watcher_file, host))
 
-            os.system("cat << END >> %s\n%s\nEND" %(log_watcher_file, log_watcher))
-            os.system("chmod 755 %s" %(log_watcher_file))
+                os.system("cat << END >> %s\n%s\nEND" %(log_watcher_file, log_watcher))
+                os.system("chmod 755 %s" %(log_watcher_file))
 
-            self.rsh.cp(log_watcher_file, "root@%s:%s" % (host, log_watcher_bin))
-            has_log_watcher[host] = 1
+                self.rsh.cp(log_watcher_file, "root@%s:%s" % (host, log_watcher_bin))
+                has_log_watcher[host] = 1
 
-            os.system("rm -f %s" %(log_watcher_file))
+                os.system("rm -f %s" %(log_watcher_file))
 
-        self.harvest()
+            self.harvest()
 
     def async_complete(self, pid, returncode, outLines, errLines):
         for line in outLines:
             match = re.search("^CTSwatcher:Last read: (\d+)", line)
             if match:
                 last_offset = self.offset
                 self.offset = match.group(1)
                 #if last_offset == "EOF": self.debug("Got %d lines, new offset: %s" % (len(outLines), self.offset))
                 self.debug("Got %d lines, new offset: %s  %s" % (len(outLines), self.offset, repr(self.delegate)))
 
             elif re.search("^CTSwatcher:.*truncated", line):
                 self.log(line)
             elif re.search("^CTSwatcher:", line):
                 self.debug("Got control line: "+ line)
             else:
                 self.cache.append(line)
 
         if self.delegate:
             self.delegate.async_complete(pid, returncode, self.cache, errLines)
 
     def harvest_async(self, delegate=None):
         self.delegate = delegate
         self.cache = []
 
         if self.limit != None and self.offset > self.limit:
             if self.delegate:
                 self.delegate.async_complete(-1, -1, [], [])
             return None
 
         global log_watcher_bin
         return self.rsh.call_async(self.host,
-                                   "python %s -t %s -p CTSwatcher: -l 200 -f %s -o %s -t %s" % (log_watcher_bin, self.name, self.filename, self.offset, self.name),
+                                   "python %s -t %s -p CTSwatcher: -l 200 -f %s -o %s" % (log_watcher_bin, self.name, self.filename, self.offset),
                 completionDelegate=self)
 
     def setend(self):
         if self.limit: 
             return
 
         global log_watcher_bin
         (rc, lines) = self.rsh(self.host,
-                               "python %s -t %s -p CTSwatcher: -l 2 -f %s -o %s -t %s" % (log_watcher_bin, self.name, self.filename, "EOF", self.name),
+                               "python %s -t %s -p CTSwatcher: -l 2 -f %s -o %s" % (log_watcher_bin, self.name, self.filename, "EOF"),
                  None, silent=True)
 
         for line in lines:
             match = re.search("^CTSwatcher:Last read: (\d+)", line)
             if match:
                 last_offset = self.offset
                 self.limit = int(match.group(1))
                 #if last_offset == "EOF": self.debug("Got %d lines, new offset: %s" % (len(lines), self.offset))
                 self.debug("Set limit to: %d" % self.limit)
 
         return
 
 class JournalObj(SearchObj):
 
     def __init__(self, host=None, name=None):
         SearchObj.__init__(self, name, host, name)
         self.harvest()
 
     def async_complete(self, pid, returncode, outLines, errLines):
         #self.log( "%d returned on %s" % (pid, self.host))
         foundCursor = False
         for line in outLines:
             match = re.search("^-- cursor: ([^.]+)", line)
             if match:
                 foundCursor = True
                 last_offset = self.offset
                 self.offset = match.group(1).strip()
                 self.debug("Got %d lines, new cursor: %s" % (len(outLines), self.offset))
             else:
                 self.cache.append(line)
 
         if self.limit and not foundCursor:
             self.hitLimit = True
             self.debug("Got %d lines but no cursor: %s" % (len(outLines), self.offset))
             
             # Get the current cursor
             (rc, outLines) = self.rsh(self.host, "journalctl -q -n 0 --show-cursor", stdout=None, silent=True, synchronous=True)
             for line in outLines:
                 match = re.search("^-- cursor: ([^.]+)", line)
                 if match:
                     last_offset = self.offset
                     self.offset = match.group(1).strip()
                     self.debug("Got %d lines, new cursor: %s" % (len(outLines), self.offset))
                 else:
                     self.log("Not a new cursor: %s" % line)
                     self.cache.append(line)
 
         if self.delegate:
             self.delegate.async_complete(pid, returncode, self.cache, errLines)
 
     def harvest_async(self, delegate=None):
         self.delegate = delegate
         self.cache = []
 
         # Use --lines to prevent journalctl from overflowing the Popen input buffer
         if self.limit and self.hitLimit:
             return None
 
         elif self.limit:
             command = "journalctl -q --after-cursor='%s' --until '%s' --lines=200 --show-cursor" % (self.offset, self.limit)
         else:
             command = "journalctl -q --after-cursor='%s' --lines=200 --show-cursor" % (self.offset)
 
         if self.offset == "EOF":
             command = "journalctl -q -n 0 --show-cursor"
 
         return self.rsh.call_async(self.host, command, completionDelegate=self)
 
     def setend(self):
         if self.limit: 
             return
 
         self.hitLimit = False
         (rc, lines) = self.rsh(self.host, "date +'%Y-%m-%d %H:%M:%S'", stdout=None, silent=True)
 
         for line in lines:
             self.limit = line.strip()
             self.debug("Set limit to: %s" % self.limit)
 
 
         return
 
 class LogWatcher(RemoteExec):
 
     '''This class watches logs for messages that fit certain regular
        expressions.  Watching logs for events isn't the ideal way
        to do business, but it's better than nothing :-)
 
        On the other hand, this class is really pretty cool ;-)
 
        The way you use this class is as follows:
           Construct a LogWatcher object
           Call setwatch() when you want to start watching the log
           Call look() to scan the log looking for the patterns
     '''
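     # Minimal usage sketch (the log path, patterns, and host names are illustrative,
     # not taken from a real cluster):
     #
     #   watch = LogWatcher("/var/log/messages", [r"pattern one", r"pattern two"],
     #                      name="example", timeout=30,
     #                      hosts=["node1", "node2"], kind="remote")
     #   watch.setwatch()             # mark the current end of each log
     #   line = watch.look()          # first line matching any pattern, or None on timeout
     #   lines = watch.lookforall()   # keep looking until every pattern has matched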
 
     def __init__(self, log, regexes, name="Anon", timeout=10, debug_level=None, silent=False, hosts=None, kind=None):
         '''This is the constructor for the LogWatcher class.  It takes a
         log name to watch, and a list of regular expressions to watch for.
         '''
         self.logger = LogFactory()
 
         self.name        = name
         self.regexes     = regexes
         self.debug_level = debug_level
         self.whichmatch  = -1
         self.unmatched   = None
         self.cache_lock = threading.Lock()
 
         self.file_list = []
         self.line_cache = []
 
         #  Validate our arguments.  Better sooner than later ;-)
         for regex in regexes:
             assert re.compile(regex)
 
         if kind:
             self.kind    = kind
         else:
             raise
             self.kind    = self.Env["LogWatcher"]
 
         if log:
             self.filename    = log
         else:
             raise
             self.filename    = self.Env["LogFileName"]
 
         if hosts:
             self.hosts = hosts
         else:
             raise
             self.hosts = self.Env["nodes"]
 
         if trace_lw:
             self.debug_level = 3
             silent = False
 
         if not silent:
             for regex in self.regexes:
                 self.debug("Looking for regex: "+regex)
 
         self.Timeout = int(timeout)
         self.returnonlymatch = None
 
     def debug(self, args):
         message = "lw: %s: %s" % (self.name, args)
         self.logger.debug(message)
 
     def setwatch(self):
         '''Mark the place to start watching the log from.
         '''
 
         if self.kind == "remote":
             for node in self.hosts:
                 self.file_list.append(FileObj(self.filename, node, self.name))
 
         elif self.kind == "journal":
             for node in self.hosts:
                 self.file_list.append(JournalObj(node, self.name))
 
         else:
             self.file_list.append(FileObj(self.filename))
 
         # print "%s now has %d files" % (self.name, len(self.file_list))
 
     def __del__(self):
         if self.debug_level > 1: self.debug("Destroy")
 
     def ReturnOnlyMatch(self, onlymatch=1):
         '''Specify one or more subgroups of the match to return rather than the whole string
            http://www.python.org/doc/2.5.2/lib/match-objects.html
         '''
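         # For example (the pattern is illustrative): with a regex such as
         # r"Operation (\S+) .*: ok" in the watch, ReturnOnlyMatch(1) makes look()
         # return just the captured operation name rather than the whole matched line.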
         self.returnonlymatch = onlymatch
 
     def async_complete(self, pid, returncode, outLines, errLines):
         # TODO: Probably need a lock for updating self.line_cache
         self.logger.debug("%s: Got %d lines from %d (total %d)" % (self.name, len(outLines), pid, len(self.line_cache)))
         if len(outLines):
             self.cache_lock.acquire()
             self.line_cache.extend(outLines)
             self.cache_lock.release()
 
     def __get_lines(self, timeout):
         count=0
         if not len(self.file_list):
             raise ValueError("No sources to read from")
 
         pending = []
         #print "%s waiting for %d operations" % (self.name, self.pending)
         for f in self.file_list:
             t = f.harvest_async(self)
             if t:
                 pending.append(t)
 
         for t in pending:
             t.join(60.0)
             if t.isAlive():
                 self.logger.log("%s: Aborting after 20s waiting for %s logging commands" % (self.name, repr(t)))
                 return
 
         #print "Got %d lines" % len(self.line_cache)
 
     def end(self):
         for f in self.file_list:
             f.end()
 
     def look(self, timeout=None, silent=False):
         '''Examine the log looking for the given patterns.
         It starts looking from the place marked by setwatch().
         This function looks in the file in the fashion of tail -f.
         It properly recovers from log file truncation, but not from
         removing and recreating the log.  It would be nice if it
         recovered from this as well :-)
 
         We return the first line which matches any of our patterns.
         '''
         if timeout == None: timeout = self.Timeout
 
         if trace_lw:
             silent = False
 
         lines=0
         needlines=True
         begin=time.time()
         end=begin+timeout+1
         if self.debug_level > 2: self.debug("starting single search: timeout=%d, begin=%d, end=%d" % (timeout, begin, end))
 
         if not self.regexes:
             self.debug("Nothing to look for")
             return None
 
         if timeout == 0:
             for f in self.file_list:
                 f.setend()
 
         while True:
             if len(self.line_cache):
                 lines += 1
 
                 self.cache_lock.acquire()
                 line = self.line_cache[0]
                 self.line_cache.remove(line)
                 self.cache_lock.release()
 
                 which=-1
                 if re.search("CTS:", line):
                     continue
                 if self.debug_level > 2: self.debug("Processing: "+ line)
                 for regex in self.regexes:
                     which=which+1
                     if self.debug_level > 3: self.debug("Comparing line to: "+ regex)
+                    #import string
                     #matchobj = re.search(string.lower(regex), string.lower(line))
                     matchobj = re.search(regex, line)
                     if matchobj:
                         self.whichmatch=which
                         if self.returnonlymatch:
                             return matchobj.group(self.returnonlymatch)
                         else:
                             self.debug("Matched: "+line)
                             if self.debug_level > 1: self.debug("With: "+ regex)
                             return line
 
             elif timeout > 0 and end < time.time():
                 if self.debug_level > 1: self.debug("hit timeout: %d" % timeout)
 
                 timeout = 0
                 for f in self.file_list:
                     f.setend()
 
             else:
                 self.__get_lines(timeout)
                 if len(self.line_cache) == 0 and end < time.time():
                     self.debug("Single search terminated: start=%d, end=%d, now=%d, lines=%d" % (begin, end, time.time(), lines))
                     return None
                 else:
                     self.debug("Waiting: start=%d, end=%d, now=%d, lines=%d" % (begin, end, time.time(), len(self.line_cache)))
                     time.sleep(1)
 
         self.debug("How did we get here")
         return None
 
     def lookforall(self, timeout=None, allow_multiple_matches=None, silent=False):
         '''Examine the log looking for ALL of the given patterns.
         It starts looking from the place marked by setwatch().
 
         We return when the timeout is reached, or when we have found
         ALL of the regexes that were part of the watch
         '''
 
         if timeout == None: timeout = self.Timeout
         save_regexes = self.regexes
         returnresult = []
 
         if trace_lw:
             silent = False
 
         if not silent:
             self.debug("starting search: timeout=%d" % timeout)
             for regex in self.regexes:
                 if self.debug_level > 2: self.debug("Looking for regex: "+regex)
 
         while (len(self.regexes) > 0):
             oneresult = self.look(timeout)
             if not oneresult:
                 self.unmatched = self.regexes
                 self.matched = returnresult
                 self.regexes = save_regexes
                 self.end()
                 return None
 
             returnresult.append(oneresult)
             if not allow_multiple_matches:
                 del self.regexes[self.whichmatch]
 
             else:
                 # Allow multiple regexes to match a single line
                 tmp_regexes = self.regexes
                 self.regexes = []
                 which = 0
                 for regex in tmp_regexes:
                     matchobj = re.search(regex, oneresult)
                     if not matchobj:
                         self.regexes.append(regex)
 
         self.unmatched = None
         self.matched = returnresult
         self.regexes = save_regexes
         return returnresult