diff --git a/cts/lab/CTStests.py b/cts/lab/CTStests.py
index d7f0cd1f0b..1be0ad6ab9 100644
--- a/cts/lab/CTStests.py
+++ b/cts/lab/CTStests.py
@@ -1,3178 +1,3178 @@
 """ Test-specific classes for Pacemaker's Cluster Test Suite (CTS)
 """
 
 __copyright__ = "Copyright 2000-2023 the Pacemaker project contributors"
 __license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
 
 #
 #        SPECIAL NOTE:
 #
 #        Tests may NOT implement any cluster-manager-specific code in them.
 #        EXTEND the ClusterManager object to provide the base capabilities
 #        the test needs if you need to do something that the current CM classes
 #        do not.  Otherwise you screw up the whole point of the object structure
 #        in CTS.
 #
 #                Thank you.
 #
 
 import os
 import re
 import time
 import subprocess
 import tempfile
 
 from stat import *
 from cts import CTS
 from cts.CTSaudits import *
 from cts.watcher   import LogWatcher
 
 from pacemaker import BuildOptions
 from pacemaker._cts.environment import EnvFactory
 from pacemaker._cts.logging import LogFactory
 from pacemaker._cts.patterns import PatternSelector
 from pacemaker._cts.remote import RemoteFactory
 
 AllTestClasses = [ ]
 
 
 class CTSTest(object):
     '''
     A Cluster test.
     We implement the basic set of properties and behaviors for a generic
     cluster test.
 
     Cluster tests track their own statistics.
     We keep each of the kinds of counts we track as separate {name,value}
     pairs.
     '''
 
     def __init__(self, cm):
         #self.name="the unnamed test"
         self.Stats = {"calls":0
         ,        "success":0
         ,        "failure":0
         ,        "skipped":0
         ,        "auditfail":0}
 
 #        if not issubclass(cm.__class__, ClusterManager):
 #            raise ValueError("Must be a ClusterManager object")
         self.CM = cm
         self.Env = EnvFactory().getInstance()
         self.rsh = RemoteFactory().getInstance()
         self.logger = LogFactory()
         self.templates = PatternSelector(cm["Name"])
         self.Audits = []
         self.timeout = 120
         self.passed = 1
         self.is_loop = 0
         self.is_unsafe = 0
         self.is_experimental = 0
         self.is_container = 0
         self.is_valgrind = 0
         self.benchmark = 0  # which tests to benchmark
         self.timer = {}  # timers
 
     def log(self, args):
         self.logger.log(args)
 
     def debug(self, args):
         self.logger.debug(args)
 
     def has_key(self, key):
         return key in self.Stats
 
     def __setitem__(self, key, value):
         self.Stats[key] = value
 
     def __getitem__(self, key):
         if str(key) == "0":
             raise ValueError("Bad call to 'foo in X', should reference 'foo in X.Stats' instead")
 
         if key in self.Stats:
             return self.Stats[key]
         return None
 
     def log_mark(self, msg):
         self.debug("MARK: test %s %s %d" % (self.name,msg,time.time()))
         return
 
     def get_timer(self,key = "test"):
         try: return self.timer[key]
         except: return 0
 
     def set_timer(self,key = "test"):
         self.timer[key] = time.time()
         return self.timer[key]
 
     def log_timer(self,key = "test"):
         elapsed = 0
         if key in self.timer:
             elapsed = time.time() - self.timer[key]
             s = key == "test" and self.name or "%s:%s" % (self.name,key)
             self.debug("%s runtime: %.2f" % (s, elapsed))
             del self.timer[key]
         return elapsed
 
     def incr(self, name):
         '''Increment (or initialize) the value associated with the given name'''
         if not name in self.Stats:
             self.Stats[name] = 0
         self.Stats[name] = self.Stats[name]+1
 
         # Reset the test passed boolean
         if name == "calls":
             self.passed = 1
 
     def failure(self, reason="none"):
         '''Increment the failure count'''
         self.passed = 0
         self.incr("failure")
         self.logger.log(("Test %s" % self.name).ljust(35) + " FAILED: %s" % reason)
         return None
 
     def success(self):
         '''Increment the success count'''
         self.incr("success")
         return 1
 
     def skipped(self):
         '''Increment the skipped count'''
         self.incr("skipped")
         return 1
 
     def __call__(self, node):
         '''Perform the given test'''
         raise ValueError("Abstract Class member (__call__)")
         self.incr("calls")
         return self.failure()
 
     def audit(self):
         passed = 1
         if len(self.Audits) > 0:
             for audit in self.Audits:
                 if not audit():
                     self.logger.log("Internal %s Audit %s FAILED." % (self.name, audit.name()))
                     self.incr("auditfail")
                     passed = 0
         return passed
 
     def setup(self, node):
         '''Setup the given test'''
         return self.success()
 
     def teardown(self, node):
         '''Tear down the given test'''
         return self.success()
 
     def create_watch(self, patterns, timeout, name=None):
         if not name:
             name = self.name
         return LogWatcher(self.Env["LogFileName"], patterns, name, timeout, kind=self.Env["LogWatcher"], hosts=self.Env["nodes"])
 
     def local_badnews(self, prefix, watch, local_ignore=[]):
         errcount = 0
         if not prefix:
             prefix = "LocalBadNews:"
 
         ignorelist = []
         ignorelist.append(" CTS: ")
         ignorelist.append(prefix)
         ignorelist.extend(local_ignore)
 
         while errcount < 100:
             match = watch.look(0)
             if match:
                add_err = 1
                for ignore in ignorelist:
                    if add_err == 1 and re.search(ignore, match):
                        add_err = 0
                if add_err == 1:
                    self.logger.log(prefix + " " + match)
                    errcount = errcount + 1
             else:
               break
         else:
             self.logger.log("Too many errors!")
 
         watch.end()
         return errcount
 
     def is_applicable(self):
         return self.is_applicable_common()
 
     def is_applicable_common(self):
-        '''Return TRUE if we are applicable in the current test configuration'''
+        '''Return True if we are applicable in the current test configuration'''
         #raise ValueError("Abstract Class member (is_applicable)")
 
         if self.is_loop and not self.Env["loop-tests"]:
-            return 0
+            return False
         elif self.is_unsafe and not self.Env["unsafe-tests"]:
-            return 0
+            return False
         elif self.is_valgrind and not self.Env["valgrind-tests"]:
-            return 0
+            return False
         elif self.is_experimental and not self.Env["experimental-tests"]:
-            return 0
+            return False
         elif self.is_container and not self.Env["container-tests"]:
-            return 0
+            return False
         elif self.Env["benchmark"] and self.benchmark == 0:
-            return 0
+            return False
 
-        return 1
+        return True
 
     def find_ocfs2_resources(self, node):
         self.r_o2cb = None
         self.r_ocfs2 = []
 
         (_, lines) = self.rsh(node, "crm_resource -c", verbose=1)
         for line in lines:
             if re.search("^Resource", line):
                 r = AuditResource(self.CM, line)
                 if r.rtype == "o2cb" and r.parent != "NA":
                     self.debug("Found o2cb: %s" % self.r_o2cb)
                     self.r_o2cb = r.parent
             if re.search("^Constraint", line):
                 c = AuditConstraint(self.CM, line)
                 if c.type == "rsc_colocation" and c.target == self.r_o2cb:
                     self.r_ocfs2.append(c.rsc)
 
         self.debug("Found ocfs2 filesystems: %s" % repr(self.r_ocfs2))
         return len(self.r_ocfs2)
 
     def canrunnow(self, node):
         '''Return TRUE if we can meaningfully run right now'''
         return 1
 
     def errorstoignore(self):
         '''Return list of errors which are 'normal' and should be ignored'''
         return []
 
 
 class StopTest(CTSTest):
     '''Stop (deactivate) the cluster manager on a node'''
     def __init__(self, cm):
         CTSTest.__init__(self, cm)
         self.name = "Stop"
 
     def __call__(self, node):
         '''Perform the 'stop' test. '''
         self.incr("calls")
         if self.CM.ShouldBeStatus[node] != "up":
             return self.skipped()
 
         patterns = []
         # Technically we should always be able to notice ourselves stopping
         patterns.append(self.templates["Pat:We_stopped"] % node)
 
         # Any active node needs to notice this one left
         # (note that this won't work if we have multiple partitions)
         for other in self.Env["nodes"]:
             if self.CM.ShouldBeStatus[other] == "up" and other != node:
                 patterns.append(self.templates["Pat:They_stopped"] %(other, self.CM.key_for_node(node)))
                 #self.debug("Checking %s will notice %s left"%(other, node))
 
         watch = self.create_watch(patterns, self.Env["DeadTime"])
         watch.setwatch()
 
         if node == self.CM.OurNode:
             self.incr("us")
         else:
             if self.CM.upcount() <= 1:
                 self.incr("all")
             else:
                 self.incr("them")
 
         self.CM.StopaCM(node)
         watch_result = watch.lookforall()
 
         failreason = None
         UnmatchedList = "||"
         if watch.unmatched:
             (_, output) = self.rsh(node, "/bin/ps axf", verbose=1)
             for line in output:
                 self.debug(line)
 
             (_, output) = self.rsh(node, "/usr/sbin/dlm_tool dump 2>/dev/null", verbose=1)
             for line in output:
                 self.debug(line)
 
             for regex in watch.unmatched:
                 self.logger.log ("ERROR: Shutdown pattern not found: %s" % (regex))
                 UnmatchedList += regex + "||"
                 failreason = "Missing shutdown pattern"
 
         self.CM.cluster_stable(self.Env["DeadTime"])
 
         if not watch.unmatched or self.CM.upcount() == 0:
             return self.success()
 
         if len(watch.unmatched) >= self.CM.upcount():
             return self.failure("no match against (%s)" % UnmatchedList)
 
         if failreason is None:
             return self.success()
         else:
             return self.failure(failreason)
 #
 # We don't register StopTest because it's better when called by
 # another test...
 #
 
 
 class StartTest(CTSTest):
     '''Start (activate) the cluster manager on a node'''
     def __init__(self, cm, debug=None):
         CTSTest.__init__(self,cm)
         self.name = "start"
         self.debug = debug
 
     def __call__(self, node):
         '''Perform the 'start' test. '''
         self.incr("calls")
 
         if self.CM.upcount() == 0:
             self.incr("us")
         else:
             self.incr("them")
 
         if self.CM.ShouldBeStatus[node] != "down":
             return self.skipped()
         elif self.CM.StartaCM(node):
             return self.success()
         else:
             return self.failure("Startup %s on node %s failed"
                                 % (self.Env["Name"], node))
 
 #
 # We don't register StartTest because it's better when called by
 # another test...
 #
 
 
 class FlipTest(CTSTest):
     '''If it's running, stop it.  If it's stopped, start it.
        Overthrow the status quo...
     '''
     def __init__(self, cm):
         CTSTest.__init__(self,cm)
         self.name = "Flip"
         self.start = StartTest(cm)
         self.stop = StopTest(cm)
 
     def __call__(self, node):
         '''Perform the 'Flip' test. '''
         self.incr("calls")
         if self.CM.ShouldBeStatus[node] == "up":
             self.incr("stopped")
             ret = self.stop(node)
             type = "up->down"
             # Give the cluster time to recognize it's gone...
             time.sleep(self.Env["StableTime"])
         elif self.CM.ShouldBeStatus[node] == "down":
             self.incr("started")
             ret = self.start(node)
             type = "down->up"
         else:
             return self.skipped()
 
         self.incr(type)
         if ret:
             return self.success()
         else:
             return self.failure("%s failure" % type)
 
 #        Register FlipTest as a good test to run
 AllTestClasses.append(FlipTest)
 
 
 class RestartTest(CTSTest):
     '''Stop and restart a node'''
     def __init__(self, cm):
         CTSTest.__init__(self,cm)
         self.name = "Restart"
         self.start = StartTest(cm)
         self.stop = StopTest(cm)
         self.benchmark = 1
 
     def __call__(self, node):
         '''Perform the 'restart' test. '''
         self.incr("calls")
 
         self.incr("node:" + node)
 
         ret1 = 1
         if self.CM.StataCM(node):
             self.incr("WasStopped")
             if not self.start(node):
                 return self.failure("start (setup) failure: "+node)
 
         self.set_timer()
         if not self.stop(node):
             return self.failure("stop failure: "+node)
         if not self.start(node):
             return self.failure("start failure: "+node)
         return self.success()
 
 #        Register RestartTest as a good test to run
 AllTestClasses.append(RestartTest)
 
 
 class StonithdTest(CTSTest):
     def __init__(self, cm):
         CTSTest.__init__(self, cm)
         self.name = "Stonithd"
         self.startall = SimulStartLite(cm)
         self.benchmark = 1
 
     def __call__(self, node):
         self.incr("calls")
         if len(self.Env["nodes"]) < 2:
             return self.skipped()
 
         ret = self.startall(None)
         if not ret:
             return self.failure("Setup failed")
 
         is_dc = self.CM.is_node_dc(node)
 
         watchpats = []
         watchpats.append(self.templates["Pat:Fencing_ok"] % node)
         watchpats.append(self.templates["Pat:NodeFenced"] % node)
 
         if self.Env["at-boot"] == 0:
             self.debug("Expecting %s to stay down" % node)
             self.CM.ShouldBeStatus[node] = "down"
         else:
             self.debug("Expecting %s to come up again %d" % (node, self.Env["at-boot"]))
             watchpats.append("%s.* S_STARTING -> S_PENDING" % node)
             watchpats.append("%s.* S_PENDING -> S_NOT_DC" % node)
 
         watch = self.create_watch(watchpats, 30 + self.Env["DeadTime"] + self.Env["StableTime"] + self.Env["StartTime"])
         watch.setwatch()
 
         origin = self.Env.RandomGen.choice(self.Env["nodes"])
 
         (rc, _) = self.rsh(origin, "stonith_admin --reboot %s -VVVVVV" % node)
 
         if rc == 124: # CRM_EX_TIMEOUT
             # Look for the patterns, usually this means the required
             # device was running on the node to be fenced - or that
             # the required devices were in the process of being loaded
             # and/or moved
             #
             # Effectively the node committed suicide so there will be
             # no confirmation, but pacemaker should be watching and
             # fence the node again
 
             self.logger.log("Fencing command on %s to fence %s timed out" % (origin, node))
 
         elif origin != node and rc != 0:
             self.debug("Waiting for the cluster to recover")
             self.CM.cluster_stable()
 
             self.debug("Waiting for fenced node to come back up")
             self.CM.ns.WaitForAllNodesToComeUp(self.Env["nodes"], 600)
 
             self.logger.log("Fencing command on %s failed to fence %s (rc=%d)" % (origin, node, rc))
 
         elif origin == node and rc != 255:
             # 255 == broken pipe, i.e. the node was fenced as expected
             self.logger.log("Locally originated fencing returned %d" % rc)
 
         self.set_timer("fence")
         matched = watch.lookforall()
         self.log_timer("fence")
         self.set_timer("reform")
         if watch.unmatched:
             self.logger.log("Patterns not found: " + repr(watch.unmatched))
 
         self.debug("Waiting for the cluster to recover")
         self.CM.cluster_stable()
 
         self.debug("Waiting for fenced node to come back up")
         self.CM.ns.WaitForAllNodesToComeUp(self.Env["nodes"], 600)
 
         self.debug("Waiting for the cluster to re-stabilize with all nodes")
         is_stable = self.CM.cluster_stable(self.Env["StartTime"])
 
         if not matched:
             return self.failure("Didn't find all expected patterns")
         elif not is_stable:
             return self.failure("Cluster did not become stable")
 
         self.log_timer("reform")
         return self.success()
 
     def errorstoignore(self):
         return [
             self.templates["Pat:Fencing_start"] % ".*",
             self.templates["Pat:Fencing_ok"] % ".*",
             self.templates["Pat:Fencing_active"],
             r"error.*: Operation 'reboot' targeting .* by .* for stonith_admin.*: Timer expired",
         ]
 
     def is_applicable(self):
         if not self.is_applicable_common():
-            return 0
+            return False
 
         if "DoFencing" in list(self.Env.keys()):
             return self.Env["DoFencing"]
 
-        return 1
+        return True
 
 AllTestClasses.append(StonithdTest)
 
 
 class StartOnebyOne(CTSTest):
     '''Start all the nodes ~ one by one'''
     def __init__(self, cm):
         CTSTest.__init__(self,cm)
         self.name = "StartOnebyOne"
         self.stopall = SimulStopLite(cm)
         self.start = StartTest(cm)
         self.ns = CTS.NodeStatus(cm.Env)
 
     def __call__(self, dummy):
         '''Perform the 'StartOnebyOne' test. '''
         self.incr("calls")
 
         #        We ignore the "node" parameter...
 
         #        Shut down all the nodes...
         ret = self.stopall(None)
         if not ret:
             return self.failure("Test setup failed")
 
         failed = []
         self.set_timer()
         for node in self.Env["nodes"]:
             if not self.start(node):
                 failed.append(node)
 
         if len(failed) > 0:
             return self.failure("Some node failed to start: " + repr(failed))
 
         return self.success()
 
 #        Register StartOnebyOne as a good test to run
 AllTestClasses.append(StartOnebyOne)
 
 
 class SimulStart(CTSTest):
     '''Start all the nodes ~ simultaneously'''
     def __init__(self, cm):
         CTSTest.__init__(self,cm)
         self.name = "SimulStart"
         self.stopall = SimulStopLite(cm)
         self.startall = SimulStartLite(cm)
 
     def __call__(self, dummy):
         '''Perform the 'SimulStart' test. '''
         self.incr("calls")
 
         #        We ignore the "node" parameter...
 
         #        Shut down all the nodes...
         ret = self.stopall(None)
         if not ret:
             return self.failure("Setup failed")
 
         if not self.startall(None):
             return self.failure("Startall failed")
 
         return self.success()
 
 #        Register SimulStart as a good test to run
 AllTestClasses.append(SimulStart)
 
 
 class SimulStop(CTSTest):
     '''Stop all the nodes ~ simultaneously'''
     def __init__(self, cm):
         CTSTest.__init__(self,cm)
         self.name = "SimulStop"
         self.startall = SimulStartLite(cm)
         self.stopall = SimulStopLite(cm)
 
     def __call__(self, dummy):
         '''Perform the 'SimulStop' test. '''
         self.incr("calls")
 
         #     We ignore the "node" parameter...
 
         #     Start up all the nodes...
         ret = self.startall(None)
         if not ret:
             return self.failure("Setup failed")
 
         if not self.stopall(None):
             return self.failure("Stopall failed")
 
         return self.success()
 
 #     Register SimulStop as a good test to run
 AllTestClasses.append(SimulStop)
 
 
 class StopOnebyOne(CTSTest):
     '''Stop all the nodes in order'''
     def __init__(self, cm):
         CTSTest.__init__(self,cm)
         self.name = "StopOnebyOne"
         self.startall = SimulStartLite(cm)
         self.stop = StopTest(cm)
 
     def __call__(self, dummy):
         '''Perform the 'StopOnebyOne' test. '''
         self.incr("calls")
 
         #     We ignore the "node" parameter...
 
         #     Start up all the nodes...
         ret = self.startall(None)
         if not ret:
             return self.failure("Setup failed")
 
         failed = []
         self.set_timer()
         for node in self.Env["nodes"]:
             if not self.stop(node):
                 failed.append(node)
 
         if len(failed) > 0:
             return self.failure("Some node failed to stop: " + repr(failed))
 
         return self.success()
 
 #     Register StopOnebyOne as a good test to run
 AllTestClasses.append(StopOnebyOne)
 
 
 class RestartOnebyOne(CTSTest):
     '''Restart all the nodes in order'''
     def __init__(self, cm):
         CTSTest.__init__(self,cm)
         self.name = "RestartOnebyOne"
         self.startall = SimulStartLite(cm)
 
     def __call__(self, dummy):
         '''Perform the 'RestartOnebyOne' test. '''
         self.incr("calls")
 
         #     We ignore the "node" parameter...
 
         #     Start up all the nodes...
         ret = self.startall(None)
         if not ret:
             return self.failure("Setup failed")
 
         did_fail = []
         self.set_timer()
         self.restart = RestartTest(self.CM)
         for node in self.Env["nodes"]:
             if not self.restart(node):
                 did_fail.append(node)
 
         if did_fail:
             return self.failure("Could not restart %d nodes: %s"
                                 % (len(did_fail), repr(did_fail)))
         return self.success()
 
 #     Register RestartOnebyOne as a good test to run
 AllTestClasses.append(RestartOnebyOne)
 
 
 class PartialStart(CTSTest):
     '''Start a node - but tell it to stop before it finishes starting up'''
     def __init__(self, cm):
         CTSTest.__init__(self,cm)
         self.name = "PartialStart"
         self.startall = SimulStartLite(cm)
         self.stopall = SimulStopLite(cm)
         self.stop = StopTest(cm)
         #self.is_unsafe = 1
 
     def __call__(self, node):
         '''Perform the 'PartialStart' test. '''
         self.incr("calls")
 
         ret = self.stopall(None)
         if not ret:
             return self.failure("Setup failed")
 
         watchpats = []
         watchpats.append("pacemaker-controld.*Connecting to .* cluster infrastructure")
         watch = self.create_watch(watchpats, self.Env["DeadTime"]+10)
         watch.setwatch()
 
         self.CM.StartaCMnoBlock(node)
         ret = watch.lookforall()
         if not ret:
             self.logger.log("Patterns not found: " + repr(watch.unmatched))
             return self.failure("Setup of %s failed" % node)
 
         ret = self.stop(node)
         if not ret:
             return self.failure("%s did not stop in time" % node)
 
         return self.success()
 
     def errorstoignore(self):
         '''Return list of errors which should be ignored'''
 
         # We might do some fencing in the 2-node case if we make it up far enough
         return [
             r"Executing reboot fencing operation",
             r"Requesting fencing \([^)]+\) of node ",
         ]
 
 #     Register PartialStart as a good test to run
 AllTestClasses.append(PartialStart)
 
 
 class StandbyTest(CTSTest):
     def __init__(self, cm):
         CTSTest.__init__(self,cm)
         self.name = "Standby"
         self.benchmark = 1
 
         self.start = StartTest(cm)
         self.startall = SimulStartLite(cm)
 
     # make sure the node is active
     # set the node to standby mode
     # check resources; no resources should be running on the node
     # set the node to active mode
     # check resources; resources should have been migrated back (SHOULD THEY?)
 
     def __call__(self, node):
 
         self.incr("calls")
         ret = self.startall(None)
         if not ret:
             return self.failure("Start all nodes failed")
 
         self.debug("Make sure node %s is active" % node)
         if self.CM.StandbyStatus(node) != "off":
             if not self.CM.SetStandbyMode(node, "off"):
                 return self.failure("can't set node %s to active mode" % node)
 
         self.CM.cluster_stable()
 
         status = self.CM.StandbyStatus(node)
         if status != "off":
             return self.failure("standby status of %s is [%s] but we expect [off]" % (node, status))
 
         self.debug("Getting resources running on node %s" % node)
         rsc_on_node = self.CM.active_resources(node)
 
         watchpats = []
         watchpats.append(r"State transition .* -> S_POLICY_ENGINE")
         watch = self.create_watch(watchpats, self.Env["DeadTime"]+10)
         watch.setwatch()
 
         self.debug("Setting node %s to standby mode" % node)
         if not self.CM.SetStandbyMode(node, "on"):
             return self.failure("can't set node %s to standby mode" % node)
 
         self.set_timer("on")
 
         ret = watch.lookforall()
         if not ret:
             self.logger.log("Patterns not found: " + repr(watch.unmatched))
             self.CM.SetStandbyMode(node, "off")
             return self.failure("cluster didn't react to standby change on %s" % node)
 
         self.CM.cluster_stable()
 
         status = self.CM.StandbyStatus(node)
         if status != "on":
             return self.failure("standby status of %s is [%s] but we expect [on]" % (node, status))
         self.log_timer("on")
 
         self.debug("Checking resources")
         bad_run = self.CM.active_resources(node)
         if len(bad_run) > 0:
             rc = self.failure("%s set to standby, %s is still running on it" % (node, repr(bad_run)))
             self.debug("Setting node %s to active mode" % node)
             self.CM.SetStandbyMode(node, "off")
             return rc
 
         self.debug("Setting node %s to active mode" % node)
         if not self.CM.SetStandbyMode(node, "off"):
             return self.failure("can't set node %s to active mode" % node)
 
         self.set_timer("off")
         self.CM.cluster_stable()
 
         status = self.CM.StandbyStatus(node)
         if status != "off":
             return self.failure("standby status of %s is [%s] but we expect [off]" % (node, status))
         self.log_timer("off")
 
         return self.success()
 
 AllTestClasses.append(StandbyTest)
 
 
 class ValgrindTest(CTSTest):
     '''Check for memory leaks'''
     def __init__(self, cm):
         CTSTest.__init__(self,cm)
         self.name = "Valgrind"
         self.stopall = SimulStopLite(cm)
         self.startall = SimulStartLite(cm)
         self.is_valgrind = 1
         self.is_loop = 1
 
     def setup(self, node):
         self.incr("calls")
 
         ret = self.stopall(None)
         if not ret:
             return self.failure("Stop all nodes failed")
 
         # @TODO Edit /etc/sysconfig/pacemaker on all nodes to enable valgrind,
         # and clear any valgrind logs from previous runs. For now, we rely on
         # the user to do this manually.
 
         ret = self.startall(None)
         if not ret:
             return self.failure("Start all nodes failed")
 
         return self.success()
 
     def teardown(self, node):
         # Return all nodes to normal
         # @TODO Edit /etc/sysconfig/pacemaker on all nodes to disable valgrind
         ret = self.stopall(None)
         if not ret:
             return self.failure("Stop all nodes failed")
 
         return self.success()
 
     def find_leaks(self):
         # Check for leaks
         # (no longer used but kept in case feature is restored)
         leaked = []
         self.stop = StopTest(self.CM)
 
         for node in self.Env["nodes"]:
             rc = self.stop(node)
             if not rc:
                 self.failure("Couldn't shut down %s" % node)
 
             (rc, _) = self.rsh(node, "grep -e indirectly.*lost:.*[1-9] -e definitely.*lost:.*[1-9] -e (ERROR|error).*SUMMARY:.*[1-9].*errors %s" % self.logger.logPat)
             if rc != 1:
                 leaked.append(node)
                 self.failure("Valgrind errors detected on %s" % node)
                 (_, output) = self.rsh(node, "grep -e lost: -e SUMMARY: %s" % self.logger.logPat, verbose=1)
                 for line in output:
                     self.logger.log(line)
                 (_, output) = self.rsh(node, "cat %s" % self.logger.logPat, verbose=1)
                 for line in output:
                     self.debug(line)
 
         self.rsh(node, "rm -f %s" % self.logger.logPat, verbose=1)
         return leaked
 
     def __call__(self, node):
         #leaked = self.find_leaks()
         #if len(leaked) > 0:
         #    return self.failure("Nodes %s leaked" % repr(leaked))
 
         return self.success()
 
     def errorstoignore(self):
         '''Return list of errors which should be ignored'''
         return [
             r"pacemaker-based.*: \*\*\*\*\*\*\*\*\*\*\*\*\*",
             r"pacemaker-based.*: .* avoid confusing Valgrind",
             r"HA_VALGRIND_ENABLED",
         ]
 
 
 class StandbyLoopTest(ValgrindTest):
     '''Check for memory leaks by putting a node in and out of standby for an hour'''
     # @TODO This is not a useful test for memory leaks
     def __init__(self, cm):
         ValgrindTest.__init__(self,cm)
         self.name = "StandbyLoop"
 
     def __call__(self, node):
 
         lpc = 0
         delay = 2
         failed = 0
         done = time.time() + self.Env["loop-minutes"] * 60
         while time.time() <= done and not failed:
             lpc = lpc + 1
 
             time.sleep(delay)
             if not self.CM.SetStandbyMode(node, "on"):
                 self.failure("can't set node %s to standby mode" % node)
                 failed = lpc
 
             time.sleep(delay)
             if not self.CM.SetStandbyMode(node, "off"):
                 self.failure("can't set node %s to active mode" % node)
                 failed = lpc
 
         leaked = self.find_leaks()
         if failed:
             return self.failure("Iteration %d failed" % failed)
         elif len(leaked) > 0:
             return self.failure("Nodes %s leaked" % repr(leaked))
 
         return self.success()
 
 #AllTestClasses.append(StandbyLoopTest)
 
 
 class BandwidthTest(CTSTest):
 #        Tests should not be cluster-manager-specific
 #        If you need to find out cluster manager configuration to do this, then
 #        it should be added to the generic cluster manager API.
     '''Test the bandwidth which the cluster uses'''
     def __init__(self, cm):
         CTSTest.__init__(self, cm)
         self.name = "Bandwidth"
         self.start = StartTest(cm)
         self.__setitem__("min",0)
         self.__setitem__("max",0)
         self.__setitem__("totalbandwidth",0)
         (handle, self.tempfile) = tempfile.mkstemp(".cts")
         os.close(handle)
         self.startall = SimulStartLite(cm)
 
     def __call__(self, node):
         '''Perform the Bandwidth test'''
         self.incr("calls")
 
         if self.CM.upcount() < 1:
             return self.skipped()
 
         Path = self.CM.InternalCommConfig()
         if "ip" not in Path["mediatype"]:
              return self.skipped()
 
         port = Path["port"][0]
         port = int(port)
 
         ret = self.startall(None)
         if not ret:
             return self.failure("Test setup failed")
         time.sleep(5)  # We get extra messages right after startup.
 
         fstmpfile = "/var/run/band_estimate"
         dumpcmd = "tcpdump -p -n -c 102 -i any udp port %d > %s 2>&1" \
         %                (port, fstmpfile)
 
         (rc, _) = self.rsh(node, dumpcmd)
         if rc == 0:
             farfile = "root@%s:%s" % (node, fstmpfile)
             self.rsh.copy(farfile, self.tempfile)
             Bandwidth = self.countbandwidth(self.tempfile)
             if not Bandwidth:
                 self.logger.log("Could not compute bandwidth.")
                 return self.success()
             intband = int(Bandwidth + 0.5)
             self.logger.log("...bandwidth: %d bits/sec" % intband)
             self.Stats["totalbandwidth"] = self.Stats["totalbandwidth"] + Bandwidth
             if self.Stats["min"] == 0:
                 self.Stats["min"] = Bandwidth
             if Bandwidth > self.Stats["max"]:
                 self.Stats["max"] = Bandwidth
             if Bandwidth < self.Stats["min"]:
                 self.Stats["min"] = Bandwidth
             self.rsh(node, "rm -f %s" % fstmpfile)
             os.unlink(self.tempfile)
             return self.success()
         else:
             return self.failure("no response from tcpdump command [%d]!" % rc)
 
     def countbandwidth(self, file):
         fp = open(file, "r")
         fp.seek(0)
         count = 0
         sum = 0
         while 1:
             line = fp.readline()
             if not line:
                 return None
             if re.search("udp",line) or re.search("UDP,", line):
                 count = count + 1
                 linesplit = line.split(" ")
                 for j in range(len(linesplit)-1):
                     if linesplit[j] == "udp": break
                     if linesplit[j] == "length:": break
 
                 try:
                     sum = sum + int(linesplit[j+1])
                 except ValueError:
                     self.logger.log("Invalid tcpdump line: %s" % line)
                     return None
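                 # Convert the packet's tcpdump timestamp (HH:MM:SS.micro) to seconds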
                 T1 = linesplit[0]
                 timesplit = T1.split(":")
                 time2split = timesplit[2].split(".")
                 time1 = (int(timesplit[0])*60+int(timesplit[1]))*60+int(time2split[0])+int(time2split[1])*0.000001
                 break
 
         while count < 100:
             line = fp.readline()
             if not line:
                 return None
             if re.search("udp",line) or re.search("UDP,", line):
                 count = count+1
                 linessplit = line.split(" ")
                 for j in range(len(linessplit)-1):
                     if linessplit[j] == "udp": break
                     if linessplit[j] == "length:": break
                 try:
                     sum = int(linessplit[j+1]) + sum
                 except ValueError:
                     self.logger.log("Invalid tcpdump line: %s" % line)
                     return None
 
         T2 = linessplit[0]
         timesplit = T2.split(":")
         time2split = timesplit[2].split(".")
         time2 = (int(timesplit[0])*60+int(timesplit[1]))*60+int(time2split[0])+int(time2split[1])*0.000001
         time = time2-time1
         if (time <= 0):
             return 0
         return int((sum*8)/time)
 
     def is_applicable(self):
         '''BandwidthTest is never applicable'''
-        return 0
+        return False
 
 AllTestClasses.append(BandwidthTest)
 
 
 ###################################################################
 class MaintenanceMode(CTSTest):
 ###################################################################
     def __init__(self, cm):
         CTSTest.__init__(self,cm)
         self.name = "MaintenanceMode"
         self.start = StartTest(cm)
         self.startall = SimulStartLite(cm)
         self.max = 30
         #self.is_unsafe = 1
         self.benchmark = 1
         self.action = "asyncmon"
         self.interval = 0
         self.rid = "maintenanceDummy"
 
     def toggleMaintenanceMode(self, node, action):
         pats = []
         pats.append(self.templates["Pat:DC_IDLE"])
 
         # fail the resource right after turning Maintenance mode on
         # verify it is not recovered until maintenance mode is turned off
         if action == "On":
             pats.append(self.templates["Pat:RscOpFail"] % (self.action, self.rid))
         else:
             pats.append(self.templates["Pat:RscOpOK"] % ("stop", self.rid))
             pats.append(self.templates["Pat:RscOpOK"] % ("start", self.rid))
 
         watch = self.create_watch(pats, 60)
         watch.setwatch()
 
         self.debug("Turning maintenance mode %s" % action)
         self.rsh(node, self.templates["MaintenanceMode%s" % (action)])
         if (action == "On"):
             self.rsh(node, "crm_resource -V -F -r %s -H %s &>/dev/null" % (self.rid, node))
 
         self.set_timer("recover%s" % (action))
         watch.lookforall()
         self.log_timer("recover%s" % (action))
         if watch.unmatched:
             self.debug("Failed to find patterns when turning maintenance mode %s" % action)
             return repr(watch.unmatched)
 
         return ""
 
     def insertMaintenanceDummy(self, node):
         pats = []
         pats.append(("%s.*" % node) + (self.templates["Pat:RscOpOK"] % ("start", self.rid)))
 
         watch = self.create_watch(pats, 60)
         watch.setwatch()
 
         self.CM.AddDummyRsc(node, self.rid)
 
         self.set_timer("addDummy")
         watch.lookforall()
         self.log_timer("addDummy")
 
         if watch.unmatched:
             self.debug("Failed to find patterns when adding maintenance dummy resource")
             return repr(watch.unmatched)
         return ""
 
     def removeMaintenanceDummy(self, node):
         pats = []
         pats.append(self.templates["Pat:RscOpOK"] % ("stop", self.rid))
 
         watch = self.create_watch(pats, 60)
         watch.setwatch()
         self.CM.RemoveDummyRsc(node, self.rid)
 
         self.set_timer("removeDummy")
         watch.lookforall()
         self.log_timer("removeDummy")
 
         if watch.unmatched:
             self.debug("Failed to find patterns when removing maintenance dummy resource")
             return repr(watch.unmatched)
         return ""
 
     def managedRscList(self, node):
         rscList = []
         (_, lines) = self.rsh(node, "crm_resource -c", verbose=1)
         for line in lines:
             if re.search("^Resource", line):
                 tmp = AuditResource(self.CM, line)
                 if tmp.managed():
                     rscList.append(tmp.id)
 
         return rscList
 
     def verifyResources(self, node, rscList, managed):
         managedList = list(rscList)
         managed_str = "managed"
         if not managed:
             managed_str = "unmanaged"
 
         (_, lines) = self.rsh(node, "crm_resource -c", verbose=1)
         for line in lines:
             if re.search("^Resource", line):
                 tmp = AuditResource(self.CM, line)
                 if managed and not tmp.managed():
                     continue
                 elif not managed and tmp.managed():
                     continue
                 elif managedList.count(tmp.id):
                     managedList.remove(tmp.id)
 
         if len(managedList) == 0:
             self.debug("Found all %s resources on %s" % (managed_str, node))
             return True
 
         self.logger.log("Could not find all %s resources on %s. %s" % (managed_str, node, managedList))
         return False
 
     def __call__(self, node):
         '''Perform the 'MaintenanceMode' test. '''
         self.incr("calls")
         verify_managed = False
         verify_unmanaged = False
         failPat = ""
 
         ret = self.startall(None)
         if not ret:
             return self.failure("Setup failed")
 
         # get a list of all the managed resources. We use this list
         # after enabling maintenance mode to verify all managed resources
         # become un-managed.  After maintenance mode is turned off, we use
         # this list to verify all the resources become managed again.
         managedResources = self.managedRscList(node)
         if len(managedResources) == 0:
             self.logger.log("No managed resources on %s" % node)
             return self.skipped()
 
         # insert a fake resource we can fail during maintenance mode
         # so we can verify recovery does not take place until after maintenance
         # mode is disabled.
         failPat = failPat + self.insertMaintenanceDummy(node)
 
         # toggle maintenance mode ON, then fail dummy resource.
         failPat = failPat + self.toggleMaintenanceMode(node, "On")
 
         # verify all the resources are now unmanaged
         if self.verifyResources(node, managedResources, False):
             verify_unmanaged = True
 
         # Toggle maintenance mode  OFF, verify dummy is recovered.
         failPat = failPat + self.toggleMaintenanceMode(node, "Off")
 
         # verify all the resources are now managed again
         if self.verifyResources(node, managedResources, True):
             verify_managed = True
 
         # Remove our maintenance dummy resource.
         failPat = failPat + self.removeMaintenanceDummy(node)
 
         self.CM.cluster_stable()
 
         if failPat != "":
             return self.failure("Unmatched patterns: %s" % (failPat))
         elif verify_unmanaged is False:
             return self.failure("Failed to verify resources became unmanaged during maintenance mode")
         elif verify_managed is False:
             return self.failure("Failed to verify resources switched back to managed after disabling maintenance mode")
 
         return self.success()
 
     def errorstoignore(self):
         '''Return list of errors which should be ignored'''
         return [
             r"Updating failcount for %s" % self.rid,
             r"schedulerd.*: Recover\s+%s\s+\(.*\)" % self.rid,
             r"Unknown operation: fail",
             self.templates["Pat:RscOpOK"] % (self.action, self.rid),
             r"(ERROR|error).*: Action %s_%s_%d .* initiated outside of a transition" % (self.rid, self.action, self.interval),
         ]
 
 AllTestClasses.append(MaintenanceMode)
 
 
 class ResourceRecover(CTSTest):
     def __init__(self, cm):
         CTSTest.__init__(self,cm)
         self.name = "ResourceRecover"
         self.start = StartTest(cm)
         self.startall = SimulStartLite(cm)
         self.max = 30
         self.rid = None
         self.rid_alt = None
         #self.is_unsafe = 1
         self.benchmark = 1
 
         # these are the values used for the new LRM API call
         self.action = "asyncmon"
         self.interval = 0
 
     def __call__(self, node):
         '''Perform the 'ResourceRecover' test. '''
         self.incr("calls")
 
         ret = self.startall(None)
         if not ret:
             return self.failure("Setup failed")
 
         # List all resources active on the node (skip test if none)
         resourcelist = self.CM.active_resources(node)
         if len(resourcelist) == 0:
             self.logger.log("No active resources on %s" % node)
             return self.skipped()
 
         # Choose one resource at random
         rsc = self.choose_resource(node, resourcelist)
         if rsc is None:
             return self.failure("Could not get details of resource '%s'" % self.rid)
         if rsc.id == rsc.clone_id:
             self.debug("Failing " + rsc.id)
         else:
             self.debug("Failing " + rsc.id + " (also known as " + rsc.clone_id + ")")
 
         # Log patterns to watch for (failure, plus restart if managed)
         pats = []
         pats.append(self.templates["Pat:CloneOpFail"] % (self.action, rsc.id, rsc.clone_id))
         if rsc.managed():
             pats.append(self.templates["Pat:RscOpOK"] % ("stop", self.rid))
             if rsc.unique():
                 pats.append(self.templates["Pat:RscOpOK"] % ("start", self.rid))
             else:
                 # Anonymous clones may get restarted with a different clone number
                 pats.append(self.templates["Pat:RscOpOK"] % ("start", ".*"))
 
         # Fail resource. (Ideally, we'd fail it twice, to ensure the fail count
         # is incrementing properly, but it might restart on a different node.
         # We'd have to temporarily ban it from all other nodes and ensure the
         # migration-threshold hasn't been reached.)
         if self.fail_resource(rsc, node, pats) is None:
             return None # self.failure() already called
 
         return self.success()
 
     def choose_resource(self, node, resourcelist):
         """ Choose a random resource to target """
 
         self.rid = self.Env.RandomGen.choice(resourcelist)
         self.rid_alt = self.rid
         (_, lines) = self.rsh(node, "crm_resource -c", verbose=1)
         for line in lines:
             if line.startswith("Resource: "):
                 rsc = AuditResource(self.CM, line)
                 if rsc.id == self.rid:
                     # Handle anonymous clones that get renamed
                     self.rid = rsc.clone_id
                     return rsc
         return None
 
     def get_failcount(self, node):
         """ Check the fail count of targeted resource on given node """
 
         (rc, lines) = self.rsh(node,
                                "crm_failcount --quiet --query --resource %s "
                                "--operation %s --interval %d "
                                "--node %s" % (self.rid, self.action,
                                self.interval, node), verbose=1)
         if rc != 0 or len(lines) != 1:
             self.logger.log("crm_failcount on %s failed (%d): %s" % (node, rc,
                             " // ".join(map(str.strip, lines))))
             return -1
         try:
             failcount = int(lines[0])
         except (IndexError, ValueError):
             self.logger.log("crm_failcount output on %s unparseable: %s" % (node,
                             ' '.join(lines)))
             return -1
         return failcount
 
     def fail_resource(self, rsc, node, pats):
         """ Fail the targeted resource, and verify as expected """
 
         orig_failcount = self.get_failcount(node)
 
         watch = self.create_watch(pats, 60)
         watch.setwatch()
 
         self.rsh(node, "crm_resource -V -F -r %s -H %s &>/dev/null" % (self.rid, node))
 
         self.set_timer("recover")
         watch.lookforall()
         self.log_timer("recover")
 
         self.CM.cluster_stable()
         recovered = self.CM.ResourceLocation(self.rid)
 
         if watch.unmatched:
             return self.failure("Patterns not found: %s" % repr(watch.unmatched))
 
         elif rsc.unique() and len(recovered) > 1:
             return self.failure("%s is now active on more than one node: %s"%(self.rid, repr(recovered)))
 
         elif len(recovered) > 0:
             self.debug("%s is running on: %s" % (self.rid, repr(recovered)))
 
         elif rsc.managed():
             return self.failure("%s was not recovered and is inactive" % self.rid)
 
         new_failcount = self.get_failcount(node)
         if new_failcount != (orig_failcount + 1):
             return self.failure("%s fail count is %d not %d" % (self.rid,
                                 new_failcount, orig_failcount + 1))
 
         return 0 # Anything but None is success
 
     def errorstoignore(self):
         '''Return list of errors which should be ignored'''
         return [
             r"Updating failcount for %s" % self.rid,
             r"schedulerd.*: Recover\s+(%s|%s)\s+\(.*\)" % (self.rid, self.rid_alt),
             r"Unknown operation: fail",
             self.templates["Pat:RscOpOK"] % (self.action, self.rid),
             r"(ERROR|error).*: Action %s_%s_%d .* initiated outside of a transition" % (self.rid, self.action, self.interval),
         ]
 
 AllTestClasses.append(ResourceRecover)
 
 
 class ComponentFail(CTSTest):
     def __init__(self, cm):
         CTSTest.__init__(self,cm)
         self.name = "ComponentFail"
         self.startall = SimulStartLite(cm)
         self.complist = cm.Components()
         self.patterns = []
         self.okerrpatterns = []
         self.is_unsafe = 1
 
     def __call__(self, node):
         '''Perform the 'ComponentFail' test. '''
         self.incr("calls")
         self.patterns = []
         self.okerrpatterns = []
 
         # start all nodes
         ret = self.startall(None)
         if not ret:
             return self.failure("Setup failed")
 
         if not self.CM.cluster_stable(self.Env["StableTime"]):
             return self.failure("Setup failed - unstable")
 
         node_is_dc = self.CM.is_node_dc(node, None)
 
         # select a component to kill
         chosen = self.Env.RandomGen.choice(self.complist)
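         # A dc_only component can only be killed on the DC, so re-pick if this node is not the DC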
         while chosen.dc_only == 1 and node_is_dc == 0:
             chosen = self.Env.RandomGen.choice(self.complist)
 
         self.debug("...component %s (dc=%d,boot=%d)" % (chosen.name, node_is_dc,chosen.triggersreboot))
         self.incr(chosen.name)
 
         if chosen.name != "corosync":
             self.patterns.append(self.templates["Pat:ChildKilled"] %(node, chosen.name))
             self.patterns.append(self.templates["Pat:ChildRespawn"] %(node, chosen.name))
 
         self.patterns.extend(chosen.pats)
         if node_is_dc:
           self.patterns.extend(chosen.dc_pats)
 
         # @TODO this should be a flag in the Component
         if chosen.name in [ "corosync", "pacemaker-based", "pacemaker-fenced" ]:
             # Ignore actions for fence devices if fencer will respawn
             # (their registration will be lost, and probes will fail)
             self.okerrpatterns = [ self.templates["Pat:Fencing_active"] ]
             (_, lines) = self.rsh(node, "crm_resource -c", verbose=1)
             for line in lines:
                 if re.search("^Resource", line):
                     r = AuditResource(self.CM, line)
                     if r.rclass == "stonith":
                         self.okerrpatterns.append(self.templates["Pat:Fencing_recover"] % r.id)
                         self.okerrpatterns.append(self.templates["Pat:Fencing_probe"] % r.id)
 
         # supply a copy so self.patterns doesn't end up empty
         tmpPats = []
         tmpPats.extend(self.patterns)
         self.patterns.extend(chosen.badnews_ignore)
 
         # Look for STONITH ops, depending on Env["at-boot"] we might need to change the nodes status
         stonithPats = []
         stonithPats.append(self.templates["Pat:Fencing_ok"] % node)
         stonith = self.create_watch(stonithPats, 0)
         stonith.setwatch()
 
         # set the watch for stable
         watch = self.create_watch(
             tmpPats, self.Env["DeadTime"] + self.Env["StableTime"] + self.Env["StartTime"])
         watch.setwatch()
 
         # kill the component
         chosen.kill(node)
 
         self.debug("Waiting for the cluster to recover")
         self.CM.cluster_stable()
 
         self.debug("Waiting for any fenced node to come back up")
         self.CM.ns.WaitForAllNodesToComeUp(self.Env["nodes"], 600)
 
         self.debug("Waiting for the cluster to re-stabilize with all nodes")
         self.CM.cluster_stable(self.Env["StartTime"])
 
         self.debug("Checking if %s was shot" % node)
         shot = stonith.look(60)
         if shot:
             self.debug("Found: " + repr(shot))
             self.okerrpatterns.append(self.templates["Pat:Fencing_start"] % node)
 
             if self.Env["at-boot"] == 0:
                 self.CM.ShouldBeStatus[node] = "down"
 
             # If fencing occurred, chances are many (if not all) the expected logs
             # will not be sent - or will be lost when the node reboots
             return self.success()
 
         # check for logs indicating a graceful recovery
         matched = watch.lookforall(allow_multiple_matches=1)
         if watch.unmatched:
             self.logger.log("Patterns not found: " + repr(watch.unmatched))
 
         self.debug("Waiting for the cluster to re-stabilize with all nodes")
         is_stable = self.CM.cluster_stable(self.Env["StartTime"])
 
         if not matched:
             return self.failure("Didn't find all expected %s patterns" % chosen.name)
         elif not is_stable:
             return self.failure("Cluster did not become stable after killing %s" % chosen.name)
 
         return self.success()
 
     def errorstoignore(self):
         '''Return list of errors which should be ignored'''
     # Note that okerrpatterns refers to the last time we ran this test
     # The good news is that this works fine for us...
         self.okerrpatterns.extend(self.patterns)
         return self.okerrpatterns
 
 AllTestClasses.append(ComponentFail)
 
 
 class SplitBrainTest(CTSTest):
     '''Test split-brain: when the path between the two nodes breaks,
        check whether the two nodes both take over the resource'''
     def __init__(self,cm):
         CTSTest.__init__(self,cm)
         self.name = "SplitBrain"
         self.start = StartTest(cm)
         self.startall = SimulStartLite(cm)
         self.is_experimental = 1
 
     def isolate_partition(self, partition):
         other_nodes = []
         other_nodes.extend(self.Env["nodes"])
 
         for node in partition:
             try:
                 other_nodes.remove(node)
             except ValueError:
                 self.logger.log("Node "+node+" not in " + repr(self.Env["nodes"]) + " from " +repr(partition))
 
         if len(other_nodes) == 0:
             return 1
 
         self.debug("Creating partition: " + repr(partition))
         self.debug("Everyone else: " + repr(other_nodes))
 
         for node in partition:
             if not self.CM.isolate_node(node, other_nodes):
                 self.logger.log("Could not isolate %s" % node)
                 return 0
 
         return 1
 
     def heal_partition(self, partition):
         other_nodes = []
         other_nodes.extend(self.Env["nodes"])
 
         for node in partition:
             try:
                 other_nodes.remove(node)
             except ValueError:
                 self.logger.log("Node "+node+" not in " + repr(self.Env["nodes"]))
 
         if len(other_nodes) == 0:
             return 1
 
         self.debug("Healing partition: " + repr(partition))
         self.debug("Everyone else: " + repr(other_nodes))
 
         for node in partition:
             self.CM.unisolate_node(node, other_nodes)
 
     def __call__(self, node):
         '''Perform split-brain test'''
         self.incr("calls")
         self.passed = 1
         partitions = {}
 
         ret = self.startall(None)
         if not ret:
             return self.failure("Setup failed")
 
         while 1:
             # Retry until we get multiple partitions
             partitions = {}
             p_max = len(self.Env["nodes"])
             for node in self.Env["nodes"]:
                 p = self.Env.RandomGen.randint(1, p_max)
                 if not p in partitions:
                     partitions[p] = []
                 partitions[p].append(node)
             p_max = len(list(partitions.keys()))
             if p_max > 1:
                 break
             # else, try again
 
         self.debug("Created %d partitions" % p_max)
         for key in list(partitions.keys()):
             self.debug("Partition["+str(key)+"]:\t"+repr(partitions[key]))
 
         # Disabling STONITH to reduce test complexity for now
         self.rsh(node, "crm_attribute -V -n stonith-enabled -v false")
 
         for key in list(partitions.keys()):
             self.isolate_partition(partitions[key])
 
         count = 30
         while count > 0:
             if len(self.CM.find_partitions()) != p_max:
                 time.sleep(10)
                 count -= 1
             else:
                 break
         else:
             self.failure("Expected partitions were not created")
 
         # Target number of partitions formed - wait for stability
         if not self.CM.cluster_stable():
             self.failure("Partitioned cluster not stable")
 
         # Now audit the cluster state
         self.CM.partitions_expected = p_max
         if not self.audit():
             self.failure("Audits failed")
         self.CM.partitions_expected = 1
 
         # And heal them again
         for key in list(partitions.keys()):
             self.heal_partition(partitions[key])
 
         # Wait for a single partition to form
         count = 30
         while count > 0:
             if len(self.CM.find_partitions()) != 1:
                 time.sleep(10)
                 count -= 1
             else:
                 break
         else:
             self.failure("Cluster did not reform")
 
         # Wait for it to have the right number of members
         count = 30
         while count > 0:
             members = []
 
             partitions = self.CM.find_partitions()
             if len(partitions) > 0:
                 members = partitions[0].split()
 
             if len(members) != len(self.Env["nodes"]):
                 time.sleep(10)
                 count -= 1
             else:
                 break
         else:
             self.failure("Cluster did not completely reform")
 
         # Wait up to 20 minutes - the delay is preferable to trying to
         # continue in a messed-up state
         if not self.CM.cluster_stable(1200):
             self.failure("Reformed cluster not stable")
             if self.Env["continue"] == 1:
                 answer = "Y"
             else:
                 try:
                     answer = input('Continue? [nY]')
                 except EOFError as e:
                     answer = "n" 
             if answer and answer == "n":
                 raise ValueError("Reformed cluster not stable")
 
         # Turn fencing back on
         if self.Env["DoFencing"]:
             self.rsh(node, "crm_attribute -V -D -n stonith-enabled")
 
         self.CM.cluster_stable()
 
         if self.passed:
             return self.success()
         return self.failure("See previous errors")
 
     def errorstoignore(self):
         '''Return list of errors which are 'normal' and should be ignored'''
         return [
             r"Another DC detected:",
             r"(ERROR|error).*: .*Application of an update diff failed",
             r"pacemaker-controld.*:.*not in our membership list",
             r"CRIT:.*node.*returning after partition",
         ]
 
     def is_applicable(self):
         if not self.is_applicable_common():
-            return 0
+            return False
         return len(self.Env["nodes"]) > 2
 
 AllTestClasses.append(SplitBrainTest)
 
 
 class Reattach(CTSTest):
     def __init__(self, cm):
         CTSTest.__init__(self,cm)
         self.name = "Reattach"
         self.startall = SimulStartLite(cm)
         self.restart1 = RestartTest(cm)
         self.stopall = SimulStopLite(cm)
         self.is_unsafe = 0 # Handled by canrunnow()
 
     def _is_managed(self, node):
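         # Query the is-managed resource default, treating a missing value as "true"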
         (_, is_managed) = self.rsh(node, "crm_attribute -t rsc_defaults -n is-managed -q -G -d true", verbose=1)
         is_managed = is_managed[0].strip()
         return is_managed == "true"
 
     def _set_unmanaged(self, node):
         self.debug("Disable resource management")
         self.rsh(node, "crm_attribute -t rsc_defaults -n is-managed -v false")
 
     def _set_managed(self, node):
         self.debug("Re-enable resource management")
         self.rsh(node, "crm_attribute -t rsc_defaults -n is-managed -D")
 
     def setup(self, node):
         attempt = 0
         if not self.startall(None):
             return None
 
         # Make sure we are really _really_ stable and that all
         # resources, including those that depend on transient node
         # attributes, are started
         while not self.CM.cluster_stable(double_check=True):
             if attempt < 5:
                 attempt += 1
                 self.debug("Not stable yet, re-testing")
             else:
                 self.logger.log("Cluster is not stable")
                 return None
 
         return 1
 
     def teardown(self, node):
 
         # Make sure 'node' is up
         start = StartTest(self.CM)
         start(node)
 
         if not self._is_managed(node):
             self.logger.log("Attempting to re-enable resource management on %s" % node)
             self._set_managed(node)
             self.CM.cluster_stable()
             if not self._is_managed(node):
                 self.logger.log("Could not re-enable resource management")
                 return 0
 
         return 1
 
     def canrunnow(self, node):
         '''Return TRUE if we can meaningfully run right now'''
         if self.find_ocfs2_resources(node):
             self.logger.log("Detach/Reattach scenarios are not possible with OCFS2 services present")
             return 0
         return 1
 
     def __call__(self, node):
         self.incr("calls")
 
         pats = []
         # Conveniently, the scheduler will display this message when disabling
         # management, even if fencing is not enabled, so we can rely on it.
         managed = self.create_watch(["No fencing will be done"], 60)
         managed.setwatch()
 
         self._set_unmanaged(node)
 
         if not managed.lookforall():
             self.logger.log("Patterns not found: " + repr(managed.unmatched))
             return self.failure("Resource management not disabled")
 
         pats = []
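         # Any of these operations would indicate resource activity, which must
         # not happen while resource management is disabled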
         pats.append(self.templates["Pat:RscOpOK"] % ("start", ".*"))
         pats.append(self.templates["Pat:RscOpOK"] % ("stop", ".*"))
         pats.append(self.templates["Pat:RscOpOK"] % ("promote", ".*"))
         pats.append(self.templates["Pat:RscOpOK"] % ("demote", ".*"))
         pats.append(self.templates["Pat:RscOpOK"] % ("migrate", ".*"))
 
         watch = self.create_watch(pats, 60, "ShutdownActivity")
         watch.setwatch()
 
         self.debug("Shutting down the cluster")
         ret = self.stopall(None)
         if not ret:
             self._set_managed(node)
             return self.failure("Couldn't shut down the cluster")
 
         self.debug("Bringing the cluster back up")
         ret = self.startall(None)
         time.sleep(5) # allow ping to update the CIB
         if not ret:
             self._set_managed(node)
             return self.failure("Couldn't restart the cluster")
 
         if self.local_badnews("ResourceActivity:", watch):
             self._set_managed(node)
             return self.failure("Resources stopped or started during cluster restart")
 
         watch = self.create_watch(pats, 60, "StartupActivity")
         watch.setwatch()
 
         # Re-enable resource management (and verify it happened).
         self._set_managed(node)
         self.CM.cluster_stable()
         if not self._is_managed(node):
             return self.failure("Could not re-enable resource management")
 
         # Ignore actions for STONITH resources
         ignore = []
         (_, lines) = self.rsh(node, "crm_resource -c", verbose=1)
         for line in lines:
             if re.search("^Resource", line):
                 r = AuditResource(self.CM, line)
                 if r.rclass == "stonith":
 
                     self.debug("Ignoring start actions for %s" % r.id)
                     ignore.append(self.templates["Pat:RscOpOK"] % ("start", r.id))
 
         if self.local_badnews("ResourceActivity:", watch, ignore):
             return self.failure("Resources stopped or started after resource management was re-enabled")
 
         return ret
 
     def errorstoignore(self):
         '''Return list of errors which should be ignored'''
         return [
             r"resource( was|s were) active at shutdown",
         ]
 
     def is_applicable(self):
-        return 1
+        return True
 
 AllTestClasses.append(Reattach)
 
 
 class SpecialTest1(CTSTest):
     '''Set up a custom test to cause quorum failure issues for Andrew'''
     def __init__(self, cm):
         CTSTest.__init__(self,cm)
         self.name = "SpecialTest1"
         self.startall = SimulStartLite(cm)
         self.restart1 = RestartTest(cm)
         self.stopall = SimulStopLite(cm)
 
     def __call__(self, node):
         '''Perform the 'SpecialTest1' test for Andrew. '''
         self.incr("calls")
 
         #        Shut down all the nodes...
         ret = self.stopall(None)
         if not ret:
             return self.failure("Could not stop all nodes")
 
         # Test config recovery when the other nodes come up
         self.rsh(node, "rm -f " + BuildOptions.CIB_DIR + "/cib*")
 
         #        Start the selected node
         ret = self.restart1(node)
         if not ret:
             return self.failure("Could not start "+node)
 
         #        Start all remaining nodes
         ret = self.startall(None)
         if not ret:
             return self.failure("Could not start the remaining nodes")
 
         return self.success()
 
     def errorstoignore(self):
         '''Return list of errors which should be ignored'''
         # Errors that occur as a result of the CIB being wiped
         return [
             r"error.*: v1 patchset error, patch failed to apply: Application of an update diff failed",
             r"error.*: Resource start-up disabled since no STONITH resources have been defined",
             r"error.*: Either configure some or disable STONITH with the stonith-enabled option",
             r"error.*: NOTE: Clusters with shared data need STONITH to ensure data integrity",
         ]
 
 AllTestClasses.append(SpecialTest1)
 
 
 class HAETest(CTSTest):
     '''Base class for tests that exercise the HA Extension (DLM/O2CB/OCFS2) stack'''
     def __init__(self, cm):
         CTSTest.__init__(self,cm)
         self.name = "HAETest"
         self.stopall = SimulStopLite(cm)
         self.startall = SimulStartLite(cm)
         self.is_loop = 1
 
     def setup(self, node):
         #  Start all remaining nodes
         ret = self.startall(None)
         if not ret:
             return self.failure("Couldn't start all nodes")
         return self.success()
 
     def teardown(self, node):
         # Stop everything
         ret = self.stopall(None)
         if not ret:
             return self.failure("Couldn't stop all nodes")
         return self.success()
 
     def wait_on_state(self, node, resource, expected_clones, attempts=240):
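         # Poll crm_resource (once per second, up to 'attempts' times) until the
         # resource reports the expected number of active instances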
         while attempts > 0:
             active = 0
             (rc, lines) = self.rsh(node, "crm_resource -r %s -W -Q" % resource, verbose=1)
 
             # Hack until crm_resource does the right thing
             if rc == 0 and lines:
                 active = len(lines)
 
             if len(lines) == expected_clones:
                 return 1
 
             elif rc == 1:
                 self.debug("Resource %s is still inactive" % resource)
 
             elif rc == 234:
                 self.logger.log("Unknown resource %s" % resource)
                 return 0
 
             elif rc == 246:
                 self.logger.log("Cluster is inactive")
                 return 0
 
             elif rc != 0:
                 self.logger.log("Call to crm_resource failed, rc=%d" % rc)
                 return 0
 
             else:
                 self.debug("Resource %s is active %d times instead of %d" % (resource, active, expected_clones))
 
             attempts -= 1
             time.sleep(1)
 
         return 0
 
     def find_dlm(self, node):
         self.r_dlm = None
 
         (_, lines) = self.rsh(node, "crm_resource -c", verbose=1)
         for line in lines:
             if re.search("^Resource", line):
                 r = AuditResource(self.CM, line)
                 if r.rtype == "controld" and r.parent != "NA":
                     self.r_dlm = r.parent
                     self.debug("Found dlm: %s" % self.r_dlm)
                     return 1
         return 0
 
     def find_hae_resources(self, node):
         self.r_dlm = None
         self.r_o2cb = None
         self.r_ocfs2 = []
 
         if self.find_dlm(node):
             self.find_ocfs2_resources(node)
 
     def is_applicable(self):
         if not self.is_applicable_common():
-            return 0
+            return False
         if self.Env["Schema"] == "hae":
-            return 1
+            return True
         return None
 
 
 class HAERoleTest(HAETest):
     def __init__(self, cm):
         '''Lars' mount/unmount test for the HA extension. '''
         HAETest.__init__(self,cm)
         self.name = "HAERoleTest"
 
     def change_state(self, node, resource, target):
         (rc, _) = self.rsh(node, "crm_resource -V -r %s -p target-role -v %s  --meta" % (resource, target))
         return rc
 
     def __call__(self, node):
         self.incr("calls")
         lpc = 0
         failed = 0
         delay = 2
         done = time.time() + self.Env["loop-minutes"]*60
         self.find_hae_resources(node)
 
         clone_max = len(self.Env["nodes"])
         while time.time() <= done and not failed:
             lpc = lpc + 1
 
             self.change_state(node, self.r_dlm, "Stopped")
             if not self.wait_on_state(node, self.r_dlm, 0):
                 self.failure("%s did not go down correctly" % self.r_dlm)
                 failed = lpc
 
             self.change_state(node, self.r_dlm, "Started")
             if not self.wait_on_state(node, self.r_dlm, clone_max):
                 self.failure("%s did not come up correctly" % self.r_dlm)
                 failed = lpc
 
             if not self.wait_on_state(node, self.r_o2cb, clone_max):
                 self.failure("%s did not come up correctly" % self.r_o2cb)
                 failed = lpc
 
             for fs in self.r_ocfs2:
                 if not self.wait_on_state(node, fs, clone_max):
                     self.failure("%s did not come up correctly" % fs)
                     failed = lpc
 
         if failed:
             return self.failure("iteration %d failed" % failed)
         return self.success()
 
 AllTestClasses.append(HAERoleTest)
 
 
 class HAEStandbyTest(HAETest):
     '''Put a node into standby and back while the HA Extension stack is running'''
     def __init__(self, cm):
         HAETest.__init__(self,cm)
         self.name = "HAEStandbyTest"
 
     def change_state(self, node, resource, target):
         (rc, _) = self.rsh(node, "crm_standby -V -l reboot -v %s" % (target))
         return rc
 
     def __call__(self, node):
         self.incr("calls")
 
         lpc = 0
         failed = 0
         done = time.time() + self.Env["loop-minutes"]*60
         self.find_hae_resources(node)
 
         clone_max = len(self.Env["nodes"])
         while time.time() <= done and not failed:
             lpc = lpc + 1
 
             self.change_state(node, self.r_dlm, "true")
             if not self.wait_on_state(node, self.r_dlm, clone_max-1):
                 self.failure("%s did not go down correctly" % self.r_dlm)
                 failed = lpc
 
             self.change_state(node, self.r_dlm, "false")
             if not self.wait_on_state(node, self.r_dlm, clone_max):
                 self.failure("%s did not come up correctly" % self.r_dlm)
                 failed = lpc
 
             if not self.wait_on_state(node, self.r_o2cb, clone_max):
                 self.failure("%s did not come up correctly" % self.r_o2cb)
                 failed = lpc
 
             for fs in self.r_ocfs2:
                 if not self.wait_on_state(node, fs, clone_max):
                     self.failure("%s did not come up correctly" % fs)
                     failed = lpc
 
         if failed:
             return self.failure("iteration %d failed" % failed)
         return self.success()
 
 AllTestClasses.append(HAEStandbyTest)
 
 
 class NearQuorumPointTest(CTSTest):
     '''
     This test brings larger clusters near the quorum point (50%).
     In addition, it will test doing starts and stops at the same time.
 
     Here is how I think it should work:
     - loop over the nodes and decide randomly which will be up and which
       will be down, using a 50% probability for each of up/down
     - figure out what to do to get into that state from the current state
     - in parallel, bring up the nodes going up and bring down the nodes
       going down
     '''
 
     def __init__(self, cm):
         CTSTest.__init__(self,cm)
         self.name = "NearQuorumPoint"
 
     def __call__(self, dummy):
         '''Perform the 'NearQuorumPoint' test. '''
         self.incr("calls")
         startset = []
         stopset = []
 
         stonith = self.CM.prepare_fencing_watcher("NearQuorumPoint")
         #decide what to do with each node
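         #(a "start" for a node that is already up, or a "stop" for a node that
         # is already down, is effectively a no-op; the watch patterns below
         # only cover nodes whose state actually changes)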
         for node in self.Env["nodes"]:
             action = self.Env.RandomGen.choice(["start","stop"])
             #action = self.Env.RandomGen.choice(["start","stop","no change"])
             if action == "start" :
                 startset.append(node)
             elif action == "stop" :
                 stopset.append(node)
 
         self.debug("start nodes:" + repr(startset))
         self.debug("stop nodes:" + repr(stopset))
 
         #add search patterns
         watchpats = [ ]
         for node in stopset:
             if self.CM.ShouldBeStatus[node] == "up":
                 watchpats.append(self.templates["Pat:We_stopped"] % node)
 
         for node in startset:
             if self.CM.ShouldBeStatus[node] == "down":
                 #watchpats.append(self.templates["Pat:NonDC_started"] % node)
                 watchpats.append(self.templates["Pat:Local_started"] % node)
             else:
                 for stopping in stopset:
                     if self.CM.ShouldBeStatus[stopping] == "up":
                         watchpats.append(self.templates["Pat:They_stopped"] % (node, self.CM.key_for_node(stopping)))
 
         if len(watchpats) == 0:
             return self.skipped()
 
         if len(startset) != 0:
             watchpats.append(self.templates["Pat:DC_IDLE"])
 
         watch = self.create_watch(watchpats, self.Env["DeadTime"]+10)
 
         watch.setwatch()
 
         #begin actions
         for node in stopset:
             if self.CM.ShouldBeStatus[node] == "up":
                 self.CM.StopaCMnoBlock(node)
 
         for node in startset:
             if self.CM.ShouldBeStatus[node] == "down":
                 self.CM.StartaCMnoBlock(node)
 
         #get the result
         if watch.lookforall():
             self.CM.cluster_stable()
             self.CM.fencing_cleanup("NearQuorumPoint", stonith)
             return self.success()
 
         self.logger.log("Warn: Patterns not found: " + repr(watch.unmatched))
 
         #get the "bad" nodes
         upnodes = []
         for node in stopset:
             if self.CM.StataCM(node) == 1:
                 upnodes.append(node)
 
         downnodes = []
         for node in startset:
             if self.CM.StataCM(node) == 0:
                 downnodes.append(node)
 
         self.CM.fencing_cleanup("NearQuorumPoint", stonith)
         if upnodes == [] and downnodes == []:
             self.CM.cluster_stable()
 
             # Make sure they're completely down with no residue
             for node in stopset:
                 self.rsh(node, self.templates["StopCmd"])
 
             return self.success()
 
         if len(upnodes) > 0:
             self.logger.log("Warn: Unstoppable nodes: " + repr(upnodes))
 
         if len(downnodes) > 0:
             self.logger.log("Warn: Unstartable nodes: " + repr(downnodes))
 
         return self.failure()
 
     def is_applicable(self):
-        return 1
+        return True
 
 AllTestClasses.append(NearQuorumPointTest)
 
 
 class RollingUpgradeTest(CTSTest):
     '''Perform a rolling upgrade of the cluster'''
     def __init__(self, cm):
         CTSTest.__init__(self,cm)
         self.name = "RollingUpgrade"
         self.start = StartTest(cm)
         self.stop = StopTest(cm)
         self.stopall = SimulStopLite(cm)
         self.startall = SimulStartLite(cm)
 
     def setup(self, node):
         #  Start all remaining nodes
         ret = self.stopall(None)
         if not ret:
             return self.failure("Couldn't stop all nodes")
 
         for node in self.Env["nodes"]:
             if not self.downgrade(node, None):
                 return self.failure("Couldn't downgrade %s" % node)
 
         ret = self.startall(None)
         if not ret:
             return self.failure("Couldn't start all nodes")
         return self.success()
 
     def teardown(self, node):
         # Stop everything
         ret = self.stopall(None)
         if not ret:
             return self.failure("Couldn't stop all nodes")
 
         for node in self.Env["nodes"]:
             if not self.upgrade(node, None):
                 return self.failure("Couldn't upgrade %s" % node)
 
         return self.success()
 
     def install(self, node, version, start=1, flags="--force"):
 
         target_dir = "/tmp/rpm-%s" % version
         src_dir = "%s/%s" % (self.Env["rpm-dir"], version)
 
         self.logger.log("Installing %s on %s with %s" % (version, node, flags))
         if not self.stop(node):
             return self.failure("stop failure: "+node)
 
         self.rsh(node, "mkdir -p %s" % target_dir)
         self.rsh(node, "rm -f %s/*.rpm" % target_dir)
         (_, lines) = self.rsh(node, "ls -1 %s/*.rpm" % src_dir, verbose=1)
         for line in lines:
             line = line[:-1]
             rc = self.rsh.copy("%s" % (line), "%s:%s/" % (node, target_dir))
         self.rsh(node, "rpm -Uvh %s %s/*.rpm" % (flags, target_dir))
 
         if start and not self.start(node):
             return self.failure("start failure: "+node)
 
         return self.success()
 
     def upgrade(self, node, start=1):
         return self.install(node, self.Env["current-version"], start)
 
     def downgrade(self, node, start=1):
         return self.install(node, self.Env["previous-version"], start, "--force --nodeps")
 
     def __call__(self, node):
         '''Perform the 'Rolling Upgrade' test. '''
         self.incr("calls")
 
         for node in self.Env["nodes"]:
             if not self.upgrade(node):
                 return self.failure("Couldn't upgrade %s" % node)
 
             self.CM.cluster_stable()
 
         return self.success()
 
     def is_applicable(self):
         if not self.is_applicable_common():
             return None
 
         if not "rpm-dir" in list(self.Env.keys()):
             return None
         if not "current-version" in list(self.Env.keys()):
             return None
         if not "previous-version" in list(self.Env.keys()):
             return None
 
         return 1
 
 #        Register RollingUpgradeTest as a good test to run
 AllTestClasses.append(RollingUpgradeTest)
 
 
 class BSC_AddResource(CTSTest):
     '''Add a resource to the cluster'''
     def __init__(self, cm):
         CTSTest.__init__(self, cm)
         self.name = "AddResource"
         self.resource_offset = 0
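         # cibadmin template: create (-C) an object in the given CIB section (-o)
         # from inline XML (-X)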
         self.cib_cmd = """cibadmin -C -o %s -X '%s' """
 
     def __call__(self, node):
         self.incr("calls")
         self.resource_offset = self.resource_offset + 1
 
         r_id = "bsc-rsc-%s-%d" % (node, self.resource_offset)
         start_pat = "pacemaker-controld.*%s_start_0.*confirmed.*ok"
 
         patterns = []
         patterns.append(start_pat % r_id)
 
         watch = self.create_watch(patterns, self.Env["DeadTime"])
         watch.setwatch()
 
         ip = self.NextIP()
         if not self.make_ip_resource(node, r_id, "ocf", "IPaddr", ip):
             return self.failure("Make resource %s failed" % r_id)
 
         failed = 0
         watch_result = watch.lookforall()
         if watch.unmatched:
             for regex in watch.unmatched:
                 self.logger.log ("Warn: Pattern not found: %s" % (regex))
                 failed = 1
 
         if failed:
             return self.failure("Resource pattern(s) not found")
 
         if not self.CM.cluster_stable(self.Env["DeadTime"]):
             return self.failure("Unstable cluster")
 
         return self.success()
 
     def NextIP(self):
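         # Increment the last field of IPBase (hexadecimal for IPv6, decimal
         # for IPv4) and return the new address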
         ip = self.Env["IPBase"]
         if ":" in ip:
             fields = list(ip.rpartition(":"))
             fields[2] = "%x" % (int(fields[2], 16) + 1)
         else:
             fields = list(ip.rpartition('.'))
             fields[2] = str(int(fields[2]) + 1)
 
         ip = fields[0] + fields[1] + fields[2]
         self.Env["IPBase"] = ip
         return ip.strip()
 
     def make_ip_resource(self, node, id, rclass, type, ip):
         self.logger.log("Creating %s:%s:%s (%s) on %s" % (rclass,type,id,ip,node))
         rsc_xml="""
 <primitive id="%s" class="%s" type="%s"  provider="heartbeat">
     <instance_attributes id="%s"><attributes>
         <nvpair id="%s" name="ip" value="%s"/>
     </attributes></instance_attributes>
 </primitive>""" % (id, rclass, type, id, id, ip)
 
         node_constraint = """
       <rsc_location id="run_%s" rsc="%s">
         <rule id="pref_run_%s" score="100">
           <expression id="%s_loc_expr" attribute="#uname" operation="eq" value="%s"/>
         </rule>
       </rsc_location>""" % (id, id, id, id, node)
 
         rc = 0
         (rc, _) = self.rsh(node, self.cib_cmd % ("constraints", node_constraint), verbose=1)
         if rc != 0:
             self.logger.log("Constraint creation failed: %d" % rc)
             return None
 
         (rc, _) = self.rsh(node, self.cib_cmd % ("resources", rsc_xml), verbose=1)
         if rc != 0:
             self.logger.log("Resource creation failed: %d" % rc)
             return None
 
         return 1
 
     def is_applicable(self):
         if self.Env["DoBSC"]:
-            return 1
+            return True
         return None
 
 AllTestClasses.append(BSC_AddResource)
 
 
 class SimulStopLite(CTSTest):
     '''Stop any active nodes ~ simultaneously'''
     def __init__(self, cm):
         CTSTest.__init__(self,cm)
         self.name = "SimulStopLite"
 
     def __call__(self, dummy):
         '''Perform the 'SimulStopLite' setup work. '''
         self.incr("calls")
 
         self.debug("Setup: " + self.name)
 
         #     We ignore the "node" parameter...
         watchpats = [ ]
 
         for node in self.Env["nodes"]:
             if self.CM.ShouldBeStatus[node] == "up":
                 self.incr("WasStarted")
                 watchpats.append(self.templates["Pat:We_stopped"] % node)
 
         if len(watchpats) == 0:
             return self.success()
 
         #     Stop all the nodes - at about the same time...
         watch = self.create_watch(watchpats, self.Env["DeadTime"]+10)
 
         watch.setwatch()
         self.set_timer()
         for node in self.Env["nodes"]:
             if self.CM.ShouldBeStatus[node] == "up":
                 self.CM.StopaCMnoBlock(node)
         if watch.lookforall():
             # Make sure they're completely down with no residue
             for node in self.Env["nodes"]:
                 self.rsh(node, self.templates["StopCmd"])
 
             return self.success()
 
         did_fail = 0
         up_nodes = []
         for node in self.Env["nodes"]:
             if self.CM.StataCM(node) == 1:
                 did_fail = 1
                 up_nodes.append(node)
 
         if did_fail:
             return self.failure("Active nodes exist: " + repr(up_nodes))
 
         self.logger.log("Warn: All nodes stopped but CTS didn't detect: "
                     + repr(watch.unmatched))
 
         return self.failure("Missing log message: "+repr(watch.unmatched))
 
     def is_applicable(self):
         '''SimulStopLite is a setup test and never applicable'''
-        return 0
+        return False
 
 
 class SimulStartLite(CTSTest):
     '''Start any stopped nodes ~ simultaneously'''
     def __init__(self, cm):
         CTSTest.__init__(self,cm)
         self.name = "SimulStartLite"
 
     def __call__(self, dummy):
         '''Perform the 'SimulStartLite' setup work. '''
         self.incr("calls")
         self.debug("Setup: " + self.name)
 
         #        We ignore the "node" parameter...
         node_list = []
         for node in self.Env["nodes"]:
             if self.CM.ShouldBeStatus[node] == "down":
                 self.incr("WasStopped")
                 node_list.append(node)
 
         self.set_timer()
         while len(node_list) > 0:
             # Repeat until all nodes come up
             watchpats = [ ]
 
             uppat = self.templates["Pat:NonDC_started"]
             if self.CM.upcount() == 0:
                 uppat = self.templates["Pat:Local_started"]
 
             watchpats.append(self.templates["Pat:DC_IDLE"])
             for node in node_list:
                 watchpats.append(uppat % node)
                 watchpats.append(self.templates["Pat:InfraUp"] % node)
                 watchpats.append(self.templates["Pat:PacemakerUp"] % node)
 
             #   Start all the nodes - at about the same time...
             watch = self.create_watch(watchpats, self.Env["DeadTime"]+10)
             watch.setwatch()
 
             stonith = self.CM.prepare_fencing_watcher(self.name)
 
             for node in node_list:
                 self.CM.StartaCMnoBlock(node)
 
             watch.lookforall()
 
             node_list = self.CM.fencing_cleanup(self.name, stonith)
 
             if node_list is None:
                 return self.failure("Cluster did not stabilize")
 
             # Remove node_list messages from watch.unmatched
             for node in node_list:
                 self.logger.debug("Dealing with stonith operations for %s" % repr(node_list))
                 if watch.unmatched:
                     try:
                         watch.unmatched.remove(uppat % node)
                     except:
                         self.debug("Already matched: %s" % (uppat % node))
                     try:                        
                         watch.unmatched.remove(self.templates["Pat:InfraUp"] % node)
                     except:
                         self.debug("Already matched: %s" % (self.templates["Pat:InfraUp"] % node))
                     try:
                         watch.unmatched.remove(self.templates["Pat:PacemakerUp"] % node)
                     except:
                         self.debug("Already matched: %s" % (self.templates["Pat:PacemakerUp"] % node))
 
             if watch.unmatched:
                 for regex in watch.unmatched:
                     self.logger.log ("Warn: Startup pattern not found: %s" %(regex))
 
             if not self.CM.cluster_stable():
                 return self.failure("Cluster did not stabilize")
 
         did_fail = 0
         unstable = []
         for node in self.Env["nodes"]:
             if self.CM.StataCM(node) == 0:
                 did_fail = 1
                 unstable.append(node)
 
         if did_fail:
             return self.failure("Unstarted nodes exist: " + repr(unstable))
 
         unstable = []
         for node in self.Env["nodes"]:
             if not self.CM.node_stable(node):
                 did_fail = 1
                 unstable.append(node)
 
         if did_fail:
             return self.failure("Unstable cluster nodes exist: " + repr(unstable))
 
         return self.success()
 
     def is_applicable(self):
         '''SimulStartLite is a setup test and never applicable'''
-        return 0
+        return False
 
 
 def TestList(cm, audits):
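     # Instantiate every registered test class and return those applicable to
     # the current environment, with the given audits attached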
     result = []
     for testclass in AllTestClasses:
         bound_test = testclass(cm)
         if bound_test.is_applicable():
             bound_test.Audits = audits
             result.append(bound_test)
     return result
 
 
 class RemoteLXC(CTSTest):
     def __init__(self, cm):
         CTSTest.__init__(self,cm)
         self.name = "RemoteLXC"
         self.start = StartTest(cm)
         self.startall = SimulStartLite(cm)
         self.num_containers = 2
         self.is_container = 1
         self.failed = 0
         self.fail_string = ""
 
     def start_lxc_simple(self, node):
 
         # restore any artifacts lying around from a previous test.
         self.rsh(node, "/usr/share/pacemaker/tests/cts/lxc_autogen.sh -s -R &>/dev/null")
 
         # generate the containers, put them in the config, add some resources to them
         pats = [ ]
         watch = self.create_watch(pats, 120)
         watch.setwatch()
         pats.append(self.templates["Pat:RscOpOK"] % ("start", "lxc1"))
         pats.append(self.templates["Pat:RscOpOK"] % ("start", "lxc2"))
         pats.append(self.templates["Pat:RscOpOK"] % ("start", "lxc-ms"))
         pats.append(self.templates["Pat:RscOpOK"] % ("promote", "lxc-ms"))
 
         self.rsh(node, "/usr/share/pacemaker/tests/cts/lxc_autogen.sh -g -a -m -s -c %d &>/dev/null" % self.num_containers)
         self.set_timer("remoteSimpleInit")
         watch.lookforall()
         self.log_timer("remoteSimpleInit")
         if watch.unmatched:
             self.fail_string = "Unmatched patterns: %s" % (repr(watch.unmatched))
             self.failed = 1
 
     def cleanup_lxc_simple(self, node):
 
         pats = [ ]
         # if the test failed, attempt to clean up the cib and libvirt environment
         # as best as possible 
         if self.failed == 1:
             # restore libvirt and cib
             self.rsh(node, "/usr/share/pacemaker/tests/cts/lxc_autogen.sh -s -R &>/dev/null")
             return
 
         watch = self.create_watch(pats, 120)
         watch.setwatch()
 
         pats.append(self.templates["Pat:RscOpOK"] % ("stop", "container1"))
         pats.append(self.templates["Pat:RscOpOK"] % ("stop", "container2"))
 
         self.rsh(node, "/usr/share/pacemaker/tests/cts/lxc_autogen.sh -p &>/dev/null")
         self.set_timer("remoteSimpleCleanup")
         watch.lookforall()
         self.log_timer("remoteSimpleCleanup")
 
         if watch.unmatched:
             self.fail_string = "Unmatched patterns: %s" % (repr(watch.unmatched))
             self.failed = 1
 
         # cleanup libvirt
         self.rsh(node, "/usr/share/pacemaker/tests/cts/lxc_autogen.sh -s -R &>/dev/null")
 
     def __call__(self, node):
         '''Perform the 'RemoteLXC' test. '''
         self.incr("calls")
 
         ret = self.startall(None)
         if not ret:
             return self.failure("Setup failed, start all nodes failed.")
 
         (rc, _) = self.rsh(node, "/usr/share/pacemaker/tests/cts/lxc_autogen.sh -v &>/dev/null")
         if rc == 1:
             self.log("Environment test for lxc support failed.")
             return self.skipped()
 
         self.start_lxc_simple(node)
         self.cleanup_lxc_simple(node)
 
         self.debug("Waiting for the cluster to recover")
         self.CM.cluster_stable()
 
         if self.failed == 1:
             return self.failure(self.fail_string)
 
         return self.success()
 
     def errorstoignore(self):
         '''Return list of errors which should be ignored'''
         return [
             r"Updating failcount for ping",
             r"schedulerd.*: Recover\s+(ping|lxc-ms|container)\s+\(.*\)",
             # The orphaned lxc-ms resource causes an expected transition error
             # because the scheduler does not know that the promotable resource
             # used to be a clone. As a result, the resource appears to be active
             # in multiple locations when it shouldn't be, but in this case we
             # know why the error occurs and that it is expected.
             r"Calculated [Tt]ransition .*pe-error",
             r"Resource lxc-ms .* is active on 2 nodes attempting recovery",
             r"Unknown operation: fail",
             r"VirtualDomain.*ERROR: Unable to determine emulator",
         ]
 
 AllTestClasses.append(RemoteLXC)
 
 
 class RemoteDriver(CTSTest):
 
     def __init__(self, cm):
         CTSTest.__init__(self,cm)
         self.name = self.__class__.__name__
         self.start = StartTest(cm)
         self.startall = SimulStartLite(cm)
         self.stop = StopTest(cm)
         self.remote_rsc = "remote-rsc"
         self.cib_cmd = """cibadmin -C -o %s -X '%s' """
         self.reset()
 
     def reset(self):
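         # Reset per-test state so the same driver instance can be reused across calls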
         self.pcmk_started = 0
         self.failed = False
         self.fail_string = ""
         self.remote_node_added = 0
         self.remote_rsc_added = 0
         self.remote_use_reconnect_interval = self.Env.RandomGen.choice([True,False])
 
     def fail(self, msg):
         """ Mark test as failed. """
 
         self.failed = True
 
         # Always log the failure.
         self.logger.log(msg)
 
         # Use first failure as test status, as it's likely to be most useful.
         if not self.fail_string:
             self.fail_string = msg
 
     def get_othernode(self, node):
         for othernode in self.Env["nodes"]:
             if othernode == node:
                 # We don't want to use the CIB on the node we just shut down;
                 # find a cluster node that is not our soon-to-be remote node.
                 continue
             else:
                 return othernode
 
     def del_rsc(self, node, rsc):
         othernode = self.get_othernode(node)
         (rc, _) = self.rsh(othernode, "crm_resource -D -r %s -t primitive" % (rsc))
         if rc != 0:
             self.fail("Removal of resource '%s' failed" % rsc)
 
     def add_rsc(self, node, rsc_xml):
         othernode = self.get_othernode(node)
         (rc, _) = self.rsh(othernode, self.cib_cmd % ("resources", rsc_xml))
         if rc != 0:
             self.fail("resource creation failed")
 
     def add_primitive_rsc(self, node):
         rsc_xml = """
 <primitive class="ocf" id="%(node)s" provider="heartbeat" type="Dummy">
   <meta_attributes id="%(node)s-meta_attributes"/>
   <operations>
     <op id="%(node)s-monitor-interval-20s" interval="20s" name="monitor"/>
   </operations>
 </primitive>""" % { "node": self.remote_rsc }
         self.add_rsc(node, rsc_xml)
         if not self.failed:
             self.remote_rsc_added = 1
 
     def add_connection_rsc(self, node):
         rsc_xml = """
 <primitive class="ocf" id="%(node)s" provider="pacemaker" type="remote">
   <instance_attributes id="%(node)s-instance_attributes">
     <nvpair id="%(node)s-instance_attributes-server" name="server" value="%(server)s"/>
 """ % { "node": self.remote_node, "server": node }
 
         if self.remote_use_reconnect_interval:
             # Set reconnect interval on resource
             rsc_xml = rsc_xml + """
     <nvpair id="%s-instance_attributes-reconnect_interval" name="reconnect_interval" value="60s"/>
 """ % (self.remote_node)
 
         rsc_xml = rsc_xml + """
   </instance_attributes>
   <operations>
     <op id="%(node)s-start"       name="start"   interval="0"   timeout="120s"/>
     <op id="%(node)s-monitor-20s" name="monitor" interval="20s" timeout="45s"/>
   </operations>
 </primitive>
 """ % { "node": self.remote_node }
 
         self.add_rsc(node, rsc_xml)
         if not self.failed:
             self.remote_node_added = 1
 
     def disable_services(self, node):
         self.corosync_enabled = self.Env.service_is_enabled(node, "corosync")
         if self.corosync_enabled:
             self.Env.disable_service(node, "corosync")
 
         self.pacemaker_enabled = self.Env.service_is_enabled(node, "pacemaker")
         if self.pacemaker_enabled:
             self.Env.disable_service(node, "pacemaker")
 
     def restore_services(self, node):
         if self.corosync_enabled:
             self.Env.enable_service(node, "corosync")
 
         if self.pacemaker_enabled:
             self.Env.enable_service(node, "pacemaker")
 
     def stop_pcmk_remote(self, node):
         # disable pcmk remote
         for i in range(10):
             (rc, _) = self.rsh(node, "service pacemaker_remote stop")
             if rc != 0:
                 time.sleep(6)
             else:
                 break
 
     def start_pcmk_remote(self, node):
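         # Start pacemaker_remote, retrying up to 10 times (6 seconds apart) on failure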
         for i in range(10):
             (rc, _) = self.rsh(node, "service pacemaker_remote start")
             if rc != 0:
                 time.sleep(6)
             else:
                 self.pcmk_started = 1
                 break
 
     def freeze_pcmk_remote(self, node):
         """ Simulate a Pacemaker Remote daemon failure. """
 
         # We freeze the process.
         self.rsh(node, "killall -STOP pacemaker-remoted")
 
     def resume_pcmk_remote(self, node):
         # We resume the process.
         self.rsh(node, "killall -CONT pacemaker-remoted")
 
     def start_metal(self, node):
         # Cluster nodes are reused as remote nodes in remote tests. If cluster
         # services were enabled at boot and the remote node got fenced, the
         # rebooted node would rejoin as a cluster node instead of the expected
         # remote node, and pacemaker_remote would not be able to start. Depending
         # on timing, the test might then be unable to recover gracefully.
         #
         # Temporarily disable any enabled cluster services.
         self.disable_services(node)
 
         pcmk_started = 0
 
         # make sure the resource doesn't already exist for some reason
         self.rsh(node, "crm_resource -D -r %s -t primitive" % (self.remote_rsc))
         self.rsh(node, "crm_resource -D -r %s -t primitive" % (self.remote_node))
 
         if not self.stop(node):
             self.fail("Failed to shutdown cluster node %s" % node)
             return
 
         self.start_pcmk_remote(node)
 
         if self.pcmk_started == 0:
             self.fail("Failed to start pacemaker_remote on node %s" % node)
             return
 
         # Convert node to baremetal now that it has shutdown the cluster stack
         pats = [ ]
         watch = self.create_watch(pats, 120)
         watch.setwatch()
         pats.append(self.templates["Pat:RscOpOK"] % ("start", self.remote_node))
         pats.append(self.templates["Pat:DC_IDLE"])
 
         self.add_connection_rsc(node)
 
         self.set_timer("remoteMetalInit")
         watch.lookforall()
         self.log_timer("remoteMetalInit")
         if watch.unmatched:
             self.fail("Unmatched patterns: %s" % watch.unmatched)
 
     def migrate_connection(self, node):
         if self.failed:
             return
 
         pats = [ ]
         pats.append(self.templates["Pat:RscOpOK"] % ("migrate_to", self.remote_node))
         pats.append(self.templates["Pat:RscOpOK"] % ("migrate_from", self.remote_node))
         pats.append(self.templates["Pat:DC_IDLE"])
         watch = self.create_watch(pats, 120)
         watch.setwatch()
 
         (rc, _) = self.rsh(node, "crm_resource -M -r %s" % (self.remote_node), verbose=1)
         if rc != 0:
             self.fail("failed to move remote node connection resource")
             return
 
         self.set_timer("remoteMetalMigrate")
         watch.lookforall()
         self.log_timer("remoteMetalMigrate")
 
         if watch.unmatched:
             self.fail("Unmatched patterns: %s" % watch.unmatched)
             return
 
     def fail_rsc(self, node):
         if self.failed:
             return
 
         watchpats = [ ]
         watchpats.append(self.templates["Pat:RscRemoteOpOK"] % ("stop", self.remote_rsc, self.remote_node))
         watchpats.append(self.templates["Pat:RscRemoteOpOK"] % ("start", self.remote_rsc, self.remote_node))
         watchpats.append(self.templates["Pat:DC_IDLE"])
 
         watch = self.create_watch(watchpats, 120)
         watch.setwatch()
 
         self.debug("causing dummy rsc to fail.")
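         # (the Dummy agent tracks its state in a file; removing it makes the
         # next monitor report a failure)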
 
         self.rsh(node, "rm -f /var/run/resource-agents/Dummy*")
 
         self.set_timer("remoteRscFail")
         watch.lookforall()
         self.log_timer("remoteRscFail")
         if watch.unmatched:
             self.fail("Unmatched patterns during rsc fail: %s" % watch.unmatched)
 
     def fail_connection(self, node):
         if self.failed:
             return
 
         watchpats = [ ]
         watchpats.append(self.templates["Pat:Fencing_ok"] % self.remote_node)
         watchpats.append(self.templates["Pat:NodeFenced"] % self.remote_node)
 
         watch = self.create_watch(watchpats, 120)
         watch.setwatch()
 
         # freeze the pcmk remote daemon. this will result in fencing
         self.debug("Force stopped active remote node")
         self.freeze_pcmk_remote(node)
 
         self.debug("Waiting for remote node to be fenced.")
         self.set_timer("remoteMetalFence")
         watch.lookforall()
         self.log_timer("remoteMetalFence")
         if watch.unmatched:
             self.fail("Unmatched patterns: %s" % watch.unmatched)
             return
 
         self.debug("Waiting for the remote node to come back up")
         self.CM.ns.WaitForNodeToComeUp(node, 120)
 
         pats = [ ]
         watch = self.create_watch(pats, 240)
         watch.setwatch()
         pats.append(self.templates["Pat:RscOpOK"] % ("start", self.remote_node))
         if self.remote_rsc_added == 1:
             pats.append(self.templates["Pat:RscRemoteOpOK"] % ("start", self.remote_rsc, self.remote_node))
 
         # start the remote node again and watch it reintegrate into the cluster.
         self.start_pcmk_remote(node)
         if self.pcmk_started == 0:
             self.fail("Failed to start pacemaker_remote on node %s" % node)
             return
 
         self.debug("Waiting for remote node to rejoin cluster after being fenced.")
         self.set_timer("remoteMetalRestart")
         watch.lookforall()
         self.log_timer("remoteMetalRestart")
         if watch.unmatched:
             self.fail("Unmatched patterns: %s" % watch.unmatched)
             return
 
     def add_dummy_rsc(self, node):
         if self.failed:
             return
 
         # verify we can put a resource on the remote node
         pats = [ ]
         watch = self.create_watch(pats, 120)
         watch.setwatch()
         pats.append(self.templates["Pat:RscRemoteOpOK"] % ("start", self.remote_rsc, self.remote_node))
         pats.append(self.templates["Pat:DC_IDLE"])
 
         # Add a resource that must live on remote-node
         self.add_primitive_rsc(node)
 
         # force that rsc to prefer the remote node. 
         (rc, _) = self.CM.rsh(node, "crm_resource -M -r %s -N %s -f" % (self.remote_rsc, self.remote_node), verbose=1)
         if rc != 0:
             self.fail("Failed to place remote resource on remote node.")
             return
 
         self.set_timer("remoteMetalRsc")
         watch.lookforall()
         self.log_timer("remoteMetalRsc")
         if watch.unmatched:
             self.fail("Unmatched patterns: %s" % watch.unmatched)
 
     def test_attributes(self, node):
         if self.failed:
             return
 
         # This verifies permanent attributes can be set on a remote-node. It also
         # verifies the remote-node can edit its own cib node section remotely.
         (rc, line) = self.CM.rsh(node, "crm_attribute -l forever -n testattr -v testval -N %s" % (self.remote_node), verbose=1)
         if rc != 0:
             self.fail("Failed to set remote-node attribute. rc:%s output:%s" % (rc, line))
             return
 
         (rc, _) = self.CM.rsh(node, "crm_attribute -l forever -n testattr -q -N %s" % (self.remote_node), verbose=1)
         if rc != 0:
             self.fail("Failed to get remote-node attribute")
             return
 
         (rc, _) = self.CM.rsh(node, "crm_attribute -l forever -n testattr -D -N %s" % (self.remote_node), verbose=1)
         if rc != 0:
             self.fail("Failed to delete remote-node attribute")
             return
 
     def cleanup_metal(self, node):
         self.restore_services(node)
 
         if self.pcmk_started == 0:
             return
 
         pats = [ ]
 
         watch = self.create_watch(pats, 120)
         watch.setwatch()
 
         if self.remote_rsc_added == 1:
             pats.append(self.templates["Pat:RscOpOK"] % ("stop", self.remote_rsc))
         if self.remote_node_added == 1:
             pats.append(self.templates["Pat:RscOpOK"] % ("stop", self.remote_node))
 
         self.set_timer("remoteMetalCleanup")
 
         self.resume_pcmk_remote(node)
 
         if self.remote_rsc_added == 1:
 
             # Remove dummy resource added for remote node tests
             self.debug("Cleaning up dummy rsc put on remote node")
             self.rsh(self.get_othernode(node), "crm_resource -U -r %s" % self.remote_rsc)
             self.del_rsc(node, self.remote_rsc)
 
         if self.remote_node_added == 1:
 
             # Remove remote node's connection resource
             self.debug("Cleaning up remote node connection resource")
             self.rsh(self.get_othernode(node), "crm_resource -U -r %s" % (self.remote_node))
             self.del_rsc(node, self.remote_node)
 
         watch.lookforall()
         self.log_timer("remoteMetalCleanup")
 
         if watch.unmatched:
             self.fail("Unmatched patterns: %s" % watch.unmatched)
 
         self.stop_pcmk_remote(node)
 
         self.debug("Waiting for the cluster to recover")
         self.CM.cluster_stable()
 
         if self.remote_node_added == 1:
             # Remove remote node itself
             self.debug("Cleaning up node entry for remote node")
             self.rsh(self.get_othernode(node), "crm_node --force --remove %s" % self.remote_node)
 
     def setup_env(self, node):
 
         self.remote_node = "remote-%s" % (node)
 
         # we are assuming if all nodes have a key, that it is
         # the right key... If any node doesn't have a remote
         # key, we regenerate it everywhere.
         if self.rsh.exists_on_all("/etc/pacemaker/authkey", self.Env["nodes"]):
             return
 
         # create key locally
         (handle, keyfile) = tempfile.mkstemp(".cts")
         os.close(handle)
         subprocess.check_call(["dd", "if=/dev/urandom", "of=%s" % keyfile, "bs=4096", "count=1"],
             stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
 
         # sync key throughout the cluster
         for node in self.Env["nodes"]:
             self.rsh(node, "mkdir -p --mode=0750 /etc/pacemaker")
             self.rsh.copy(keyfile, "root@%s:/etc/pacemaker/authkey" % node)
             self.rsh(node, "chgrp haclient /etc/pacemaker /etc/pacemaker/authkey")
             self.rsh(node, "chmod 0640 /etc/pacemaker/authkey")
         os.unlink(keyfile)
 
     def is_applicable(self):
         if not self.is_applicable_common():
             return False
 
         for node in self.Env["nodes"]:
             (rc, _) = self.rsh(node, "which pacemaker-remoted >/dev/null 2>&1")
             if rc != 0:
                 return False
         return True
 
     def start_new_test(self, node):
         self.incr("calls")
         self.reset()
 
         ret = self.startall(None)
         if not ret:
             return self.failure("setup failed: could not start all nodes")
 
         self.setup_env(node)
         self.start_metal(node)
         self.add_dummy_rsc(node)
         return True
 
     def __call__(self, node):
         return self.failure("This base class is not meant to be called directly.")
 
     def errorstoignore(self):
         '''Return list of errors which should be ignored'''
         return [ r"""is running on remote.*which isn't allowed""",
                  r"""Connection terminated""",
                  r"""Could not send remote""",
                 ]
 
 # RemoteDriver is just a base class for other tests, so it is not added to AllTestClasses
 
 
 class RemoteBasic(RemoteDriver):
 
     def __call__(self, node):
         '''Perform the 'RemoteBaremetal' test. '''
 
         if not self.start_new_test(node):
             return self.failure(self.fail_string)
 
         self.test_attributes(node)
         self.cleanup_metal(node)
 
         self.debug("Waiting for the cluster to recover")
         self.CM.cluster_stable()
         if self.failed:
             return self.failure(self.fail_string)
 
         return self.success()
 
 AllTestClasses.append(RemoteBasic)
 
 class RemoteStonithd(RemoteDriver):
 
     def __call__(self, node):
         '''Perform the 'RemoteStonithd' test. '''
 
         if not self.start_new_test(node):
             return self.failure(self.fail_string)
 
         self.fail_connection(node)
         self.cleanup_metal(node)
 
         self.debug("Waiting for the cluster to recover")
         self.CM.cluster_stable()
         if self.failed:
             return self.failure(self.fail_string)
 
         return self.success()
 
     def is_applicable(self):
         if not RemoteDriver.is_applicable(self):
             return False
 
         if "DoFencing" in list(self.Env.keys()):
             return self.Env["DoFencing"]
 
         return True
 
     def errorstoignore(self):
         ignore_pats = [
             r"Lost connection to Pacemaker Remote node",
             r"Software caused connection abort",
             r"pacemaker-controld.*:\s+error.*: Operation remote-.*_monitor",
             r"pacemaker-controld.*:\s+error.*: Result of monitor operation for remote-.*",
             r"schedulerd.*:\s+Recover\s+remote-.*\s+\(.*\)",
             r"error: Result of monitor operation for .* on remote-.*: Internal communication failure",
         ]
 
         ignore_pats.extend(RemoteDriver.errorstoignore(self))
         return ignore_pats
 
 AllTestClasses.append(RemoteStonithd)
 
 
 class RemoteMigrate(RemoteDriver):
 
     def __call__(self, node):
         '''Perform the 'RemoteMigrate' test. '''
 
         if not self.start_new_test(node):
             return self.failure(self.fail_string)
 
         self.migrate_connection(node)
         self.cleanup_metal(node)
 
         self.debug("Waiting for the cluster to recover")
         self.CM.cluster_stable()
         if self.failed:
             return self.failure(self.fail_string)
 
         return self.success()
 
     def is_applicable(self):
         if not RemoteDriver.is_applicable(self):
             return 0
         # This test requires at least three nodes: one to convert to a
         # remote node, one to host the connection originally, and one
         # to migrate the connection to.
         if len(self.Env["nodes"]) < 3:
             return 0
         return 1
 
 AllTestClasses.append(RemoteMigrate)
 
 
 class RemoteRscFailure(RemoteDriver):
 
     def __call__(self, node):
         '''Perform the 'RemoteRscFailure' test. '''
 
         if not self.start_new_test(node):
             return self.failure(self.fail_string)
 
         # This is an important step. We are migrating the connection
         # before failing the resource. This verifies that the migration
         # has properly maintained control over the remote-node.
         self.migrate_connection(node)
 
         self.fail_rsc(node)
         self.cleanup_metal(node)
 
         self.debug("Waiting for the cluster to recover")
         self.CM.cluster_stable()
         if self.failed:
             return self.failure(self.fail_string)
 
         return self.success()
 
     def errorstoignore(self):
         ignore_pats = [
             r"schedulerd.*: Recover\s+remote-rsc\s+\(.*\)",
             r"Dummy.*: No process state file found",
         ]
 
         ignore_pats.extend(RemoteDriver.errorstoignore(self))
         return ignore_pats
 
     def is_applicable(self):
         if not RemoteDriver.is_applicable(self):
             return 0
         # This test requires at least three nodes: one to convert to a
         # remote node, one to host the connection originally, and one
         # to migrate the connection to.
         if len(self.Env["nodes"]) < 3:
             return 0
         return 1
 
 AllTestClasses.append(RemoteRscFailure)
 
 # vim:ts=4:sw=4:et:
diff --git a/cts/lab/ClusterManager.py b/cts/lab/ClusterManager.py
index e4815bca04..5e54466407 100644
--- a/cts/lab/ClusterManager.py
+++ b/cts/lab/ClusterManager.py
@@ -1,1085 +1,1085 @@
 """ ClusterManager class for Pacemaker's Cluster Test Suite (CTS)
 """
 
 __copyright__ = """Copyright 2000-2023 the Pacemaker project contributors.
 Certain portions by Huang Zhen <zhenhltc@cn.ibm.com> are copyright 2004
 International Business Machines. The version control history for this file
 may have further details."""
 __license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
 
 import os
 import re
 import time
 
 from collections import UserDict
 
 from cts.CIB         import ConfigFactory
 from cts.CTS         import NodeStatus, Process
 from cts.CTStests    import AuditResource
 from cts.watcher     import LogWatcher
 
 from pacemaker.buildoptions import BuildOptions
 from pacemaker._cts.environment import EnvFactory
 from pacemaker._cts.logging import LogFactory
 from pacemaker._cts.patterns import PatternSelector
 from pacemaker._cts.remote import RemoteFactory
 
 has_log_stats = {}
 log_stats_bin = BuildOptions.DAEMON_DIR + "/cts_log_stats.sh"
 log_stats = """
 #!%s
 # Tool for generating system load reports while CTS runs
 
 trap "" 1
 
 f=$1; shift
 action=$1; shift
 base=`basename $0`
 
 if [ ! -e $f ]; then
     echo "Time, Load 1, Load 5, Load 15, Test Marker" > $f
 fi
 
 function killpid() {
     if [ -e $f.pid ]; then
        kill -9 `cat $f.pid`
        rm -f $f.pid
     fi
 }
 
 function status() {
     if [ -e $f.pid ]; then
        kill -0 `cat $f.pid`
        return $?
     else
        return 1
     fi
 }
 
 function start() {
     # Is it already running?
     if
         status
     then
         return
     fi
 
     echo Active as $$
     echo $$ > $f.pid
 
     while [ 1 = 1 ]; do
         uptime | sed s/up.*:/,/ | tr '\\n' ',' >> $f
         #top -b -c -n1 | grep -e usr/libexec/pacemaker | grep -v -e grep -e python | head -n 1 | sed s@/usr/libexec/pacemaker/@@ | awk '{print " 0, "$9", "$10", "$12}' | tr '\\n' ',' >> $f
         echo 0 >> $f
         sleep 5
     done
 }
 
 case $action in
     start)
         start
         ;;
     start-bg|bg)
         # Use c --ssh -- ./stats.sh file start-bg
         nohup $0 $f start >/dev/null 2>&1 </dev/null &
         ;;
     stop)
         killpid
         ;;
     delete)
         killpid
         rm -f $f
         ;;
     mark)
         uptime | sed s/up.*:/,/ | tr '\\n' ',' >> $f
         echo " $*" >> $f
         start
         ;;
     *)
         echo "Unknown action: $action."
         ;;
 esac
 """ % (BuildOptions.BASH_PATH)
 
 class ClusterManager(UserDict):
     '''The Cluster Manager class.
     This is a subclass of the Python dictionary class.
     (This is because it contains lots of {name, value} pairs,
     not because its behavior is all that similar to a
     dictionary in other ways.)
 
     This is an abstract class that implements high-level
     operations on the cluster and/or its cluster managers.
     Actual cluster manager classes are subclasses of this type.
 
     One of the things we do is track the state we think every node should
     be in.
     '''
 
     def __InitialConditions(self):
         #if os.geteuid() != 0:
         #  raise ValueError("Must Be Root!")
         pass
 
     def _finalConditions(self):
         for key in list(self.keys()):
             if self[key] is None:
                 raise ValueError("Improper derivation: self[" + key + "] must be overridden by subclass.")
 
     def __init__(self):
         self.Env = EnvFactory().getInstance()
         self.templates = PatternSelector(self.Env["Name"])
         self.__InitialConditions()
         self.logger = LogFactory()
         self.TestLoggingLevel=0
         self.data = {}
         self.name = self.Env["Name"]
 
         self.rsh = RemoteFactory().getInstance()
         self.ShouldBeStatus={}
         self.ns = NodeStatus(self.Env)
         self.OurNode = os.uname()[1].lower()
         self.__instance_errorstoignore = []
 
         self.fastfail = 0
         self.cib_installed = 0
         self.config = None
         self.cluster_monitor = 0
         self.use_short_names = 1
 
         if self.Env["DoBSC"]:
             del self.templates["Pat:They_stopped"]
 
         self._finalConditions()
 
         self.check_transitions = 0
         self.check_elections = 0
         self.CIBsync = {}
         self.CibFactory = ConfigFactory(self)
         self.cib = self.CibFactory.createConfig(self.Env["Schema"])
 
     def __getitem__(self, key):
         if key == "Name":
             return self.name
 
         print("FIXME: Getting %s from %s" % (key, repr(self)))
         if key in self.data:
             return self.data[key]
 
         return self.templates.get_patterns(key)
 
     def __setitem__(self, key, value):
         print("FIXME: Setting %s=%s on %s" % (key, value, repr(self)))
         self.data[key] = value
 
     def key_for_node(self, node):
         return node
 
     def instance_errorstoignore_clear(self):
         '''Allows the test scenario to reset instance errors to ignore on each iteration.'''
         self.__instance_errorstoignore = []
 
     def instance_errorstoignore(self):
         '''Return list of errors which are 'normal' for a specific test instance'''
         return self.__instance_errorstoignore
 
     def log(self, args):
         self.logger.log(args)
 
     def debug(self, args):
         self.logger.debug(args)
 
     def upcount(self):
         '''How many nodes are up?'''
         count = 0
         for node in self.Env["nodes"]:
             if self.ShouldBeStatus[node] == "up":
                 count = count + 1
         return count
 
     def install_support(self, command="install"):
         for node in self.Env["nodes"]:
             self.rsh(node, BuildOptions.DAEMON_DIR + "/cts-support " + command)
 
     def prepare_fencing_watcher(self, name):
         # If we don't have quorum now but get it as a result of starting this node,
         # then a bunch of nodes might get fenced
         upnode = None
         if self.HasQuorum(None):
             self.debug("Have quorum")
             return None
 
         if not self.templates["Pat:Fencing_start"]:
             print("No start pattern")
             return None
 
         if not self.templates["Pat:Fencing_ok"]:
             print("No ok pattern")
             return None
 
         stonith = None
         stonithPats = []
         for peer in self.Env["nodes"]:
             if self.ShouldBeStatus[peer] != "up":
                 stonithPats.append(self.templates["Pat:Fencing_ok"] % peer)
                 stonithPats.append(self.templates["Pat:Fencing_start"] % peer)
 
         stonith = LogWatcher(self.Env["LogFileName"], stonithPats, "StartupFencing", 0, hosts=self.Env["nodes"], kind=self.Env["LogWatcher"])
         stonith.setwatch()
         return stonith
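 
     # Illustrative sketch (not part of this patch): StartaCM() below pairs the
     # two fencing helpers around a node start, roughly:
     #
     #   stonith = self.prepare_fencing_watcher(node)   # set the watch before starting
     #   self.rsh(node, self.templates["StartCmd"])     # start the cluster manager
     #   self.fencing_cleanup(node, stonith)            # reconcile any peers that were fenced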
 
     def fencing_cleanup(self, node, stonith):
         peer_list = []
         peer_state = {}
 
         self.debug("Looking for nodes that were fenced as a result of %s starting" % node)
 
         # If we just started a node, we may now have quorum (and permission to fence)
         if not stonith:
             self.debug("Nothing to do")
             return peer_list
 
         q = self.HasQuorum(None)
         if not q and len(self.Env["nodes"]) > 2:
             # We didn't gain quorum - we shouldn't have shot anyone
             self.debug("Quorum: %d Len: %d" % (q, len(self.Env["nodes"])))
             return peer_list
 
         for n in self.Env["nodes"]:
             peer_state[n] = "unknown"
 
         # Now see if any states need to be updated
         self.debug("looking for: " + repr(stonith.regexes))
         shot = stonith.look(0)
         while shot:
             line = repr(shot)
             self.debug("Found: " + line)
             del stonith.regexes[stonith.whichmatch]
 
             # Extract node name
             peer = None
             for n in self.Env["nodes"]:
                 if re.search(self.templates["Pat:Fencing_ok"] % n, shot):
                     peer = n
                     peer_state[peer] = "complete"
                     self.__instance_errorstoignore.append(self.templates["Pat:Fencing_ok"] % peer)
 
                 elif peer_state[n] != "complete" and re.search(self.templates["Pat:Fencing_start"] % n, shot):
                     # TODO: Correctly detect multiple fencing operations for the same host
                     peer = n
                     peer_state[peer] = "in-progress"
                     self.__instance_errorstoignore.append(self.templates["Pat:Fencing_start"] % peer)
 
             if not peer:
                 self.logger.log("ERROR: Unknown stonith match: %s" % line)
 
             elif not peer in peer_list:
                 self.debug("Found peer: " + peer)
                 peer_list.append(peer)
 
             # Get the next one
             shot = stonith.look(60)
 
         for peer in peer_list:
 
             self.debug("   Peer %s was fenced as a result of %s starting: %s" % (peer, node, peer_state[peer]))
             if self.Env["at-boot"]:
                 self.ShouldBeStatus[peer] = "up"
             else:
                 self.ShouldBeStatus[peer] = "down"
 
             if peer_state[peer] == "in-progress":
                 # Wait for any in-progress operations to complete
                 shot = stonith.look(60)
                 while len(stonith.regexes) and shot:
                     line = repr(shot)
                     self.debug("Found: " + line)
                     del stonith.regexes[stonith.whichmatch]
                     shot = stonith.look(60)
 
             # Now make sure the node is alive too
             self.ns.WaitForNodeToComeUp(peer, self.Env["DeadTime"])
 
             # Poll until it comes up
             if self.Env["at-boot"]:
                 if not self.StataCM(peer):
                     time.sleep(self.Env["StartTime"])
 
                 if not self.StataCM(peer):
                     self.logger.log("ERROR: Peer %s failed to restart after being fenced" % peer)
                     return None
 
         return peer_list
 
     def StartaCM(self, node, verbose=False):
 
         '''Start up the cluster manager on a given node'''
         if verbose: self.logger.log("Starting %s on node %s" % (self.templates["Name"], node))
         else: self.debug("Starting %s on node %s" % (self.templates["Name"], node))
         ret = 1
 
         if not node in self.ShouldBeStatus:
             self.ShouldBeStatus[node] = "down"
 
         if self.ShouldBeStatus[node] != "down":
             return 1
 
         patterns = []
         # Technically we should always be able to notice ourselves starting
         patterns.append(self.templates["Pat:Local_started"] % node)
         if self.upcount() == 0:
             patterns.append(self.templates["Pat:DC_started"] % node)
         else:
             patterns.append(self.templates["Pat:NonDC_started"] % node)
 
         watch = LogWatcher(
             self.Env["LogFileName"], patterns, "StartaCM", self.Env["StartTime"]+10, hosts=self.Env["nodes"], kind=self.Env["LogWatcher"])
 
         self.install_config(node)
 
         self.ShouldBeStatus[node] = "any"
         if self.StataCM(node) and self.cluster_stable(self.Env["DeadTime"]):
             self.logger.log ("%s was already started" % (node))
             return 1
 
         stonith = self.prepare_fencing_watcher(node)
         watch.setwatch()
 
         (rc, _) = self.rsh(node, self.templates["StartCmd"])
         if rc != 0:
             self.logger.log ("Warn: Start command failed on node %s" % (node))
             self.fencing_cleanup(node, stonith)
             return None
 
         self.ShouldBeStatus[node] = "up"
         watch_result = watch.lookforall()
 
         if watch.unmatched:
             for regex in watch.unmatched:
                 self.logger.log ("Warn: Startup pattern not found: %s" % (regex))
 
         if watch_result and self.cluster_stable(self.Env["DeadTime"]):
             #self.debug("Found match: "+ repr(watch_result))
             self.fencing_cleanup(node, stonith)
             return 1
 
         elif self.StataCM(node) and self.cluster_stable(self.Env["DeadTime"]):
             self.fencing_cleanup(node, stonith)
             return 1
 
         self.logger.log ("Warn: Start failed for node %s" % (node))
         return None
 
     def StartaCMnoBlock(self, node, verbose=False):
 
         '''Start up the cluster manager on a given node in non-blocking mode'''
 
         if verbose: self.logger.log("Starting %s on node %s" % (self["Name"], node))
         else: self.debug("Starting %s on node %s" % (self["Name"], node))
 
         self.install_config(node)
         self.rsh(node, self.templates["StartCmd"], synchronous=False)
         self.ShouldBeStatus[node] = "up"
         return 1
 
     def StopaCM(self, node, verbose=False, force=False):
 
         '''Stop the cluster manager on a given node'''
 
         if verbose: self.logger.log("Stopping %s on node %s" % (self["Name"], node))
         else: self.debug("Stopping %s on node %s" % (self["Name"], node))
 
         if self.ShouldBeStatus[node] != "up" and force == False:
             return 1
 
         (rc, _) = self.rsh(node, self.templates["StopCmd"])
         if rc == 0:
             # Make sure we can continue even if corosync leaks
             # fdata-* is the old name
             #self.rsh(node, "rm -rf /dev/shm/qb-* /dev/shm/fdata-*")
             self.ShouldBeStatus[node] = "down"
             self.cluster_stable(self.Env["DeadTime"])
             return 1
         else:
             self.logger.log ("ERROR: Could not stop %s on node %s" % (self["Name"], node))
 
         return None
 
     def StopaCMnoBlock(self, node):
 
         '''Stop the cluster manager on a given node in non-blocking mode'''
 
         self.debug("Stopping %s on node %s" % (self["Name"], node))
 
         self.rsh(node, self.templates["StopCmd"], synchronous=False)
         self.ShouldBeStatus[node] = "down"
         return 1
 
     def RereadCM(self, node):
 
         '''Force the cluster manager on a given node to reread its config
            This may be a no-op on certain cluster managers.
         '''
         (rc, _) = self.rsh(node, self.templates["RereadCmd"])
         if rc == 0:
             return 1
         else:
             self.logger.log ("Could not force %s on node %s to reread its config"
                              % (self["Name"], node))
         return None
 
     def startall(self, nodelist=None, verbose=False, quick=False):
 
         '''Start the cluster manager on every node in the cluster.
         We can do it on a subset of the cluster if nodelist is not None.
         '''
         map = {}
         if not nodelist:
             nodelist = self.Env["nodes"]
 
         for node in nodelist:
             if self.ShouldBeStatus[node] == "down":
                 self.ns.WaitForAllNodesToComeUp(nodelist, 300)
 
         if not quick:
             # This is used for "basic sanity checks", so only start one node ...
             if not self.StartaCM(node, verbose=verbose):
                 return 0
             return 1
 
         # Approximation of SimulStartList for --boot
         watchpats = [ ]
         watchpats.append(self.templates["Pat:DC_IDLE"])
         for node in nodelist:
             watchpats.append(self.templates["Pat:InfraUp"] % node)
             watchpats.append(self.templates["Pat:PacemakerUp"] % node)
             watchpats.append(self.templates["Pat:Local_started"] % node)
             watchpats.append(self.templates["Pat:They_up"] % (nodelist[0], node))
 
         #   Start all the nodes - at about the same time...
         watch = LogWatcher(self.Env["LogFileName"], watchpats, "fast-start", self.Env["DeadTime"]+10, hosts=self.Env["nodes"], kind=self.Env["LogWatcher"])
         watch.setwatch()
 
         if not self.StartaCM(nodelist[0], verbose=verbose):
             return 0
         for node in nodelist:
             self.StartaCMnoBlock(node, verbose=verbose)
 
         watch.lookforall()
         if watch.unmatched:
             for regex in watch.unmatched:
                 self.logger.log ("Warn: Startup pattern not found: %s" % (regex))
 
         if not self.cluster_stable():
             self.logger.log("Cluster did not stabilize")
             return 0
 
         return 1
 
     def stopall(self, nodelist=None, verbose=False, force=False):
 
         '''Stop the cluster managers on every node in the cluster.
         We can do it on a subset of the cluster if nodelist is not None.
         '''
 
         ret = 1
         map = {}
         if not nodelist:
             nodelist = self.Env["nodes"]
         for node in nodelist:
             if self.ShouldBeStatus[node] == "up" or force == True:
                 if not self.StopaCM(node, verbose=verbose, force=force):
                     ret = 0
         return ret
 
     def rereadall(self, nodelist=None):
 
         '''Force the cluster managers on every node in the cluster
         to reread their config files.  We can do it on a subset of the
         cluster if nodelist is not None.
         '''
 
         map = {}
         if not nodelist:
             nodelist = self.Env["nodes"]
         for node in nodelist:
             if self.ShouldBeStatus[node] == "up":
                 self.RereadCM(node)
 
     def statall(self, nodelist=None):
 
         '''Return the status of the cluster managers in the cluster.
         We can do it on a subset of the cluster if nodelist is not None.
         '''
 
         result = {}
         if not nodelist:
             nodelist = self.Env["nodes"]
         for node in nodelist:
             if self.StataCM(node):
                 result[node] = "up"
             else:
                 result[node] = "down"
         return result
 
     def isolate_node(self, target, nodes=None):
         '''isolate the communication between the nodes'''
         if not nodes:
             nodes = self.Env["nodes"]
 
         for node in nodes:
             if node != target:
                 (rc, _) = self.rsh(target, self.templates["BreakCommCmd"] % self.key_for_node(node))
                 if rc != 0:
                     self.logger.log("Could not break the communication between %s and %s: %d" % (target, node, rc))
                     return None
                 else:
                     self.debug("Communication cut between %s and %s" % (target, node))
         return 1
 
     def unisolate_node(self, target, nodes=None):
         '''fix the communication between the nodes'''
         if not nodes:
             nodes = self.Env["nodes"]
 
         for node in nodes:
             if node != target:
                 restored = 0
 
                 # Limit the amount of time we have asynchronous connectivity for
                 # Restore both sides as simultaneously as possible
                 self.rsh(target, self.templates["FixCommCmd"] % self.key_for_node(node), synchronous=False)
                 self.rsh(node, self.templates["FixCommCmd"] % self.key_for_node(target), synchronous=False)
                 self.debug("Communication restored between %s and %s" % (target, node))
 
     def reducecomm_node(self,node):
         '''reduce the communication between the nodes'''
         (rc, _) = self.rsh(node, self.templates["ReduceCommCmd"]%(self.Env["XmitLoss"],self.Env["RecvLoss"]))
         if rc == 0:
             return 1
         else:
             self.logger.log("Could not reduce the communication between the nodes from node: %s" % node)
         return None
 
     def restorecomm_node(self,node):
         '''restore the saved communication between the nodes'''
         rc = 0
         if float(self.Env["XmitLoss"]) != 0 or float(self.Env["RecvLoss"]) != 0 :
             (rc, _) = self.rsh(node, self.templates["RestoreCommCmd"])
         if rc == 0:
             return 1
         else:
             self.logger.log("Could not restore the communication between the nodes from node: %s" % node)
         return None
 
     def oprofileStart(self, node=None):
         if not node:
             for n in self.Env["oprofile"]:
                 self.oprofileStart(n)
 
         elif node in self.Env["oprofile"]:
             self.debug("Enabling oprofile on %s" % node)
             self.rsh(node, "opcontrol --init")
             self.rsh(node, "opcontrol --setup --no-vmlinux --separate=lib --callgraph=20 --image=all")
             self.rsh(node, "opcontrol --start")
             self.rsh(node, "opcontrol --reset")
 
     def oprofileSave(self, test, node=None):
         if not node:
             for n in self.Env["oprofile"]:
                 self.oprofileSave(test, n)
 
         elif node in self.Env["oprofile"]:
             self.rsh(node, "opcontrol --dump")
             self.rsh(node, "opcontrol --save=cts.%d" % test)
             # Read back with: opreport -l session:cts.0 image:<directory>/c*
             if None:
                 self.rsh(node, "opcontrol --reset")
             else:
                 self.oprofileStop(node)
                 self.oprofileStart(node)
 
     def oprofileStop(self, node=None):
         if not node:
             for n in self.Env["oprofile"]:
                 self.oprofileStop(n)
 
         elif node in self.Env["oprofile"]:
             self.debug("Stopping oprofile on %s" % node)
             self.rsh(node, "opcontrol --reset")
             self.rsh(node, "opcontrol --shutdown 2>&1 > /dev/null")
 
 
     def StatsExtract(self):
         if not self.Env["stats"]:
             return
 
         for host in self.Env["nodes"]:
             log_stats_file = "%s/cts-stats.csv" % BuildOptions.DAEMON_DIR
             if host in has_log_stats:
                 self.rsh(host, '''bash %s %s stop''' % (log_stats_bin, log_stats_file))
                 (_, lines) = self.rsh(host, '''cat %s''' % log_stats_file, verbose=1)
                 self.rsh(host, '''bash %s %s delete''' % (log_stats_bin, log_stats_file))
 
                 fname = "cts-stats-%d-nodes-%s.csv" % (len(self.Env["nodes"]), host)
                 print("Extracted stats: %s" % fname)
                 fd = open(fname, "a")
                 fd.writelines(lines)
                 fd.close()
 
     def StatsMark(self, testnum):
         '''Mark the test number in the stats log'''
 
         global has_log_stats
         if not self.Env["stats"]:
             return
 
         for host in self.Env["nodes"]:
             log_stats_file = "%s/cts-stats.csv" % BuildOptions.DAEMON_DIR
             if not host in has_log_stats:
 
                 global log_stats
                 global log_stats_bin
                 script=log_stats
                 #script = re.sub("\\\\", "\\\\", script)
                 script = re.sub('\"', '\\\"', script)
                 script = re.sub("'", "\'", script)
                 script = re.sub("`", "\`", script)
                 script = re.sub("\$", "\\\$", script)
 
                 self.debug("Installing %s on %s" % (log_stats_bin, host))
                 self.rsh(host, '''echo "%s" > %s''' % (script, log_stats_bin), verbose=0)
                 self.rsh(host, '''bash %s %s delete''' % (log_stats_bin, log_stats_file))
                 has_log_stats[host] = 1
 
             # Now mark it
             self.rsh(host, '''bash %s %s mark %s''' % (log_stats_bin, log_stats_file, testnum), synchronous=False)
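 
     # Illustrative sketch (an assumption about typical use, not code in this
     # patch): the exerciser marks each test in the stats log and extracts the
     # per-node CSV files once the run is over:
     #
     #   cm.StatsMark(testnum)    # install the helper if needed, then mark the test
     #   ... run the test ...
     #   cm.StatsExtract()        # stop collection and copy back cts-stats-*.csv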
 
     def errorstoignore(self):
         # At some point implement a more elegant solution that
         #   also produces a report at the end
         """ Return a list of known error messages that should be ignored """
         return self.templates.get_patterns("BadNewsIgnore")
 
     def install_config(self, node):
         if not self.ns.WaitForNodeToComeUp(node):
             self.log("Node %s is not up." % node)
             return None
 
         if not node in self.CIBsync and self.Env["ClobberCIB"] == 1:
             self.CIBsync[node] = 1
             self.rsh(node, "rm -f " + BuildOptions.CIB_DIR + "/cib*")
 
             # Only install the CIB on the first node, all the other ones will pick it up from there
             if self.cib_installed == 1:
                 return None
 
             self.cib_installed = 1
             if self.Env["CIBfilename"] == None:
                 self.log("Installing Generated CIB on node %s" % (node))
                 self.cib.install(node)
 
             else:
                 self.log("Installing CIB (%s) on node %s" % (self.Env["CIBfilename"], node))
                 if self.rsh.copy(self.Env["CIBfilename"], "root@" + (self.templates["CIBfile"] % node)) != 0:
                     raise ValueError("Cannot scp file to %s" % node)
 
             self.rsh(node, "chown " + BuildOptions.DAEMON_USER + " " + BuildOptions.CIB_DIR + "/cib.xml")
 
     def prepare(self):
         '''Finish the Initialization process. Prepare to test...'''
 
         self.partitions_expected = 1
         for node in self.Env["nodes"]:
             self.ShouldBeStatus[node] = ""
             if self.Env["experimental-tests"]:
                 self.unisolate_node(node)
             self.StataCM(node)
 
     def test_node_CM(self, node):
         '''Report the status of the cluster manager on a given node'''
 
         watchpats = [ ]
         watchpats.append("Current ping state: (S_IDLE|S_NOT_DC)")
         watchpats.append(self.templates["Pat:NonDC_started"] % node)
         watchpats.append(self.templates["Pat:DC_started"] % node)
         idle_watch = LogWatcher(self.Env["LogFileName"], watchpats, "ClusterIdle", hosts=[node], kind=self.Env["LogWatcher"])
         idle_watch.setwatch()
 
         (_, out) = self.rsh(node, self.templates["StatusCmd"]%node, verbose=1)
 
         if not out:
             out = ""
         else:
             out = out[0].strip()
 
         self.debug("Node %s status: '%s'" %(node, out))
 
         if out.find('ok') < 0:
             if self.ShouldBeStatus[node] == "up":
                 self.log(
                     "Node status for %s is %s but we think it should be %s"
                     % (node, "down", self.ShouldBeStatus[node]))
             self.ShouldBeStatus[node] = "down"
             return 0
 
         if self.ShouldBeStatus[node] == "down":
             self.log(
                 "Node status for %s is %s but we think it should be %s: %s"
                 % (node, "up", self.ShouldBeStatus[node], out))
 
         self.ShouldBeStatus[node] = "up"
 
         # check the output first - because syslog-ng loses messages
         if out.find('S_NOT_DC') != -1:
             # Up and stable
             return 2
         if out.find('S_IDLE') != -1:
             # Up and stable
             return 2
 
         # fall back to syslog-ng and wait
         if not idle_watch.look():
             # just up
             self.debug("Warn: Node %s is unstable: %s" % (node, out))
             return 1
 
         # Up and stable
         return 2
 
     # Is the node up or is the node down
     def StataCM(self, node):
         '''Report the status of the cluster manager on a given node'''
 
         if self.test_node_CM(node) > 0:
             return 1
         return None
 
     # Being up and being stable is not the same question...
     def node_stable(self, node):
         '''Report the status of the cluster manager on a given node'''
 
         if self.test_node_CM(node) == 2:
             return 1
         self.log("Warn: Node %s not stable" % (node))
         return None
 
     def partition_stable(self, nodes, timeout=None):
         watchpats = [ ]
         watchpats.append("Current ping state: S_IDLE")
         watchpats.append(self.templates["Pat:DC_IDLE"])
         self.debug("Waiting for cluster stability...")
 
         if timeout == None:
             timeout = self.Env["DeadTime"]
 
         if len(nodes) < 3:
             self.debug("Cluster is inactive")
             return 1
 
         idle_watch = LogWatcher(self.Env["LogFileName"], watchpats, "ClusterStable", timeout, hosts=nodes.split(), kind=self.Env["LogWatcher"])
         idle_watch.setwatch()
 
         for node in nodes.split():
             # have each node dump its current state
             self.rsh(node, self.templates["StatusCmd"] % node, verbose=1)
 
         ret = idle_watch.look()
         while ret:
             self.debug(ret)
             for node in nodes.split():
                 if re.search(node, ret):
                     return 1
             ret = idle_watch.look()
 
         self.debug("Warn: Partition %s not IDLE after %ds" % (repr(nodes), timeout))
         return None
 
     def cluster_stable(self, timeout=None, double_check=False):
         partitions = self.find_partitions()
 
         for partition in partitions:
             if not self.partition_stable(partition, timeout):
                 return None
 
         if double_check:
             # Make sure we are really stable and that all resources,
             # including those that depend on transient node attributes,
             # are started if they were going to be
             time.sleep(5)
             for partition in partitions:
                 if not self.partition_stable(partition, timeout):
                     return None
 
         return 1
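 
     # Illustrative sketch (not part of this patch): tests trigger an action and
     # then wait for every partition to report an idle DC, roughly as
     # RemoteRscFailure does above:
     #
     #   self.fail_rsc(node)
     #   self.CM.cluster_stable()                    # wait for stability
     #   self.CM.cluster_stable(double_check=True)   # optional re-check after 5s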
 
     def is_node_dc(self, node, status_line=None):
         rc = 0
 
         if not status_line:
             (_, out) = self.rsh(node, self.templates["StatusCmd"]%node, verbose=1)
 
             if out:
                 status_line = out[0].strip()
 
         if not status_line:
             rc = 0
         elif status_line.find('S_IDLE') != -1:
             rc = 1
         elif status_line.find('S_INTEGRATION') != -1:
             rc = 1
         elif status_line.find('S_FINALIZE_JOIN') != -1:
             rc = 1
         elif status_line.find('S_POLICY_ENGINE') != -1:
             rc = 1
         elif status_line.find('S_TRANSITION_ENGINE') != -1:
             rc = 1
 
         return rc
 
     def active_resources(self, node):
         (_, output) = self.rsh(node, "crm_resource -c", verbose=1)
         resources = []
         for line in output:
             if re.search("^Resource", line):
                 tmp = AuditResource(self, line)
                 if tmp.type == "primitive" and tmp.host == node:
                     resources.append(tmp.id)
         return resources
 
     def ResourceLocation(self, rid):
         ResourceNodes = []
         for node in self.Env["nodes"]:
             if self.ShouldBeStatus[node] == "up":
 
                 cmd = self.templates["RscRunning"] % (rid)
                 (rc, lines) = self.rsh(node, cmd)
 
                 if rc == 127:
                     self.log("Command '%s' failed. Binary or pacemaker-cts package not installed?" % cmd)
                     for line in lines:
                         self.log("Output: "+line)
                 elif rc == 0:
                     ResourceNodes.append(node)
 
         return ResourceNodes
 
     def find_partitions(self):
         ccm_partitions = []
 
         for node in self.Env["nodes"]:
             if self.ShouldBeStatus[node] == "up":
                 (_, out) = self.rsh(node, self.templates["PartitionCmd"], verbose=1)
 
                 if not out:
                     self.log("no partition details for %s" % node)
                     continue
 
                 partition = out[0].strip()
 
                 if len(partition) > 2:
                     nodes = partition.split()
                     nodes.sort()
                     partition = ' '.join(nodes)
 
                     found = 0
                     for a_partition in ccm_partitions:
                         if partition == a_partition:
                             found = 1
                     if found == 0:
                         self.debug("Adding partition from %s: %s" % (node, partition))
                         ccm_partitions.append(partition)
                     else:
                         self.debug("Partition '%s' from %s is consistent with existing entries" % (partition, node))
 
                 else:
                     self.log("bad partition details for %s" % node)
             else:
                 self.debug("Node %s is down... skipping" % node)
 
         self.debug("Found partitions: %s" % repr(ccm_partitions) )
         return ccm_partitions
 
     def HasQuorum(self, node_list):
         # If we are auditing a partition, then one side will
         #   have quorum and the other not.
         # So the caller needs to tell us which we are checking
         # If no value for node_list is specified... assume all nodes
         if not node_list:
             node_list = self.Env["nodes"]
 
         for node in node_list:
             if self.ShouldBeStatus[node] == "up":
                 (_, quorum) = self.rsh(node, self.templates["QuorumCmd"], verbose=1)
                 quorum = quorum[0].strip()
 
                 if quorum.find("1") != -1:
                     return 1
                 elif quorum.find("0") != -1:
                     return 0
                 else:
                     self.debug("WARN: Unexpected quorum test result from " + node + ":" + quorum)
 
         return 0
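 
     # Illustrative sketch (not part of this patch): when auditing a split
     # cluster, each side of a partition is checked separately, e.g.:
     #
     #   for partition in cm.find_partitions():
     #       has_quorum = cm.HasQuorum(partition.split())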
 
     def Components(self):
         complist = []
         common_ignore = [
                     "Pending action:",
                     "(ERROR|error): crm_log_message_adv:",
                     "(ERROR|error): MSG: No message to dump",
                     "pending LRM operations at shutdown",
                     "Lost connection to the CIB manager",
                     "Connection to the CIB terminated...",
                     "Sending message to the CIB manager FAILED",
                     "Action A_RECOVER .* not supported",
                     "(ERROR|error): stonithd_op_result_ready: not signed on",
                     "pingd.*(ERROR|error): send_update: Could not send update",
                     "send_ipc_message: IPC Channel to .* is not connected",
                     "unconfirmed_actions: Waiting on .* unconfirmed actions",
                     "cib_native_msgready: Message pending on command channel",
                     r": Performing A_EXIT_1 - forcefully exiting ",
                     r"Resource .* was active at shutdown.  You may ignore this error if it is unmanaged.",
             ]
 
         stonith_ignore = [
             r"Updating failcount for child_DoFencing",
             r"error.*: Fencer connection failed \(will retry\)",
             "pacemaker-execd.*(ERROR|error): stonithd_receive_ops_result failed.",
              ]
 
         stonith_ignore.extend(common_ignore)
 
         ccm = Process(self, "ccm", triggersreboot=self.fastfail, pats = [
                     "State transition .* S_RECOVERY",
                     "pacemaker-controld.*Action A_RECOVER .* not supported",
                     r"pacemaker-controld.*: Input I_TERMINATE .*from do_recover",
                     r"pacemaker-controld.*: Could not recover from internal error",
                     "pacemaker-controld.*I_ERROR.*crmd_cib_connection_destroy",
                     # these status numbers are likely wrong now
                     r"pacemaker-controld.*exited with status 2",
                     r"attrd.*exited with status 1",
                     r"cib.*exited with status 2",
 
 # Not if it was fenced
 #                    "A new node joined the cluster",
 
 #                    "WARN: determine_online_status: Node .* is unclean",
 #                    "Scheduling node .* for fencing",
 #                    "Executing .* fencing operation",
 #                    "tengine_stonith_callback: .*result=0",
 #                    "Processing I_NODE_JOIN:.* cause=C_HA_MESSAGE",
 #                    "State transition S_.* -> S_INTEGRATION.*input=I_NODE_JOIN",
                     "State transition S_STARTING -> S_PENDING",
                     ], badnews_ignore = common_ignore)
 
         based = Process(self, "pacemaker-based", triggersreboot=self.fastfail, pats = [
                     "State transition .* S_RECOVERY",
                     "Lost connection to the CIB manager",
                     "Connection to the CIB manager terminated",
                     r"pacemaker-controld.*: Input I_TERMINATE .*from do_recover",
                     "pacemaker-controld.*I_ERROR.*crmd_cib_connection_destroy",
                     r"pacemaker-controld.*: Could not recover from internal error",
                     # these status numbers are likely wrong now
                     r"pacemaker-controld.*exited with status 2",
                     r"attrd.*exited with status 1",
                     ], badnews_ignore = common_ignore)
 
         execd = Process(self, "pacemaker-execd", triggersreboot=self.fastfail, pats = [
                     "State transition .* S_RECOVERY",
                     "LRM Connection failed",
                     "pacemaker-controld.*I_ERROR.*lrm_connection_destroy",
                     "State transition S_STARTING -> S_PENDING",
                     r"pacemaker-controld.*: Input I_TERMINATE .*from do_recover",
                     r"pacemaker-controld.*: Could not recover from internal error",
                     # this status number is likely wrong now
                     r"pacemaker-controld.*exited with status 2",
                     ], badnews_ignore = common_ignore)
 
         controld = Process(self, "pacemaker-controld", triggersreboot=self.fastfail,
                     pats = [
 #                    "WARN: determine_online_status: Node .* is unclean",
 #                    "Scheduling node .* for fencing",
 #                    "Executing .* fencing operation",
 #                    "tengine_stonith_callback: .*result=0",
                     "State transition .* S_IDLE",
                     "State transition S_STARTING -> S_PENDING",
                     ], badnews_ignore = common_ignore)
 
         schedulerd = Process(self, "pacemaker-schedulerd", triggersreboot=self.fastfail, pats = [
                     "State transition .* S_RECOVERY",
                     r"pacemaker-controld.*: Input I_TERMINATE .*from do_recover",
                     r"pacemaker-controld.*: Could not recover from internal error",
                     r"pacemaker-controld.*CRIT.*: Connection to the scheduler failed",
                     "pacemaker-controld.*I_ERROR.*save_cib_contents",
                     # this status number is likely wrong now
                     r"pacemaker-controld.*exited with status 2",
                     ], badnews_ignore = common_ignore, dc_only=1)
 
-        if self.Env["DoFencing"] == 1 :
+        if self.Env["DoFencing"]:
             complist.append(Process(self, "stoniths", triggersreboot=self.fastfail, dc_pats = [
                         r"pacemaker-controld.*CRIT.*: Fencing daemon connection failed",
                         "Attempting connection to fencing daemon",
                     ], badnews_ignore = stonith_ignore))
 
         if self.fastfail == 0:
             ccm.pats.extend([
                 # these status numbers are likely wrong now
                 r"attrd.*exited with status 1",
                 r"pacemaker-(based|controld).*exited with status 2",
                 ])
             based.pats.extend([
                 # these status numbers are likely wrong now
                 r"attrd.*exited with status 1",
                 r"pacemaker-controld.*exited with status 2",
                 ])
             execd.pats.extend([
                 # these status numbers are likely wrong now
                 r"pacemaker-controld.*exited with status 2",
                 ])
 
         complist.append(ccm)
         complist.append(based)
         complist.append(execd)
         complist.append(controld)
         complist.append(schedulerd)
 
         return complist
 
     def StandbyStatus(self, node):
         (_, out) = self.rsh(node, self.templates["StandbyQueryCmd"] % node, verbose=1)
         if not out:
             return "off"
         out = out[0].strip()
         self.debug("Standby result: "+out)
         return out
 
     # status == "on" : Enter Standby mode
     # status == "off": Enter Active mode
     def SetStandbyMode(self, node, status):
         current_status = self.StandbyStatus(node)
         cmd = self.templates["StandbyCmd"] % (node, status)
         self.rsh(node, cmd)
         return True
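 
     # Illustrative sketch (not part of this patch): a standby test would flip
     # the mode and read it back, e.g.:
     #
     #   cm.SetStandbyMode(node, "on")            # enter standby
     #   assert cm.StandbyStatus(node) == "on"
     #   cm.SetStandbyMode(node, "off")           # return to active mode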
 
     def AddDummyRsc(self, node, rid):
         rsc_xml = """ '<resources>
                 <primitive class=\"ocf\" id=\"%s\" provider=\"pacemaker\" type=\"Dummy\">
                     <operations>
                        <op id=\"%s-interval-10s\" interval=\"10s\" name=\"monitor\"/>
                     </operations>
                 </primitive>
             </resources>'""" % (rid, rid)
         constraint_xml = """ '<constraints>
                 <rsc_location id=\"location-%s-%s\" node=\"%s\" rsc=\"%s\" score=\"INFINITY\"/>
             </constraints>'
             """ % (rid, node, node, rid)
 
         self.rsh(node, self.templates['CibAddXml'] % (rsc_xml))
         self.rsh(node, self.templates['CibAddXml'] % (constraint_xml))
 
     def RemoveDummyRsc(self, node, rid):
         constraint = "\"//rsc_location[@rsc='%s']\"" % (rid)
         rsc = "\"//primitive[@id='%s']\"" % (rid)
 
         self.rsh(node, self.templates['CibDelXpath'] % constraint)
         self.rsh(node, self.templates['CibDelXpath'] % rsc)
diff --git a/python/pacemaker/_cts/environment.py b/python/pacemaker/_cts/environment.py
index ac9933a365..7cfc74bda7 100644
--- a/python/pacemaker/_cts/environment.py
+++ b/python/pacemaker/_cts/environment.py
@@ -1,638 +1,638 @@
 """ Test environment classes for Pacemaker's Cluster Test Suite (CTS)
 """
 
 __copyright__ = "Copyright 2014-2023 the Pacemaker project contributors"
 __license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
 
 import sys, time, os, socket, random
 
 from pacemaker._cts.logging import LogFactory
 from pacemaker._cts.remote import RemoteFactory
 
 class Environment(object):
 
     def __init__(self, args):
         self.data = {}
         self.Nodes = []
 
         self["DeadTime"] = 300
         self["StartTime"] = 300
         self["StableTime"] = 30
         self["tests"] = []
         self["IPagent"] = "IPaddr2"
-        self["DoFencing"] = 1
+        self["DoFencing"] = True
         self["XmitLoss"] = "0.0"
         self["RecvLoss"] = "0.0"
         self["ClobberCIB"] = 0
         self["CIBfilename"] = None
         self["CIBResource"] = 0
         self["DoBSC"]    = 0
         self["oprofile"] = []
         self["warn-inactive"] = 0
         self["ListTests"] = 0
         self["benchmark"] = 0
         self["LogWatcher"] = "any"
         self["SyslogFacility"] = "daemon"
         self["LogFileName"] = "/var/log/messages"
         self["Schema"] = "pacemaker-3.0"
         self["Stack"] = "corosync"
         self["stonith-type"] = "external/ssh"
         self["stonith-params"] = "hostlist=all,livedangerously=yes"
         self["notification-agent"] = "/var/lib/pacemaker/notify.sh"
         self["notification-recipient"] = "/var/lib/pacemaker/notify.log"
         self["loop-minutes"] = 60
         self["valgrind-procs"] = "pacemaker-attrd pacemaker-based pacemaker-controld pacemaker-execd pacemaker-fenced pacemaker-schedulerd"
         self["experimental-tests"] = 0
         self["container-tests"] = 0
         self["valgrind-tests"] = 0
         self["unsafe-tests"] = 1
         self["loop-tests"] = 1
         self["scenario"] = "random"
         self["stats"] = 0
         self["continue"] = 0
 
         self.RandomGen = random.Random()
         self.logger = LogFactory()
 
         self.SeedRandom()
         self.rsh = RemoteFactory().getInstance()
 
         self.target = "localhost"
 
         self.parse_args(args)
         if self["ListTests"] == 0:
             self.validate()
             self.discover()
 
     def SeedRandom(self, seed=None):
         if not seed:
             seed = int(time.time())
 
         self["RandSeed"] = seed
         self.RandomGen.seed(str(seed))
 
     def dump(self):
         keys = []
         for key in list(self.data.keys()):
             keys.append(key)
 
         keys.sort()
         for key in keys:
             self.logger.debug("Environment["+key+"]:\t"+str(self[key]))
 
     def keys(self):
         return list(self.data.keys())
 
     def has_key(self, key):
         if key == "nodes":
             return True
 
         return key in self.data
 
     def __getitem__(self, key):
         if str(key) == "0":
             raise ValueError("Bad call to 'foo in X', should reference 'foo in X.keys()' instead")
 
         if key == "nodes":
             return self.Nodes
 
         elif key == "Name":
             return self.get_stack_short()
 
         elif key in self.data:
             return self.data[key]
 
         else:
             return None
 
     def __setitem__(self, key, value):
         if key == "Stack":
             self.set_stack(value)
 
         elif key == "node-limit":
             self.data[key] = value
             self.filter_nodes()
 
         elif key == "nodes":
             self.Nodes = []
             for node in value:
                 # I don't think I need the IP address, etc. but this validates
                 # the node name against /etc/hosts and/or DNS, so it's a
                 # GoodThing(tm).
                 try:
                     n = node.strip()
                     socket.gethostbyname_ex(n)
                     self.Nodes.append(n) 
                 except:
                     self.logger.log(node+" not found in DNS... aborting")
                     raise
 
             self.filter_nodes()
 
         else:
             self.data[key] = value
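 
     # Illustrative sketch (not part of this patch): assigning the node list runs
     # the DNS/hosts validation above; the host names here are hypothetical:
     #
     #   env = EnvFactory().getInstance()
     #   env["nodes"] = ["node1", "node2", "node3"]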
 
     def RandomNode(self):
         '''Choose a random node from the cluster'''
         return self.RandomGen.choice(self["nodes"])
 
     def set_stack(self, name):
         # Normalize stack names
         if name == "corosync" or name == "cs" or name == "mcp":
             self.data["Stack"] = "corosync 2+"
 
         else:
             raise ValueError("Unknown stack: "+name)
 
     def get_stack_short(self):
         # Create the Cluster Manager object
         if not "Stack" in self.data:
             return "unknown"
 
         elif self.data["Stack"] == "corosync 2+":
             return "crm-corosync"
 
         else:
             LogFactory().log("Unknown stack: "+self["Stack"])
             raise ValueError("Unknown stack: "+self["Stack"])
 
     def detect_syslog(self):
         # Detect syslog variant
         if not "syslogd" in self.data:
             if self["have_systemd"]:
                 # Systemd
                 (_, lines) = self.rsh(self.target, "systemctl list-units | grep syslog.*\.service.*active.*running | sed 's:.service.*::'", verbose=1)
                 self["syslogd"] = lines[0].strip()
             else:
                 # SYS-V
                 (_, lines) = self.rsh(self.target, "chkconfig --list | grep syslog.*on | awk '{print $1}' | head -n 1", verbose=1)
                 self["syslogd"] = lines[0].strip()
 
             if not "syslogd" in self.data or not self["syslogd"]:
                 # default
                 self["syslogd"] = "rsyslog"
 
     def disable_service(self, node, service):
         if self["have_systemd"]:
             # Systemd
             (rc, _) = self.rsh(node, "systemctl disable %s" % service)
             return rc
 
         else:
             # SYS-V
             (rc, _) = self.rsh(node, "chkconfig %s off" % service)
             return rc
 
     def enable_service(self, node, service):
         if self["have_systemd"]:
             # Systemd
             (rc, _) = self.rsh(node, "systemctl enable %s" % service)
             return rc
 
         else:
             # SYS-V
             (rc, _) = self.rsh(node, "chkconfig %s on" % service)
             return rc
 
     def service_is_enabled(self, node, service):
         if self["have_systemd"]:
             # Systemd
 
             # With "systemctl is-enabled", we should check if the service is
             # explicitly "enabled" instead of the return code. For example it returns
             # 0 if the service is "static" or "indirect", but they don't really count
             # as "enabled".
             (rc, _) = self.rsh(node, "systemctl is-enabled %s | grep enabled" % service)
             return rc == 0
 
         else:
             # SYS-V
             (rc, _) = self.rsh(node, "chkconfig --list | grep -e %s.*on" % service)
             return rc == 0
 
     def detect_at_boot(self):
         # Detect if the cluster starts at boot
         if not "at-boot" in self.data:
             self["at-boot"] = self.service_is_enabled(self.target, "corosync") \
                               or self.service_is_enabled(self.target, "pacemaker")
 
     def detect_ip_offset(self):
         # Try to determine an offset for IPaddr resources
         if self["CIBResource"] and not "IPBase" in self.data:
             (_, lines) = self.rsh(self.target, "ip addr | grep inet | grep -v -e link -e inet6 -e '/32' -e ' lo' | awk '{print $2}'", verbose=0)
             network = lines[0].strip()
 
             (_, lines) = self.rsh(self.target, "nmap -sn -n %s | grep 'scan report' | awk '{print $NF}' | sed 's:(::' | sed 's:)::' | sort -V | tail -n 1" % network, verbose=0)
             self["IPBase"] = lines[0].strip()
 
             if not self["IPBase"]:
                 self["IPBase"] = " fe80::1234:56:7890:1000"
                 self.logger.log("Could not determine an offset for IPaddr resources.  Perhaps nmap is not installed on the nodes.")
                 self.logger.log("Defaulting to '%s', use --test-ip-base to override" % self["IPBase"])
 
             elif int(self["IPBase"].split('.')[3]) >= 240:
                 self.logger.log("Could not determine an offset for IPaddr resources. Upper bound is too high: %s %s"
                                 % (self["IPBase"], self["IPBase"].split('.')[3]))
                 self["IPBase"] = " fe80::1234:56:7890:1000"
                 self.logger.log("Defaulting to '%s', use --test-ip-base to override" % self["IPBase"])
 
     def filter_nodes(self):
         if self['node-limit'] is not None and self["node-limit"] > 0:
             if len(self["nodes"]) > self["node-limit"]:
                 self.logger.log("Limiting the number of nodes configured=%d (max=%d)"
                                 %(len(self["nodes"]), self["node-limit"]))
                 while len(self["nodes"]) > self["node-limit"]:
                     self["nodes"].pop(len(self["nodes"])-1)
 
     def validate(self):
         if len(self["nodes"]) < 1:
             print("No nodes specified!")
             sys.exit(1)
 
     def discover(self):
         self.target = random.Random().choice(self["nodes"])
 
         exerciser = socket.gethostname()
 
         # Use the IP where possible to avoid name lookup failures
         for ip in socket.gethostbyname_ex(exerciser)[2]:
             if ip != "127.0.0.1":
                 exerciser = ip
                 break
         self["cts-exerciser"] = exerciser
 
         if not "have_systemd" in self.data:
             (rc, _) = self.rsh(self.target, "systemctl list-units", verbose=0)
             self["have_systemd"] = rc == 0
 
         self.detect_syslog()
         self.detect_at_boot()
         self.detect_ip_offset()
 
     def parse_args(self, args):
         skipthis=None
 
         if not args:
             args=sys.argv[1:]
 
         for i in range(0, len(args)):
             if skipthis:
                 skipthis=None
                 continue
 
             elif args[i] == "-l" or args[i] == "--limit-nodes":
                 skipthis=1
                 self["node-limit"] = int(args[i+1])
 
             elif args[i] == "-r" or args[i] == "--populate-resources":
                 self["CIBResource"] = 1
                 self["ClobberCIB"] = 1
 
             elif args[i] == "--outputfile":
                 skipthis=1
                 self["OutputFile"] = args[i+1]
                 LogFactory().add_file(self["OutputFile"])
 
             elif args[i] == "-L" or args[i] == "--logfile":
                 skipthis=1
                 self["LogWatcher"] = "remote"
                 self["LogAuditDisabled"] = 1
                 self["LogFileName"] = args[i+1]
 
             elif args[i] == "--ip" or args[i] == "--test-ip-base":
                 skipthis=1
                 self["IPBase"] = args[i+1]
                 self["CIBResource"] = 1
                 self["ClobberCIB"] = 1
 
             elif args[i] == "--oprofile":
                 skipthis=1
                 self["oprofile"] = args[i+1].split(' ')
 
             elif args[i] == "--trunc":
                 self["TruncateLog"]=1
 
             elif args[i] == "--list-tests" or args[i] == "--list" :
                 self["ListTests"]=1
 
             elif args[i] == "--benchmark":
                 self["benchmark"]=1
 
             elif args[i] == "--bsc":
                 self["DoBSC"] = 1
                 self["scenario"] = "basic-sanity"
 
             elif args[i] == "--qarsh":
                 RemoteFactory().enable_qarsh()
 
             elif args[i] == "--yes" or args[i] == "-y":
                 self["continue"] = 1
             elif args[i] == "--stonith" or args[i] == "--fencing":
                 skipthis=1
                 if args[i+1] == "1" or args[i+1] == "yes":
-                    self["DoFencing"]=1
+                    self["DoFencing"] = True
                 elif args[i+1] == "0" or args[i+1] == "no":
-                    self["DoFencing"]=0
+                    self["DoFencing"] = False
                 elif args[i+1] == "rhcs" or args[i+1] == "xvm" or args[i+1] == "virt":
                     self["DoStonith"]=1
                     self["stonith-type"] = "fence_xvm"
                 elif args[i+1] == "scsi":
                     self["DoStonith"]=1
                     self["stonith-type"] = "fence_scsi"
                 elif args[i+1] == "ssh" or args[i+1] == "lha":
                     self["DoStonith"]=1
                     self["stonith-type"] = "external/ssh"
                     self["stonith-params"] = "hostlist=all,livedangerously=yes"
                 elif args[i+1] == "north":
                     self["DoStonith"]=1
                     self["stonith-type"] = "fence_apc"
                     self["stonith-params"] = "ipaddr=north-apc,login=apc,passwd=apc,pcmk_host_map=north-01:2;north-02:3;north-03:4;north-04:5;north-05:6;north-06:7;north-07:9;north-08:10;north-09:11;north-10:12;north-11:13;north-12:14;north-13:15;north-14:18;north-15:17;north-16:19;"
                 elif args[i+1] == "south":
                     self["DoStonith"]=1
                     self["stonith-type"] = "fence_apc"
                     self["stonith-params"] = "ipaddr=south-apc,login=apc,passwd=apc,pcmk_host_map=south-01:2;south-02:3;south-03:4;south-04:5;south-05:6;south-06:7;south-07:9;south-08:10;south-09:11;south-10:12;south-11:13;south-12:14;south-13:15;south-14:18;south-15:17;south-16:19;"
                 elif args[i+1] == "east":
                     self["DoStonith"]=1
                     self["stonith-type"] = "fence_apc"
                     self["stonith-params"] = "ipaddr=east-apc,login=apc,passwd=apc,pcmk_host_map=east-01:2;east-02:3;east-03:4;east-04:5;east-05:6;east-06:7;east-07:9;east-08:10;east-09:11;east-10:12;east-11:13;east-12:14;east-13:15;east-14:18;east-15:17;east-16:19;"
                 elif args[i+1] == "west":
                     self["DoStonith"]=1
                     self["stonith-type"] = "fence_apc"
                     self["stonith-params"] = "ipaddr=west-apc,login=apc,passwd=apc,pcmk_host_map=west-01:2;west-02:3;west-03:4;west-04:5;west-05:6;west-06:7;west-07:9;west-08:10;west-09:11;west-10:12;west-11:13;west-12:14;west-13:15;west-14:18;west-15:17;west-16:19;"
                 elif args[i+1] == "openstack":
                     self["DoStonith"]=1
                     self["stonith-type"] = "fence_openstack"
                     
                     print("Obtaining OpenStack credentials from the current environment")
                     self["stonith-params"] = "region=%s,tenant=%s,auth=%s,user=%s,password=%s" % (
                         os.environ['OS_REGION_NAME'],
                         os.environ['OS_TENANT_NAME'],
                         os.environ['OS_AUTH_URL'],
                         os.environ['OS_USERNAME'],
                         os.environ['OS_PASSWORD']
                     )
                     
                 elif args[i+1] == "rhevm":
                     self["DoStonith"]=1
                     self["stonith-type"] = "fence_rhevm"
                     
                     print("Obtaining RHEV-M credentials from the current environment")
                     self["stonith-params"] = "login=%s,passwd=%s,ipaddr=%s,ipport=%s,ssl=1,shell_timeout=10" % (
                         os.environ['RHEVM_USERNAME'],
                         os.environ['RHEVM_PASSWORD'],
                         os.environ['RHEVM_SERVER'],
                         os.environ['RHEVM_PORT'],
                     )
                     
                 else:
                     self.usage(args[i+1])
 
             elif args[i] == "--stonith-type":
                 self["stonith-type"] = args[i+1]
                 skipthis=1
 
             elif args[i] == "--stonith-args":
                 self["stonith-params"] = args[i+1]
                 skipthis=1
 
             elif args[i] == "--clobber-cib" or args[i] == "-c":
                 self["ClobberCIB"] = 1
                 
             elif args[i] == "--cib-filename":
                 skipthis=1
                 self["CIBfilename"] = args[i+1]
 
             elif args[i] == "--xmit-loss":
                 try:
                     float(args[i+1])
                 except ValueError:
                     print("--xmit-loss parameter should be float")
                     self.usage(args[i+1])
                 skipthis=1
                 self["XmitLoss"] = args[i+1]
 
             elif args[i] == "--recv-loss":
                 try:
                     float(args[i+1])
                 except ValueError:
                     print("--recv-loss parameter should be float")
                     self.usage(args[i+1])
                 skipthis=1
                 self["RecvLoss"] = args[i+1]
 
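             # Run only the named test(s) and switch to the 'sequence' scenario.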
             elif args[i] == "--choose":
                 skipthis=1
                 self["tests"].append(args[i+1])
                 self["scenario"] = "sequence"
 
             elif args[i] == "--nodes":
                 skipthis=1
                 self["nodes"] = args[i+1].split(' ')
 
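             # Selecting a dsh group names the CTS log file after the group and,
             # further below, loads the node list from ~/.dsh/group/<name>.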
             elif args[i] == "-g" or args[i] == "--group" or args[i] == "--dsh-group":
                 skipthis=1
                 self["OutputFile"] = "%s/cluster-%s.log" % (os.environ['HOME'], args[i+1])
                 LogFactory().add_file(self["OutputFile"], "CTS")
 
                 dsh_file = "%s/.dsh/group/%s" % (os.environ['HOME'], args[i+1])
 
                 # Hacks to make my life easier
                 if args[i+1] == "virt1":
                     self["Stack"] = "corosync"
                     self["DoStonith"]=1
                     self["stonith-type"] = "fence_xvm"
                     self["stonith-params"] = "delay=0"
                     self["IPBase"] = " fe80::1234:56:7890:1000"
 
                 elif args[i+1] == "east16" or args[i+1] == "nsew":
                     self["Stack"] = "corosync"
                     self["DoStonith"]=1
                     self["stonith-type"] = "fence_apc"
                     self["stonith-params"] = "ipaddr=east-apc,login=apc,passwd=apc,pcmk_host_map=east-01:2;east-02:3;east-03:4;east-04:5;east-05:6;east-06:7;east-07:9;east-08:10;east-09:11;east-10:12;east-11:13;east-12:14;east-13:15;east-14:18;east-15:17;east-16:19;"
                     self["IPBase"] = " fe80::1234:56:7890:2000"
 
                     if args[i+1] == "east16":
                         # Requires a newer Python than is available on the nsew group's nodes
                         self["IPagent"] = "Dummy"
 
                 elif args[i+1] == "corosync8":
                     self["Stack"] = "corosync"
                     self["DoStonith"]=1
                     self["stonith-type"] = "fence_rhevm"
 
                     print("Obtaining RHEV-M credentials from the current environment")
                     self["stonith-params"] = "login=%s,passwd=%s,ipaddr=%s,ipport=%s,ssl=1,shell_timeout=10" % (
                         os.environ['RHEVM_USERNAME'],
                         os.environ['RHEVM_PASSWORD'],
                         os.environ['RHEVM_SERVER'],
                         os.environ['RHEVM_PORT'],
                     )
                     self["IPBase"] = " fe80::1234:56:7890:3000"
 
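                 # Read the node list from the dsh group file, skipping comment lines.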
                 if os.path.isfile(dsh_file):
                     self["nodes"] = []
                     with open(dsh_file, 'r') as f:
                         for line in f:
                             stripped = line.strip()
                             if not stripped.startswith('#'):
                                 self["nodes"].append(stripped)
 
                 else:
                     print("Unknown DSH group: %s" % args[i+1])
 
             elif args[i] == "--syslog-facility" or args[i] == "--facility":
                 skipthis=1
                 self["SyslogFacility"] = args[i+1]
                 
             elif args[i] == "--seed":
                 skipthis=1
                 self.SeedRandom(args[i+1])
 
             elif args[i] == "--warn-inactive":
                 self["warn-inactive"] = 1
 
             elif args[i] == "--schema":
                 skipthis=1
                 self["Schema"] = args[i+1]
 
             elif args[i] == "--at-boot" or args[i] == "--cluster-starts-at-boot":
                 skipthis=1
                 if args[i+1] == "1" or args[i+1] == "yes":
                     self["at-boot"] = 1
                 elif args[i+1] == "0" or args[i+1] == "no":
                     self["at-boot"] = 0
                 else:
                     self.usage(args[i+1])
 
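             # The listed Fedora and RHEL releases all use corosync; any other
             # value is taken as the stack name verbatim.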
             elif args[i] == "--stack":
                 if args[i+1] == "fedora" or args[i+1] == "fedora-17" or args[i+1] == "fedora-18":
                     self["Stack"] = "corosync"
                 elif args[i+1] == "rhel-7":
                     self["Stack"] = "corosync"
                 else:
                     self["Stack"] = args[i+1]
                 skipthis=1
 
             elif args[i] == "--once":
                 self["scenario"] = "all-once"
 
             elif args[i] == "--boot":
                 self["scenario"] = "boot"
 
             elif args[i] == "--notification-agent":
                 self["notification-agent"] = args[i+1]
                 skipthis = 1
 
             elif args[i] == "--notification-recipient":
                 self["notification-recipient"] = args[i+1]
                 skipthis = 1
 
             elif args[i] == "--valgrind-tests":
                 self["valgrind-tests"] = 1
 
             elif args[i] == "--valgrind-procs":
                 self["valgrind-procs"] = args[i+1]
                 skipthis = 1
 
             elif args[i] == "--no-loop-tests":
                 self["loop-tests"] = 0
 
             elif args[i] == "--loop-minutes":
                 skipthis=1
                 try:
                     self["loop-minutes"]=int(args[i+1])
                 except ValueError:
                     self.usage(args[i])
 
             elif args[i] == "--no-unsafe-tests":
                 self["unsafe-tests"] = 0
 
             elif args[i] == "--experimental-tests":
                 self["experimental-tests"] = 1
 
             elif args[i] == "--container-tests":
                 self["container-tests"] = 1
 
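             # Generic override: store an arbitrary option=value pair in the environment.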
             elif args[i] == "--set":
                 skipthis=1
                 (name, value) = args[i+1].split('=', 1)
                 self[name] = value
                 print("Setting %s = %s" % (name, value))
                 
             elif args[i] == "--help":
                 self.usage(args[i], 0)
 
             elif args[i] == "--":
                 break
 
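             # Any remaining bare argument is treated as the number of test iterations.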
             else:
                 try:
                     NumIter=int(args[i])
                     self["iterations"] = NumIter
                 except ValueError:
                     self.usage(args[i])
 
     def usage(self, arg, status=1):
         if status:
             print("Illegal argument %s" % arg)
 
         print("""usage: %s [options] number-of-iterations
 
 Common options:
 \t [--nodes 'node list']        list of cluster nodes separated by whitespace
 \t [--group | -g 'name']        use the nodes listed in the named DSH group (~/.dsh/group/$name)
 \t [--limit-nodes max]          only use the first 'max' cluster nodes supplied with --nodes
 \t [--stack corosync]           which cluster stack is installed
 \t [--list-tests]               list the valid tests
 \t [--benchmark]                add timing information
 
 Options that CTS will usually auto-detect correctly:
 \t [--logfile path]             where the test software should look for logs from cluster nodes
 \t [--syslog-facility name]     which syslog facility the test software should log to
 \t [--at-boot (1|0)]            whether the cluster software starts at boot time
 \t [--test-ip-base ip]          offset for generated IP address resources
 
 Options for release testing:
 \t [--populate-resources | -r]  generate a sample configuration
 \t [--choose name]              run only the named test
 \t [--stonith (1 | 0 | yes | no | rhcs | ssh)]
 \t [--once]                     run all valid tests once
 
 Additional (less common) options:
 \t [--clobber-cib | -c ]        erase any existing configuration
 \t [--outputfile path]          optional location for the test software to write logs to
 \t [--trunc]                    truncate logfile before starting
 \t [--xmit-loss lost-rate(0.0-1.0)]
 \t [--recv-loss lost-rate(0.0-1.0)]
 \t [--fencing (1 | 0 | yes | no | rhcs | lha | openstack)]
 \t [--stonith-type type]
 \t [--stonith-args name=value]
 \t [--bsc]
 \t [--notification-agent path]  script to configure for Pacemaker alerts
 \t [--notification-recipient r] recipient to pass to alert script
 \t [--no-loop-tests]            don't run looping/time-based tests
 \t [--no-unsafe-tests]          don't run tests that are unsafe for use with ocfs2/drbd
 \t [--valgrind-tests]           include tests using valgrind
 \t [--experimental-tests]       include experimental tests
 \t [--container-tests]          include pacemaker_remote tests that run in lxc container resources
 \t [--oprofile 'node list']     list of cluster nodes to run oprofile on
 \t [--qarsh]                    use the QARSH backdoor to access nodes instead of SSH
 \t [--seed random_seed]
 \t [--set option=value]
 \t [--yes | -y]                 answer 'yes' when prompted whether to continue running pacemaker-cts
 
 Example:
 \t python %s -g virt1 -r --stonith ssh --schema pacemaker-2.0 500""" % (sys.argv[0], sys.argv[0]))
 
         sys.exit(status)
 
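 # Simple singleton factory: the first call to getInstance() builds the shared
 # Environment from the given arguments; later calls return the same object and
 # ignore any arguments passed.
 #
 # Usage sketch (illustrative only):
 #   env = EnvFactory().getInstance(sys.argv[1:])   # creates the Environment
 #   env2 = EnvFactory().getInstance()              # returns the same object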
 class EnvFactory(object):
     instance = None
     def __init__(self):
         pass
 
     def getInstance(self, args=None):
         if not EnvFactory.instance:
             EnvFactory.instance = Environment(args)
         return EnvFactory.instance