diff --git a/cts/CTSlab.py b/cts/CTSlab.py
index a13f66c1be..278f19114e 100755
--- a/cts/CTSlab.py
+++ b/cts/CTSlab.py
@@ -1,611 +1,616 @@
 #!/usr/bin/python
 
 '''CTS: Cluster Testing System: Lab environment module
  '''
 
 __copyright__='''
 Copyright (C) 2001,2005 Alan Robertson <alanr@unix.sh>
 Licensed under the GNU GPL.
 '''
 
 #
 # This program is free software; you can redistribute it and/or
 # modify it under the terms of the GNU General Public License
 # as published by the Free Software Foundation; either version 2
 # of the License, or (at your option) any later version.
 #
 # This program is distributed in the hope that it will be useful,
 # but WITHOUT ANY WARRANTY; without even the implied warranty of
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 # GNU General Public License for more details.
 #
 # You should have received a copy of the GNU General Public License
 # along with this program; if not, write to the Free Software
 # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA.
 
 from UserDict import UserDict
 import sys, types, string, signal, os, socket
 
 pdir=os.path.dirname(sys.path[0])
 sys.path.insert(0, pdir) # So that things work from the source directory
 
 try:
     from cts.CTSvars    import *
     from cts.CM_ais     import *
     from cts.CM_lha     import crm_lha
     from cts.CTSaudits  import AuditList
     from cts.CTStests   import TestList
     from cts.CTSscenarios import *
 
 except ImportError:
     sys.stderr.write("abort: couldn't find cts libraries in [%s]\n" %
                      ' '.join(sys.path))
     sys.stderr.write("(check your install and PYTHONPATH)\n")
 
     # Now do it again to get more details
     from cts.CTSvars    import *
     from cts.CM_ais     import *
     from cts.CM_lha     import crm_lha
     from cts.CTSaudits  import AuditList
     from cts.CTStests   import TestList
     from cts.CTSscenarios import *
     sys.exit(-1)
 
 cm = None
 Tests = []
 Chosen = []
 scenario = None
 
 # Not really used; registered below as a fallback handler for SIGTERM/SIGUSR1
 def sig_handler(signum, frame):
     if cm: cm.log("Interrupted by signal %d" % signum)
     if scenario: scenario.summarize()
     if signum == signal.SIGTERM:
         if scenario: scenario.TearDown()
         sys.exit(1)
 
 class LabEnvironment(CtsLab):
 
     def __init__(self):
         CtsLab.__init__(self)
 
         # Default test environment settings
         self["DoStandby"] = 1
         self["DoFencing"] = 1
         self["XmitLoss"] = "0.0"
         self["RecvLoss"] = "0.0"
         self["ClobberCIB"] = 0
         self["CIBfilename"] = None
         self["CIBResource"] = 0
         self["DoBSC"]    = 0
         self["use_logd"] = 0
         self["oprofile"] = []
         self["warn-inactive"] = 0
         self["ListTests"] = 0
         self["benchmark"] = 0
         self["Schema"] = "pacemaker-1.0"
         self["Stack"] = "openais"
         self["stonith-type"] = "external/ssh"
         self["stonith-params"] = "hostlist=all,livedangerously=yes"
         self["logger"] = ([StdErrLog(self)])
         self["loop-minutes"] = 60
         self["valgrind-prefix"] = None
         self["valgrind-procs"] = "cib crmd attrd pengine stonith-ng"
         self["valgrind-opts"] = """--leak-check=full --show-reachable=yes --trace-children=no --num-callers=25 --gen-suppressions=all --suppressions="""+CTSvars.CTS_home+"""/cts.supp"""
         #self["valgrind-opts"] = """--trace-children=no --num-callers=25 --gen-suppressions=all --suppressions="""+CTSvars.CTS_home+"""/cts.supp"""
 
         self["experimental-tests"] = 0
         self["valgrind-tests"] = 0
         self["unsafe-tests"] = 1
         self["loop-tests"] = 1
         self["scenario"] = "random"
         self["stats"] = 0
 
         master = socket.gethostname()
 
         # Use the IP where possible to avoid name lookup failures
         for ip in socket.gethostbyname_ex(master)[2]:
             if ip != "127.0.0.1":
                 master = ip
                 break
         self["cts-master"] = master
 
 def usage(arg, status=1):
     print "Illegal argument " + arg
     print "usage: " + sys.argv[0] +" [options] number-of-iterations"
     print "\nCommon options: "
     print "\t [--nodes 'node list']        list of cluster nodes separated by whitespace"
     print "\t [--group | -g 'name']        use the nodes listed in the named DSH group (~/.dsh/groups/$name)"
     print "\t [--limit-nodes max]          only use the first 'max' cluster nodes supplied with --nodes"
     print "\t [--stack (v0|v1|cman|corosync|heartbeat|openais)]    which cluster stack is installed"
     print "\t [--list-tests]               list the valid tests"
     print "\t [--benchmark]                add the timing information"
     print "\t "
     print "Options that CTS will usually auto-detect correctly: "
     print "\t [--logfile path]             where should the test software look for logs from cluster nodes"
     print "\t [--syslog-facility name]     which syslog facility should the test software log to"
     print "\t [--at-boot (1|0)]            does the cluster software start at boot time"
     print "\t [--test-ip-base ip]          offset for generated IP address resources"
     print "\t "
     print "Options for release testing: "
     print "\t [--populate-resources | -r]  generate a sample configuration"
     print "\t [--choose name]              run only the named test"
     print "\t [--stonith (1 | 0 | yes | no | rhcs | ssh)]"
     print "\t [--once]                     run all valid tests once"
     print "\t "
     print "Additional (less common) options: "
     print "\t [--clobber-cib | -c ]        erase any existing configuration"
     print "\t [--outputfile path]          optional location for the test software to write logs to"
     print "\t [--trunc]                    truncate logfile before starting"
     print "\t [--xmit-loss lost-rate(0.0-1.0)]"
     print "\t [--recv-loss lost-rate(0.0-1.0)]"
     print "\t [--standby (1 | 0 | yes | no)]"
     print "\t [--fencing (1 | 0 | yes | no | rhcs | lha | openstack )]"
     print "\t [--stonith-type type]"
     print "\t [--stonith-args name=value]"
     print "\t [--bsc]"
     print "\t [--no-loop-tests]            dont run looping/time-based tests"
     print "\t [--no-unsafe-tests]          dont run tests that are unsafe for use with ocfs2/drbd"
     print "\t [--valgrind-tests]           include tests using valgrind"
     print "\t [--experimental-tests]       include experimental tests"
     print "\t [--oprofile 'node list']     list of cluster nodes to run oprofile on]"
     print "\t [--qarsh]                    use the QARSH backdoor to access nodes instead of SSH"
     print "\t [--seed random_seed]"
     print "\t [--set option=value]"
     print "\t "
     print "\t Example: "
     print "\t    python ./CTSlab.py -g virt1 --stack cs -r --stonith ssh --schema pacemaker-1.0 500"
 
     sys.exit(status)
 
 
 #
 #   Main program
 #
 if __name__ == '__main__':
 
     Environment = LabEnvironment()
     rsh = RemoteExec(None, silent=True)
 
     NumIter = 0
     Version = 1
     LimitNodes = 0
     TruncateLog = 0
     ListTests = 0
     HaveSeed = 0
     node_list = ''
 
     # Set the signal handlers
     signal.signal(signal.SIGTERM, sig_handler)
     signal.signal(signal.SIGUSR1, sig_handler)
 
     # Process arguments...
 
     skipthis=None
     args=sys.argv[1:]
     for i in range(0, len(args)):
        if skipthis:
            skipthis=None
            continue
 
        elif args[i] == "-l" or args[i] == "--limit-nodes":
            skipthis=1
            LimitNodes = int(args[i+1])
 
        elif args[i] == "-r" or args[i] == "--populate-resources":
            Environment["CIBResource"] = 1
            Environment["ClobberCIB"] = 1
 
        elif args[i] == "-L" or args[i] == "--logfile":
            skipthis=1
            Environment["LogFileName"] = args[i+1]
 
        elif args[i] == "--outputfile":
            skipthis=1
            Environment["OutputFile"] = args[i+1]
 
        elif args[i] == "--ip" or args[i] == "--test-ip-base":
            skipthis=1
            Environment["IPBase"] = args[i+1]
            Environment["CIBResource"] = 1
            Environment["ClobberCIB"] = 1
 
        elif args[i] == "--oprofile":
            skipthis=1
            Environment["oprofile"] = args[i+1].split(' ')
 
        elif args[i] == "--trunc":
            Environment["TruncateLog"]=1
 
        elif args[i] == "--list-tests" or args[i] == "--list" :
            Environment["ListTests"]=1
 
        elif args[i] == "--benchmark":
            Environment["benchmark"]=1
 
        elif args[i] == "--bsc":
            Environment["DoBSC"] = 1
            Environment["scenario"] = "basic-sanity"
 
        elif args[i] == "--qarsh":
            Environment.rsh.enable_qarsh()
            rsh.enable_qarsh()
 
        elif args[i] == "--stonith" or args[i] == "--fencing":
            skipthis=1
            if args[i+1] == "1" or args[i+1] == "yes":
                Environment["DoFencing"]=1
            elif args[i+1] == "0" or args[i+1] == "no":
                Environment["DoFencing"]=0
            elif args[i+1] == "rhcs":
                Environment["DoStonith"]=1
                Environment["stonith-type"] = "fence_xvm"
                Environment["stonith-params"] = "pcmk_arg_map=domain:uname,delay=0"
            elif args[i+1] == "ssh" or args[i+1] == "lha":
                Environment["DoStonith"]=1
                Environment["stonith-type"] = "external/ssh"
                Environment["stonith-params"] = "hostlist=all,livedangerously=yes"
            elif args[i+1] == "north":
                Environment["DoStonith"]=1
                Environment["stonith-type"] = "fence_apc"
                Environment["stonith-params"] = "ipaddr=north-apc,login=apc,passwd=apc,pcmk_host_map=north-01:2;north-02:3;north-03:4;north-04:5;north-05:6;north-06:7;north-07:9;north-08:10;north-09:11;north-10:12;north-11:13;north-12:14;north-13:15;north-14:18;north-15:17;north-16:19;"
            elif args[i+1] == "south":
                Environment["DoStonith"]=1
                Environment["stonith-type"] = "fence_apc"
                Environment["stonith-params"] = "ipaddr=south-apc,login=apc,passwd=apc,pcmk_host_map=south-01:2;south-02:3;south-03:4;south-04:5;south-05:6;south-06:7;south-07:9;south-08:10;south-09:11;south-10:12;south-11:13;south-12:14;south-13:15;south-14:18;south-15:17;south-16:19;"
            elif args[i+1] == "east":
                Environment["DoStonith"]=1
                Environment["stonith-type"] = "fence_apc"
                Environment["stonith-params"] = "ipaddr=east-apc,login=apc,passwd=apc,pcmk_host_map=east-01:2;east-02:3;east-03:4;east-04:5;east-05:6;east-06:7;east-07:9;east-08:10;east-09:11;east-10:12;east-11:13;east-12:14;east-13:15;east-14:18;east-15:17;east-16:19;"
            elif args[i+1] == "west":
                Environment["DoStonith"]=1
                Environment["stonith-type"] = "fence_apc"
                Environment["stonith-params"] = "ipaddr=west-apc,login=apc,passwd=apc,pcmk_host_map=west-01:2;west-02:3;west-03:4;west-04:5;west-05:6;west-06:7;west-07:9;west-08:10;west-09:11;west-10:12;west-11:13;west-12:14;west-13:15;west-14:18;west-15:17;west-16:19;"
            elif args[i+1] == "openstack":
                Environment["DoStonith"]=1
                Environment["stonith-type"] = "fence_openstack"
 
                print "Obtaining OpenStack credentials from the current environment"
                Environment["stonith-params"] = "region=%s,tenant=%s,auth=%s,user=%s,password=%s" % (
                    os.environ['OS_REGION_NAME'],
                    os.environ['OS_TENANT_NAME'],
                    os.environ['OS_AUTH_URL'],
                    os.environ['OS_USERNAME'],
                    os.environ['OS_PASSWORD']
                    )
 
            elif args[i+1] == "rhevm":
                Environment["DoStonith"]=1
                Environment["stonith-type"] = "fence_rhevm"
 
                print "Obtaining RHEV-M credentials from the current environment"
                Environment["stonith-params"] = "login=%s,passwd=%s,ipaddr=%s,ipport=%s,ssl=1,shell_timeout=10" % (
                    os.environ['RHEVM_USERNAME'],
                    os.environ['RHEVM_PASSWORD'],
                    os.environ['RHEVM_SERVER'],
                    os.environ['RHEVM_PORT'],
                    )
 
            else:
                usage(args[i+1])
 
        elif args[i] == "--stonith-type":
            Environment["stonith-type"] = args[i+1]
            skipthis=1
 
        elif args[i] == "--stonith-args":
            Environment["stonith-params"] = args[i+1]
            skipthis=1
 
        elif args[i] == "--standby":
            skipthis=1
            if args[i+1] == "1" or args[i+1] == "yes":
                Environment["DoStandby"] = 1
            elif args[i+1] == "0" or args[i+1] == "no":
                Environment["DoStandby"] = 0
            else:
                usage(args[i+1])
 
        elif args[i] == "--clobber-cib" or args[i] == "-c":
            Environment["ClobberCIB"] = 1
 
        elif args[i] == "--cib-filename":
            skipthis=1
            Environment["CIBfilename"] = args[i+1]
 
        elif args[i] == "--xmit-loss":
            try:
                float(args[i+1])
            except ValueError:
                print ("--xmit-loss parameter should be float")
                usage(args[i+1])
            skipthis=1
            Environment["XmitLoss"] = args[i+1]
 
        elif args[i] == "--recv-loss":
            try:
                float(args[i+1])
            except ValueError:
                print ("--recv-loss parameter should be float")
                usage(args[i+1])
            skipthis=1
            Environment["RecvLoss"] = args[i+1]
 
        elif args[i] == "--choose":
            skipthis=1
            Chosen.append(args[i+1])
            Environment["scenario"] = "sequence"
 
        elif args[i] == "--nodes":
            skipthis=1
            node_list = args[i+1].split(' ')
 
        elif args[i] == "-g" or args[i] == "--group" or args[i] == "--dsh-group":
            skipthis=1
            Environment["OutputFile"] = "%s/cluster-%s.log" % (os.environ['HOME'], args[i+1])
 
            dsh_file = "%s/.dsh/group/%s" % (os.environ['HOME'], args[i+1])
            if os.path.isfile(dsh_file):
                node_list = []
                f = open(dsh_file, 'r')
                for line in f:
                    l = line.strip()
                    if not l.startswith('#'):
                        node_list.append(l)
                f.close()
 
            else:
                print("Unknown DSH group: %s" % args[i+1])
 
        elif args[i] == "--syslog-facility" or args[i] == "--facility":
            skipthis=1
            Environment["SyslogFacility"] = args[i+1]
 
        elif args[i] == "--seed":
            skipthis=1
            Environment.SeedRandom(args[i+1])
 
        elif args[i] == "--warn-inactive":
            Environment["warn-inactive"] = 1
 
        elif args[i] == "--schema":
            skipthis=1
            Environment["Schema"] = args[i+1]
 
        elif args[i] == "--ais":
            Environment["Stack"] = "openais"
 
        elif args[i] == "--at-boot" or args[i] == "--cluster-starts-at-boot":
            skipthis=1
            if args[i+1] == "1" or args[i+1] == "yes":
                Environment["at-boot"] = 1
            elif args[i+1] == "0" or args[i+1] == "no":
                Environment["at-boot"] = 0
            else:
                usage(args[i+1])
 
        elif args[i] == "--heartbeat" or args[i] == "--lha":
            Environment["Stack"] = "heartbeat"
 
        elif args[i] == "--hae":
            Environment["Stack"] = "openais"
            Environment["Schema"] = "hae"
 
        elif args[i] == "--stack":
            if args[i+1] == "fedora" or args[i+1] == "fedora-17" or args[i+1] == "fedora-18":
                Environment["Stack"] = "corosync"
            elif args[i+1] == "rhel-6":
                Environment["Stack"] = "cman"
            elif args[i+1] == "rhel-7":
                Environment["Stack"] = "corosync"
            else:
                Environment["Stack"] = args[i+1]
            skipthis=1
 
        elif args[i] == "--once":
            Environment["scenario"] = "all-once"
 
+       elif args[i] == "--boot":
+           Environment["scenario"] = "boot"
+
        elif args[i] == "--valgrind-tests":
            Environment["valgrind-tests"] = 1
 
        elif args[i] == "--no-loop-tests":
            Environment["loop-tests"] = 0
 
        elif args[i] == "--loop-minutes":
            skipthis=1
            try:
                Environment["loop-minutes"]=int(args[i+1])
            except ValueError:
                usage(args[i])
 
        elif args[i] == "--no-unsafe-tests":
            Environment["unsafe-tests"] = 0
 
        elif args[i] == "--experimental-tests":
            Environment["experimental-tests"] = 1
 
        elif args[i] == "--set":
            skipthis=1
            (name, value) = args[i+1].split('=')
            Environment[name] = value
            print "Setting %s = %s" % (name, value)
 
        elif args[i] == "--":
            break
 
        else:
            try:
                NumIter=int(args[i])
            except ValueError:
                usage(args[i])
 
     if Environment["DoBSC"]:
         NumIter = 2
         LimitNodes = 1
         Chosen.append("AddResource")
         Environment["ClobberCIB"]  = 1
         Environment["CIBResource"] = 0
         Environment["logger"].append(FileLog(Environment, Environment["LogFileName"]))
 
     elif Environment["OutputFile"]:
         Environment["logger"].append(FileLog(Environment, Environment["OutputFile"]))
 
     elif Environment["SyslogFacility"]:
         Environment["logger"].append(SysLog(Environment))
 
     if Environment["Stack"] == "heartbeat" or Environment["Stack"] == "lha":
         Environment["Stack"]    = "heartbeat"
         Environment['CMclass']  = crm_lha
 
     elif Environment["Stack"] == "openais" or Environment["Stack"] == "ais"  or Environment["Stack"] == "whitetank":
         Environment["Stack"]    = "openais (whitetank)"
         Environment['CMclass']  = crm_whitetank
         Environment["use_logd"] = 0
 
     elif Environment["Stack"] == "corosync" or Environment["Stack"] == "cs" or Environment["Stack"] == "mcp":
         Environment["Stack"]    = "corosync"
         Environment['CMclass']  = crm_mcp
         Environment["use_logd"] = 0
 
     elif Environment["Stack"] == "cman":
         Environment["Stack"]    = "corosync (cman)"
         Environment['CMclass']  = crm_cman
         Environment["use_logd"] = 0
 
     elif Environment["Stack"] == "v1":
         Environment["Stack"]    = "corosync (plugin v1)"
         Environment['CMclass']  = crm_cs_v1
         Environment["use_logd"] = 0
 
     elif Environment["Stack"] == "v0":
         Environment["Stack"]    = "corosync (plugin v0)"
         Environment['CMclass']  = crm_cs_v0
         Environment["use_logd"] = 0
 
     else:
         print "Unknown stack: "+Environment["Stack"]
         sys.exit(1)
 
     if len(node_list) < 1:
         print "No nodes specified!"
         sys.exit(1)
 
     if LimitNodes > 0:
         if len(node_list) > LimitNodes:
             print("Limiting the number of nodes configured=%d (max=%d)"
                   %(len(node_list), LimitNodes))
             node_list = node_list[:LimitNodes]
 
     Environment["nodes"] = []
     for n in node_list:
        if len(n.strip()):
            Environment["nodes"].append(n.strip())
 
     discover = random.Random().choice(Environment["nodes"])
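     # rsh() returns the remote command's exit status here, so "not rsh(...)"
     # is true when "systemctl list-units" succeeded, i.e. the node runs systemd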
     Environment["have_systemd"] = not rsh(discover, "systemctl list-units")
 
     # Detect syslog variant
     if not Environment.has_key("syslogd") or not Environment["syslogd"]:
         if Environment["have_systemd"]:
             # Systemd
             Environment["syslogd"] = rsh(discover, "systemctl list-units | grep syslog.*\.service.*active.*running | sed 's:.service.*::'", stdout=1)
         else:
             # SYS-V
             Environment["syslogd"] = rsh(discover, "chkconfig | grep syslog.*on | awk '{print $1}' | head -n 1", stdout=1)
 
         if not Environment.has_key("syslogd") or not Environment["syslogd"]:
             # default
             Environment["syslogd"] = "rsyslog"
 
     # Detect if the cluster starts at boot
     if not Environment.has_key("at-boot"):
         atboot = 0
 
         if Environment["have_systemd"]:
             # Systemd
             atboot = atboot or not rsh(discover, "systemctl is-enabled heartbeat.service")
             atboot = atboot or not rsh(discover, "systemctl is-enabled corosync.service")
             atboot = atboot or not rsh(discover, "systemctl is-enabled pacemaker.service")
         else:
             # SYS-V
             atboot = atboot or not rsh(discover, "chkconfig | grep -e corosync.*on -e heartbeat.*on -e pacemaker.*on")
 
         Environment["at-boot"] = atboot
 
     # Try to determine an offset for IPaddr resources
     if Environment["CIBResource"] and not Environment.has_key("IPBase"):
         network=rsh(discover, "ip addr | grep inet | grep -v -e link -e inet6 -e '/32' -e ' lo' | awk '{print $2}'", stdout=1).strip()
         Environment["IPBase"] = rsh(discover, "nmap -sn -n %s | grep 'scan report' | tail -n 1 | awk '{print $NF}' | sed 's:(::' | sed 's:)::'" % network, stdout=1).strip()
         if not Environment["IPBase"]:
             Environment["IPBase"] = "127.0.0.10"
             Environment.log("Could not determine an offset for IPaddr resources.  Perhaps nmap is not installed on the nodes.")
             Environment.log("Defaulting to '%s', use --test-ip-base to override" % Environment["IPBase"])
 
     # Create the Cluster Manager object
     cm = Environment['CMclass'](Environment)
     if TruncateLog:
         Environment.log("Truncating %s" % Environment["LogFileName"])
         lf = open(Environment["LogFileName"], "w")
         lf.truncate(0)
         lf.close()
 
     Audits = AuditList(cm)
 
     if Environment["ListTests"] == 1 :
         Tests = TestList(cm, Audits)
         Environment.log("Total %d tests"%len(Tests))
         for test in Tests :
             Environment.log(str(test.name));
         sys.exit(0)
 
     if len(Chosen) == 0:
         Tests = TestList(cm, Audits)
 
     else:
         for TestCase in Chosen:
            match = None
 
            for test in TestList(cm, Audits):
                if test.name == TestCase:
                    match = test
 
            if not match:
                usage("--choose: No applicable/valid tests chosen")
            else:
                Tests.append(match)
 
     # Scenario selection
     if Environment["scenario"] == "basic-sanity":
         scenario = RandomTests(cm, [ BasicSanityCheck(Environment) ], Audits, Tests)
 
     elif Environment["scenario"] == "all-once":
         NumIter = len(Tests)
         scenario = AllOnce(
-            cm, [ InitClusterManager(Environment), PacketLoss(Environment) ], Audits, Tests)
+            cm, [ BootCluster(Environment), PacketLoss(Environment) ], Audits, Tests)
     elif Environment["scenario"] == "sequence":
         scenario = Sequence(
-            cm, [ InitClusterManager(Environment), PacketLoss(Environment) ], Audits, Tests)
+            cm, [ BootCluster(Environment), PacketLoss(Environment) ], Audits, Tests)
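+    # "boot" scenario: just start (and later stop) the cluster, running no tests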
+    elif Environment["scenario"] == "boot":
+        scenario = Sequence(cm, [ BootCluster(Environment) ], Audits, [])
     else:
         scenario = RandomTests(
-            cm, [ InitClusterManager(Environment), PacketLoss(Environment) ], Audits, Tests)
+            cm, [ BootCluster(Environment), PacketLoss(Environment) ], Audits, Tests)
 
     Environment.log(">>>>>>>>>>>>>>>> BEGINNING " + repr(NumIter) + " TESTS ")
     Environment.log("Stack:                  %s" % Environment["Stack"])
     Environment.log("Schema:                 %s" % Environment["Schema"])
     Environment.log("Scenario:               %s" % scenario.__doc__)
     Environment.log("CTS Master:             %s" % Environment["cts-master"])
     Environment.log("CTS Logfile:            %s" % Environment["OutputFile"])
     Environment.log("Random Seed:            %s" % Environment["RandSeed"])
     Environment.log("Syslog variant:         %s" % Environment["syslogd"].strip())
     Environment.log("System log files:       %s" % Environment["LogFileName"])
 #    Environment.log(" ")
     if Environment.has_key("IPBase"):
         Environment.log("Base IP for resources:  %s" % Environment["IPBase"])
     Environment.log("Cluster starts at boot: %d" % Environment["at-boot"])
 
 
     Environment.dump()
     rc = Environment.run(scenario, NumIter)
     sys.exit(rc)
diff --git a/cts/CTSscenarios.py b/cts/CTSscenarios.py
index 2fd8c4c9e0..b121572e3d 100644
--- a/cts/CTSscenarios.py
+++ b/cts/CTSscenarios.py
@@ -1,547 +1,547 @@
 from CTS import *
 from CTStests import CTSTest
 from CTSaudits import ClusterAudit
 class ScenarioComponent:
 
     def __init__(self, Env):
         self.Env = Env
 
     def IsApplicable(self):
         '''Return TRUE if the current ScenarioComponent is applicable
         in the given LabEnvironment given to the constructor.
         '''
 
         raise ValueError("Abstract Class member (IsApplicable)")
 
     def SetUp(self, CM):
         '''Set up the given ScenarioComponent'''
         raise ValueError("Abstract Class member (Setup)")
 
     def TearDown(self, CM):
         '''Tear down (undo) the given ScenarioComponent'''
         raise ValueError("Abstract Class member (Setup)")
 
 class Scenario:
     (
 '''The basic idea of a scenario is that of an ordered list of
 ScenarioComponent objects.  Each ScenarioComponent is SetUp() in turn,
 and then after the tests have been run, they are torn down using TearDown()
 (in reverse order).
 
 A Scenario is applicable to a particular cluster manager iff each
 ScenarioComponent is applicable.
 
 A partially set up scenario is torn down if it fails during setup.
 ''')
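    # Lifecycle sketch (conceptual; the real driver is Environment.run() in
    # CTSlab.py, but it amounts to):
    #
    #   if scenario.IsApplicable() and scenario.SetUp():
    #       scenario.run(iterations)   # run_loop() dispatches the tests
    #       scenario.TearDown()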
 
     def __init__(self, ClusterManager, Components, Audits, Tests):
 
         "Initialize the Scenario from the list of ScenarioComponents"
         self.ClusterManager = ClusterManager
         self.Components = Components
         self.Audits  = Audits
         self.Tests = Tests
 
         self.BadNews = None
         self.TestSets = []
         self.Stats = {"success":0, "failure":0, "BadNews":0, "skipped":0}
         self.Sets = []
 
         #self.ns=CTS.NodeStatus(self.Env)
 
         for comp in Components:
             if not issubclass(comp.__class__, ScenarioComponent):
                 raise ValueError("Init value must be subclass of ScenarioComponent")
 
         for audit in Audits:
             if not issubclass(audit.__class__, ClusterAudit):
                 raise ValueError("Init value must be subclass of ClusterAudit")
 
         for test in Tests:
             if not issubclass(test.__class__, CTSTest):
                 raise ValueError("Init value must be a subclass of CTSTest")
 
     def IsApplicable(self):
         (
 '''A Scenario IsApplicable() iff each of its ScenarioComponents IsApplicable()
 '''
         )
 
         for comp in self.Components:
             if not comp.IsApplicable():
                 return None
         return 1
 
     def SetUp(self):
         '''Set up the Scenario. Return TRUE on success.'''
 
         self.audit() # Also detects remote/local log config
         self.ClusterManager.prepare()
         self.ClusterManager.ns.WaitForAllNodesToComeUp(self.ClusterManager.Env["nodes"])
 
         self.audit()
         if self.ClusterManager.Env["valgrind-tests"]:
             self.ClusterManager.install_helper("cts.supp")
 
         self.BadNews = LogWatcher(self.ClusterManager.Env,
                                   self.ClusterManager["LogFileName"],
                                   self.ClusterManager["BadRegexes"], "BadNews", 0)
         self.BadNews.setwatch() # Call after we've figured out what type of log watching to do in LogAudit
 
         j=0
         while j < len(self.Components):
             if not self.Components[j].SetUp(self.ClusterManager):
                 # OOPS!  We failed.  Tear partial setups down.
                 self.audit()
                 self.ClusterManager.log("Tearing down partial setup")
                 self.TearDown(j)
                 return None
             j=j+1
 
         self.audit()
         return 1
 
     def TearDown(self, max=None):
 
         '''Tear Down the Scenario - in reverse order.'''
 
         if max == None:
             max = len(self.Components)-1
         j=max
         while j >= 0:
             self.Components[j].TearDown(self.ClusterManager)
             j=j-1
 
         self.audit()
 
     def incr(self, name):
         '''Increment (or initialize) the value associated with the given name'''
         if not self.Stats.has_key(name):
             self.Stats[name]=0
         self.Stats[name] = self.Stats[name]+1
 
     def run(self, Iterations):
         self.ClusterManager.oprofileStart()
         try:
             self.run_loop(Iterations)
             self.ClusterManager.oprofileStop()
         except:
             self.ClusterManager.oprofileStop()
             raise
 
     def run_loop(self, Iterations):
         raise ValueError("Abstract Class member (run_loop)")
 
     def run_test(self, test, testcount):
         nodechoice = self.ClusterManager.Env.RandomNode()
 
         ret = 1
         where = ""
         did_run = 0
 
         self.ClusterManager.Env.StatsMark(testcount)
         self.ClusterManager.instance_errorstoignore_clear()
         self.ClusterManager.log(("Running test %s" % test.name).ljust(35) + (" (%s) " % nodechoice).ljust(15) +"["+ ("%d" % testcount).rjust(3) +"]")
 
         starttime = test.set_timer()
         if not test.setup(nodechoice):
             self.ClusterManager.log("Setup failed")
             ret = 0
 
         elif not test.canrunnow(nodechoice):
             self.ClusterManager.log("Skipped")
             test.skipped()
 
         else:
             did_run = 1
             ret = test(nodechoice)
 
         if not test.teardown(nodechoice):
             self.ClusterManager.log("Teardown failed")
             answer = raw_input('Continue? [nY] ')
             if answer and answer == "n":
                 raise ValueError("Teardown of %s on %s failed" % (test.name, nodechoice))
             ret = 0
 
         stoptime=time.time()
         self.ClusterManager.oprofileSave(testcount)
 
         elapsed_time = stoptime - starttime
         test_time = stoptime - test.get_timer()
         if not test.has_key("min_time"):
             test["elapsed_time"] = elapsed_time
             test["min_time"] = test_time
             test["max_time"] = test_time
         else:
             test["elapsed_time"] = test["elapsed_time"] + elapsed_time
             if test_time < test["min_time"]:
                 test["min_time"] = test_time
             if test_time > test["max_time"]:
                 test["max_time"] = test_time
 
         if ret:
             self.incr("success")
             test.log_timer()
         else:
             self.incr("failure")
             self.ClusterManager.statall()
            did_run = 1  # Force the test count to be incremented anyway so test extraction works
 
         self.audit(test.errorstoignore())
         return did_run
 
     def summarize(self):
         self.ClusterManager.log("****************")
         self.ClusterManager.log("Overall Results:" + repr(self.Stats))
         self.ClusterManager.log("****************")
 
         stat_filter = {
             "calls":0,
             "failure":0,
             "skipped":0,
             "auditfail":0,
             }
         self.ClusterManager.log("Test Summary")
         for test in self.Tests:
             for key in stat_filter.keys():
                 stat_filter[key] = test.Stats[key]
             self.ClusterManager.log(("Test %s: "%test.name).ljust(25) + " %s"%repr(stat_filter))
 
         self.ClusterManager.debug("Detailed Results")
         for test in self.Tests:
             self.ClusterManager.debug(("Test %s: "%test.name).ljust(25) + " %s"%repr(test.Stats))
 
         self.ClusterManager.log("<<<<<<<<<<<<<<<< TESTS COMPLETED")
 
     def audit(self, LocalIgnore=[]):
         errcount=0
         ignorelist = []
         ignorelist.append("CTS:")
         ignorelist.extend(LocalIgnore)
         ignorelist.extend(self.ClusterManager.errorstoignore())
         ignorelist.extend(self.ClusterManager.instance_errorstoignore())
 
         # This makes sure everything is stabilized before starting...
         failed = 0
         for audit in self.Audits:
             if not audit():
                 self.ClusterManager.log("Audit " + audit.name() + " FAILED.")
                 failed += 1
             else:
                 self.ClusterManager.debug("Audit " + audit.name() + " passed.")
 
         while errcount < 1000:
             match = None
             if self.BadNews:
                 match=self.BadNews.look(0)
 
             if match:
                 add_err = 1
                 for ignore in ignorelist:
                     if add_err == 1 and re.search(ignore, match):
                         add_err = 0
                 if add_err == 1:
                     self.ClusterManager.log("BadNews: " + match)
                     self.incr("BadNews")
                     errcount=errcount+1
             else:
                 break
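        # Note: this is a while/else -- the "else" branch below runs only when
        # the loop exhausts its 1000-match budget without hitting "break"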
         else:
             answer = raw_input('Big problems.  Continue? [nY]')
             if answer and answer == "n":
                 self.ClusterManager.log("Shutting down.")
                 self.summarize()
                 self.TearDown()
                 raise ValueError("Looks like we hit a BadNews jackpot!")
 
         return failed
 
 class AllOnce(Scenario):
    '''Every Test Once''' # Accessible as __doc__
     def run_loop(self, Iterations):
         testcount=1
         for test in self.Tests:
             self.run_test(test, testcount)
             testcount += 1
 
 class RandomTests(Scenario):
     '''Random Test Execution'''
     def run_loop(self, Iterations):
         testcount=1
         while testcount <= Iterations:
             test = self.ClusterManager.Env.RandomGen.choice(self.Tests)
             self.run_test(test, testcount)
             testcount += 1
 
 class BasicSanity(Scenario):
     '''Basic Cluster Sanity'''
     def run_loop(self, Iterations):
         testcount=1
         while testcount <= Iterations:
            test = self.ClusterManager.Env.RandomGen.choice(self.Tests)
             self.run_test(test, testcount)
             testcount += 1
 
 class Sequence(Scenario):
     '''Named Tests in Sequence'''
     def run_loop(self, Iterations):
         testcount=1
         while testcount <= Iterations:
             for test in self.Tests:
                 self.run_test(test, testcount)
                 testcount += 1
 
-class InitClusterManager(ScenarioComponent):
+class BootCluster(ScenarioComponent):
     (
-'''InitClusterManager is the most basic of ScenarioComponents.
+'''BootCluster is the most basic of ScenarioComponents.
 This ScenarioComponent simply starts the cluster manager on all the nodes.
 It is fairly robust: it waits for all nodes to come up before starting,
 since they might have been rebooted or crashed beforehand.
 ''')
     def __init__(self, Env):
         pass
 
     def IsApplicable(self):
-        '''InitClusterManager is so generic it is always Applicable'''
+        '''BootCluster is so generic it is always Applicable'''
         return 1
 
     def SetUp(self, CM):
         '''Basic Cluster Manager startup.  Start everything'''
 
         CM.prepare()
 
        # Clear out the cobwebs ;-)
         self.TearDown(CM)
 
         # Now start the Cluster Manager on all the nodes.
         CM.log("Starting Cluster Manager on all nodes.")
         return CM.startall(verbose=True)
 
     def TearDown(self, CM):
        '''Tear down: stop the cluster manager on all nodes'''
 
         # Stop the cluster manager everywhere
 
         CM.log("Stopping Cluster Manager on all nodes")
         return CM.stopall(verbose=True)
 
 class PingFest(ScenarioComponent):
     (
 '''PingFest does a flood ping to each node in the cluster from the test machine.
 
 If the LabEnvironment Parameter PingSize is set, it will be used as the size
 of ping packet requested (via the -s option).  If it is not set, it defaults
 to 1024 bytes.
 
 According to the manual page for ping:
     Outputs packets as fast as they come back or one hundred times per
     second, whichever is more.  For every ECHO_REQUEST sent a period ``.''
     is printed, while for every ECHO_REPLY received a backspace is printed.
     This provides a rapid display of how many packets are being dropped.
     Only the super-user may use this option.  This can be very hard on a net-
     work and should be used with caution.
 ''' )
 
     def __init__(self, Env):
         self.Env = Env
 
     def IsApplicable(self):
         '''PingFests are always applicable ;-)
         '''
 
         return 1
 
     def SetUp(self, CM):
         '''Start the PingFest!'''
 
         self.PingSize=1024
         if CM.Env.has_key("PingSize"):
                 self.PingSize=CM.Env["PingSize"]
 
         CM.log("Starting %d byte flood pings" % self.PingSize)
 
         self.PingPids=[]
         for node in CM.Env["nodes"]:
             self.PingPids.append(self._pingchild(node))
 
         CM.log("Ping PIDs: " + repr(self.PingPids))
         return 1
 
     def TearDown(self, CM):
         '''Stop it right now!  My ears are pinging!!'''
 
         for pid in self.PingPids:
             if pid != None:
                 CM.log("Stopping ping process %d" % pid)
                 os.kill(pid, signal.SIGKILL)
 
     def _pingchild(self, node):
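        # Fork a child that flood-pings the node: "-q" quiet, "-f" flood,
        # "-n" numeric output (no DNS lookups); see ping(8)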
 
         Args = ["ping", "-qfn", "-s", str(self.PingSize), node]
 
 
         sys.stdin.flush()
         sys.stdout.flush()
         sys.stderr.flush()
         pid = os.fork()
 
         if pid < 0:
             self.Env.log("Cannot fork ping child")
             return None
         if pid > 0:
             return pid
 
 
         # Otherwise, we're the child process.
 
 
         os.execvp("ping", Args)
         self.Env.log("Cannot execvp ping: " + repr(Args))
         sys.exit(1)
 
 class PacketLoss(ScenarioComponent):
     (
 '''
 Test CTS with a modest amount of packet loss enabled, to verify that
 everything still runs as it should when communications are unreliable.
 ''')
 
     def IsApplicable(self):
         '''always Applicable'''
         return 1
 
     def SetUp(self, CM):
         '''Reduce the reliability of communications'''
         if float(CM.Env["XmitLoss"]) == 0 and float(CM.Env["RecvLoss"]) == 0 :
             return 1
 
         for node in CM.Env["nodes"]:
             CM.reducecomm_node(node)
 
         CM.log("Reduce the reliability of communications")
 
         return 1
 
 
     def TearDown(self, CM):
         '''Fix the reliability of communications'''
 
         if float(CM.Env["XmitLoss"]) == 0 and float(CM.Env["RecvLoss"]) == 0 :
             return 1
 
         for node in CM.Env["nodes"]:
             CM.unisolate_node(node)
 
         CM.log("Fix the reliability of communications")
 
 
 class BasicSanityCheck(ScenarioComponent):
     (
 '''
 ''')
 
     def IsApplicable(self):
         return self.Env["DoBSC"]
 
     def SetUp(self, CM):
 
         CM.prepare()
 
         # Clear out the cobwebs
         self.TearDown(CM)
 
         # Now start the Cluster Manager on all the nodes.
         CM.log("Starting Cluster Manager on BSC node(s).")
         return CM.startall()
 
     def TearDown(self, CM):
         CM.log("Stopping Cluster Manager on BSC node(s).")
         return CM.stopall()
 
 class Benchmark(ScenarioComponent):
     (
 '''
 ''')
 
     def IsApplicable(self):
         return self.Env["benchmark"]
 
     def SetUp(self, CM):
 
         CM.prepare()
 
         # Clear out the cobwebs
         self.TearDown(CM)
 
         # Now start the Cluster Manager on all the nodes.
         CM.log("Starting Cluster Manager on all node(s).")
         return CM.startall()
 
     def TearDown(self, CM):
         CM.log("Stopping Cluster Manager on all node(s).")
         return CM.stopall()
 
 class RollingUpgrade(ScenarioComponent):
     (
 '''
 Test a rolling upgrade between two versions of the stack
 ''')
 
     def __init__(self, Env):
         self.Env = Env
 
     def IsApplicable(self):
         if not self.Env["rpm-dir"]:
             return None
         if not self.Env["current-version"]:
             return None
         if not self.Env["previous-version"]:
             return None
 
         return 1
 
     def install(self, node, version):
 
         target_dir = "/tmp/rpm-%s" % version
         src_dir = "%s/%s" % (self.CM.Env["rpm-dir"], version)
 
         rc = self.CM.rsh(node, "mkdir -p %s" % target_dir)
         rc = self.CM.cp("%s/*.rpm %s:%s" % (src_dir, node, target_dir))
         rc = self.CM.rsh(node, "rpm -Uvh --force %s/*.rpm" % (target_dir))
 
        return 1
 
     def upgrade(self, node):
         return self.install(node, self.CM.Env["current-version"])
 
     def downgrade(self, node):
         return self.install(node, self.CM.Env["previous-version"])
 
    def SetUp(self, CM):
        self.CM = CM
        CM.prepare()
 
         # Clear out the cobwebs
         CM.stopall()
 
         CM.log("Downgrading all nodes to %s." % self.Env["previous-version"])
 
         for node in self.Env["nodes"]:
             if not self.downgrade(node):
                 CM.log("Couldn't downgrade %s" % node)
                 return None
 
         return 1
 
    def TearDown(self, CM):
        self.CM = CM
        # Stop everything
         CM.log("Stopping Cluster Manager on Upgrade nodes.")
         CM.stopall()
 
         CM.log("Upgrading all nodes to %s." % self.Env["current-version"])
         for node in self.Env["nodes"]:
             if not self.upgrade(node):
                 CM.log("Couldn't upgrade %s" % node)
                 return None
 
         return 1
diff --git a/cts/cts b/cts/cts
index 5390813750..15b4c58497 100755
--- a/cts/cts
+++ b/cts/cts
@@ -1,238 +1,247 @@
 #!/bin/bash
 
 cts_root=`dirname $0`
 stonith=rhcs
 schema=1.1
 stack=mcp
 
 logfile=0
 summary=0
 verbose=0
 pengine=0
 watch=0
 saved=0
 tests=""
 
 install=0
 clean=0
 build=0
 kill=0
 run=0
+boot=0
 
 custom_log=""
 patterns="-e CTS:"
 
 # Note the quotes around `$TEMP': they are essential!
 #TEMP=`getopt -o t:ac:sSvpe:lwf:d  --long run,clean -n 'cts' -- "$@"`
 #eval set -- "$TEMP"
 
 while true; do
     case $1 in
 	-x) set -x; shift;;
 	-a)
 	screen -ls | grep cts
 	exit 0;;
 	-c|-g) cluster_name=$2; shift; shift;;
 	-S) summary=1; saved=1; shift;;
 	-s) summary=1; shift;;
 	-v) verbose=`expr $verbose + 1`; shift;;
 	-p) pengine=1; shift;;
 	-e) patterns="$patterns -e `echo $2 | sed 's/ /\\\W/g'`"; shift; shift;;
 	-l) logfile=1; shift;;
 	-w) watch=1; shift;;
 	-f) summary=1; custom_log=$2; shift; shift;;
 	-t) tests="$tests $2"; shift; shift;;
 	[0-9]*) tests="$tests $1"; shift;;
 	--build|build) build=1; shift;;
 	--kill|kill) kill=1; shift; break;;
 	--run|run) run=1; shift; break;;
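+	# "boot" implies "clean" so the cluster starts from a known state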
+	--boot|boot) boot=1; clean=1; shift; break;;
 	--clean|clean) clean=1; shift;;
 	--inst|--install|install) install=1; clean=1; shift;;
 	local-init)
 	    local_root=
 	    case $cts_root in
 		/*) local_root=`dirname $cts_root`;;
 		*)  local_root=`dirname $PWD/$cts_root`;;
 	    esac
 
 	    cat << EOF > $cts_root/CTSvars.py
 class CTSvars:
         CTS_home="$local_root/cts"
         Fencing_home="$local_root/fencing"
         CRM_CONFIG_DIR="/var/lib/pacemaker/cib"
         CRM_DAEMON_USER="hacluster"
         CRM_DAEMON_DIR="/usr/libexec/pacemaker"
         OCF_ROOT_DIR="/usr/lib/ocf"
 EOF
 
 	    files="extra/cluster-init extra/cluster-helper extra/cluster-clean tools/crm_report.in"
 	    for f in $files; do
 		cp $local_root/$f $cts_root/
 	    done
 
 	    cp $local_root/tools/report.common.in $local_root/tools/report.common
 	    sed -i.sed s:@localstatedir@:/var: $local_root/tools/report.common
 
 	    cp $cts_root/crm_report.in $cts_root/crm_report
 	    sed -i.sed s:@datadir@/@PACKAGE@:$local_root/tools: $cts_root/crm_report
 	    chmod +x $cts_root/crm_report
 
 	    cp $cts_root/LSBDummy.in $cts_root/LSBDummy
 	    chmod +x $local_root/fencing/fence_*
 	    sed -i.sed s:@OCF_ROOT_DIR@:/usr/lib/ocf: $cts_root/LSBDummy
 	    exit 0
 	    ;;
 
 	--wget)
 	    files="cluster-helper cluster-clean"
 	    for f in $files; do
 		rm -f $cts_root/$f
 		echo "Downloading helper script $f from GitHub"
 		wget -O $cts_root/$f https://raw.github.com/ClusterLabs/pacemaker/master/extra/$f
 		chmod +x $cts_root/$f
 	    done
 	    shift
 	    ;;
 	--) shift; tests="$tests $*"; break;;
 	"") break;;
 	*) echo "Unknown argument: $1"; exit 1;;
     esac
 done
 
 # Add the location of this script
 export PATH="$PATH:$cts_root"
 which cluster-helper &>/dev/null
 if [ $? != 0 ]; then
     echo $0 needs the cluster-helper script to be in your path
     echo You can obtain it from: https://raw.github.com/ClusterLabs/pacemaker/master/extra/cluster-helper
     exit 1
 fi
 
 which cluster-clean &>/dev/null
 if [ $? != 0 ]; then
     echo $0 needs the cluster-clean script to be in your path
     echo You can obtain it from: https://raw.github.com/ClusterLabs/pacemaker/master/extra/cluster-clean
     exit 1
 fi
 
 if [ "x$cluster_name" = x -o "x$cluster_name" = xpick ]; then
     clusters=`ls -1 ~/.dsh/group/[a-z]+[0-9] | sed s/.*group.// | tr '\n' ' ' `
 
     echo "custom) interactively define a cluster"
     for i in $clusters; do
 	echo "$i) `cluster-helper --list short -g $i`"
     done
 
     read -p "Choose a cluster [custom]: " cluster_name
     echo
 fi
 
 if [ -z "$cluster_name" ]; then
     cluster_name=custom
 fi
 
 
 case $cluster_name in
   custom)
     read -p "Cluster name: " cluster_name
     read -p "Cluster hosts: " cluster_hosts
     read -p "Cluster log file: " cluster_log
     ;;
   *)
     cluster_hosts=`cluster-helper --list short -g $cluster_name`
     cluster_log=~/cluster-$cluster_name.log
     ;;
 esac
 
 if [ $build = 1 -a $run = 1 ]; then
     install=1
     clean=1
 fi
 
 if [ $build = 1 ]; then
     which build-pcmk &>/dev/null
     if [ $? != 0 ]; then
 	echo "You'll need to write/obtain build-pcmk in order to build pacemaker from here.  Skipping"
     else
 	build-pcmk -18
 	rc=$?
 	if [ $rc != 0 ]; then
 	    echo "Build failed: $rc"
 	    exit $rc
 	fi
     fi
 fi
 
 if [ $clean = 1 ]; then
     rm -f $cluster_log; cluster-clean -g $cluster_name --kill
 elif [ $kill = 1 ]; then
     cluster-clean -g $cluster_name --kill-only
 fi
 
 if [ $install = 1 ]; then
     cluster-helper -g $cluster_name -- yum install -y pacemaker pacemaker-debuginfo pacemaker-cts libqb libqb-debuginfo
 fi
 
-if [ $run = 1 ]; then
+if [ $boot = 1 ]; then
+    $cts_root/CTSlab.py -g $cluster_name -r --stonith $stonith -c --schema pacemaker-$schema --stack $stack --boot
+    rc=$?
+    if [ $rc = 0 ]; then
+	echo "The cluster is ready..."
+    fi
+    exit $rc
+elif [ $run = 1 ]; then
     $cts_root/CTSlab.py -g $cluster_name -r --stonith $stonith -c --schema pacemaker-$schema --stack $stack 500 $*
     exit $?
 
 elif [ $clean = 1 ]; then
     exit 0
 fi
 
 screen -ls | grep cts-$cluster_name &>/dev/null
 active=$?
 
 if [ -n "$custom_log" ]; then
     cluster_log=$custom_log
 fi
 
 if [ "x$tests" != x -a "x$tests" != "x " ]; then
     for t in $tests; do
 	echo "crm_report --cts-log $cluster_log -d -T $t"
 	crm_report --cts-log $cluster_log -d -T $t
     done
 
 elif [ $logfile = 1 ]; then
     echo $cluster_log
 
 elif [ $summary = 1 ]; then
     files=$cluster_log
     if [ $saved = 1 ]; then
 	files=`ls -1tr ~/CTS-*/cluster-log.txt`
     fi
     for f in $files; do
 	echo $f
 	case $verbose in
 	    0) cat -n $f | grep $patterns | grep -v "CTS: debug:"
     		;;
 	    1) cat -n $f | grep $patterns | grep -v "CTS:.* cmd:"
     		;;
 	    *) cat -n $f | grep $patterns
 		;;
 	esac
 	echo ""
     done
 
 elif [ $watch = 1 ]; then
     case $verbose in
 	0) tail -F $cluster_log | grep $patterns | grep -v "CTS: debug:"
     		;;
 	1) tail -F $cluster_log | grep $patterns | grep -v "CTS:.* cmd:"
     		;;
 	*) tail -F $cluster_log | grep $patterns
 		;;
     esac
 
 elif [ $active = 0 ]; then
     screen -x cts-$cluster_name
 
 else
     touch $cluster_log
 
 #    . ~/.bashrc
 #    . $BASH_FILES/.heartbeat
     export cluster_name cluster_hosts cluster_log
     screen -S cts-$cluster_name bash
 fi