
diff --git a/cts/lab/CIB.py b/cts/lab/CIB.py
index b6c0f98460..defbc1850b 100644
--- a/cts/lab/CIB.py
+++ b/cts/lab/CIB.py
@@ -1,518 +1,518 @@
""" CIB generator for Pacemaker's Cluster Test Suite (CTS)
"""
__copyright__ = "Copyright 2008-2023 the Pacemaker project contributors"
__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
import os
import warnings
import tempfile
from pacemaker.buildoptions import BuildOptions
class CibBase(object):
def __init__(self, Factory, tag, _id, **kwargs):
self.tag = tag
self.name = _id
self.kwargs = kwargs
self.children = []
self.Factory = Factory
def __repr__(self):
return "%s-%s" % (self.tag, self.name)
def add_child(self, child):
self.children.append(child)
def __setitem__(self, key, value):
if value:
self.kwargs[key] = value
else:
self.kwargs.pop(key, None)
from cts.cib_xml import *
class ConfigBase(object):
cts_cib = None
version = "unknown"
Factory = None
def __init__(self, CM, factory, tmpfile=None):
self.CM = CM
self.Factory = factory
if not tmpfile:
warnings.filterwarnings("ignore")
f=tempfile.NamedTemporaryFile(delete=True)
f.close()
tmpfile = f.name
warnings.resetwarnings()
self.Factory.tmpfile = tmpfile
def version(self):
return self.version
def NextIP(self):
ip = self.CM.Env["IPBase"]
if ":" in ip:
(prefix, sep, suffix) = ip.rpartition(":")
suffix = str(hex(int(suffix, 16)+1)).lstrip("0x")
else:
(prefix, sep, suffix) = ip.rpartition(".")
suffix = str(int(suffix)+1)
ip = prefix + sep + suffix
self.CM.Env["IPBase"] = ip
return ip.strip()
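# Hedged aside, not part of the patch: the suffix-increment logic above,
# extracted as a pure function for illustration. format(n, "x") is the
# idiomatic equivalent of the hex()/lstrip("0x") idiom NextIP uses.
def _next_ip_sketch(ip):
    if ":" in ip:
        (prefix, sep, suffix) = ip.rpartition(":")
        suffix = format(int(suffix, 16) + 1, "x")
    else:
        (prefix, sep, suffix) = ip.rpartition(".")
        suffix = str(int(suffix) + 1)
    return prefix + sep + suffix
assert _next_ip_sketch("10.0.0.10") == "10.0.0.11"
assert _next_ip_sketch("fe80::1234:56:7890:1000") == "fe80::1234:56:7890:1001"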
class CIB12(ConfigBase):
version = "pacemaker-1.2"
counter = 1
def _show(self, command=""):
output = ""
(_, result) = self.Factory.rsh(self.Factory.target, "HOME=/root CIB_file="+self.Factory.tmpfile+" cibadmin -Ql "+command, verbose=1)
for line in result:
output += line
self.Factory.debug("Generated Config: "+line)
return output
def NewIP(self, name=None, standard="ocf"):
if self.CM.Env["IPagent"] == "IPaddr2":
ip = self.NextIP()
if not name:
if ":" in ip:
(prefix, sep, suffix) = ip.rpartition(":")
name = "r"+suffix
else:
name = "r"+ip
r = Resource(self.Factory, name, self.CM.Env["IPagent"], standard)
r["ip"] = ip
if ":" in ip:
r["cidr_netmask"] = "64"
r["nic"] = "eth0"
else:
r["cidr_netmask"] = "32"
else:
if not name:
name = "r%s%d" % (self.CM.Env["IPagent"], self.counter)
self.counter = self.counter + 1
r = Resource(self.Factory, name, self.CM.Env["IPagent"], standard)
r.add_op("monitor", "5s")
return r
def get_node_id(self, node_name):
""" Check the cluster configuration for a node ID. """
# We can't account for every possible configuration,
# so we only return a node ID if:
# * The node is specified in /etc/corosync/corosync.conf
# with "ring0_addr:" equal to node_name and "nodeid:"
# explicitly specified.
# In all other cases, we return 0.
node_id = 0
# awkward command: use } as record separator
# so each corosync.conf "object" is one record;
# match the "node {" record that has "ring0_addr: node_name";
# then print the substring of that record after "nodeid:"
(rc, output) = self.Factory.rsh(self.Factory.target,
r"""awk -v RS="}" """
r"""'/^(\s*nodelist\s*{)?\s*node\s*{.*(ring0_addr|name):\s*%s(\s+|$)/"""
r"""{gsub(/.*nodeid:\s*/,"");gsub(/\s+.*$/,"");print}' %s"""
% (node_name, BuildOptions.COROSYNC_CONFIG_FILE), verbose=1)
if rc == 0 and len(output) == 1:
try:
node_id = int(output[0])
except ValueError:
node_id = 0
return node_id
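# Hedged aside, not part of the patch: what the awk pipeline above computes,
# re-expressed in Python against a hypothetical corosync.conf fragment.
# Splitting on "}" makes each corosync.conf object one record, as RS="}" does.
import re
_conf = """
nodelist {
    node {
        ring0_addr: rhel8-1
        nodeid: 1
    }
    node {
        ring0_addr: rhel8-2
        nodeid: 2
    }
}
"""
def _node_id_sketch(name, text):
    for record in text.split("}"):
        if re.search(r"node\s*{.*(ring0_addr|name):\s*%s(\s|$)" % re.escape(name),
                     record, re.S):
            m = re.search(r"nodeid:\s*(\S+)", record)
            return int(m.group(1)) if m else 0
    return 0
assert _node_id_sketch("rhel8-2", _conf) == 2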
def install(self, target):
old = self.Factory.tmpfile
# Force a rebuild
self.cts_cib = None
self.Factory.tmpfile = BuildOptions.CIB_DIR + "/cib.xml"
self.contents(target)
self.Factory.rsh(self.Factory.target, "chown " + BuildOptions.DAEMON_USER + " " + self.Factory.tmpfile)
self.Factory.tmpfile = old
def contents(self, target=None):
# fencing resource
if self.cts_cib:
return self.cts_cib
if target:
self.Factory.target = target
self.Factory.rsh(self.Factory.target, "HOME=/root cibadmin --empty %s > %s" % (self.version, self.Factory.tmpfile))
self.num_nodes = len(self.CM.Env["nodes"])
no_quorum = "stop"
if self.num_nodes < 3:
no_quorum = "ignore"
self.Factory.log("Cluster only has %d nodes, configuring: no-quorum-policy=ignore" % self.num_nodes)
# We don't need a nodes section unless we add attributes
stn = None
# Fencing resource
# Define first so that the shell doesn't reject every update
if self.CM.Env["DoFencing"]:
# Define the "real" fencing device
st = Resource(self.Factory, "Fencing", ""+self.CM.Env["stonith-type"], "stonith")
# Set a threshold for unreliable stonith devices such as the vmware one
st.add_meta("migration-threshold", "5")
st.add_op("monitor", "120s", timeout="120s")
st.add_op("stop", "0", timeout="60s")
st.add_op("start", "0", timeout="60s")
# For remote node tests, a cluster node is stopped and brought back up
# as a remote node with the name "remote-OLDNAME". To allow fencing
# devices to fence these nodes, create a list of all possible node names.
all_node_names = [ prefix+n for n in self.CM.Env["nodes"] for prefix in ('', 'remote-') ]
# Add all parameters specified by user
entries = self.CM.Env["stonith-params"].split(',')
for entry in entries:
try:
(name, value) = entry.split('=', 1)
except ValueError:
print("Warning: skipping invalid fencing parameter: %s" % entry)
continue
# Allow user to specify "all" as the node list, and expand it here
if name in [ "hostlist", "pcmk_host_list" ] and value == "all":
value = ' '.join(all_node_names)
st[name] = value
st.commit()
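# Hedged sketch, not part of the patch: the stonith-params parsing above,
# shown standalone with hypothetical values.
_params = "hostlist=all,livedangerously=yes"
_all_node_names = ["node1", "remote-node1"]
_parsed = {}
for _entry in _params.split(','):
    try:
        (_name, _value) = _entry.split('=', 1)
    except ValueError:
        continue  # malformed entry: skipped with a warning in the loop above
    if _name in ["hostlist", "pcmk_host_list"] and _value == "all":
        _value = ' '.join(_all_node_names)  # expand "all" to every possible target
    _parsed[_name] = _value
assert _parsed == {"hostlist": "node1 remote-node1", "livedangerously": "yes"}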
# Test advanced fencing logic
if True:
stf_nodes = []
stt_nodes = []
attr_nodes = {}
# Create the levels
stl = FencingTopology(self.Factory)
for node in self.CM.Env["nodes"]:
# Remote node tests will rename the node
remote_node = "remote-" + node
# Randomly assign node to a fencing method
ftype = self.CM.Env.RandomGen.choice(["levels-and", "levels-or ", "broadcast "])
# For levels-and, randomly choose targeting by node name or attribute
by = ""
if ftype == "levels-and":
node_id = self.get_node_id(node)
if node_id == 0 or self.CM.Env.RandomGen.choice([True, False]):
by = " (by name)"
else:
attr_nodes[node] = node_id
by = " (by attribute)"
self.CM.log(" - Using %s fencing for node: %s%s" % (ftype, node, by))
if ftype == "levels-and":
# If targeting by name, add a topology level for this node
if node not in attr_nodes:
stl.level(1, node, "FencingPass,Fencing")
# Always target remote nodes by name, otherwise we would need to add
# an attribute to the remote node only during remote tests (we don't
# want nonexistent remote nodes showing up in the non-remote tests).
# That complexity is not worth the effort.
stl.level(1, remote_node, "FencingPass,Fencing")
# Add the node (and its remote equivalent) to the list of levels-and nodes.
stt_nodes.extend([node, remote_node])
elif ftype == "levels-or ":
for n in [ node, remote_node ]:
stl.level(1, n, "FencingFail")
stl.level(2, n, "Fencing")
stf_nodes.extend([node, remote_node])
# If any levels-and nodes were targeted by attribute,
# create the attributes and a level for the attribute.
if attr_nodes:
stn = Nodes(self.Factory)
for (node_name, node_id) in list(attr_nodes.items()):
stn.add_node(node_name, node_id, { "cts-fencing" : "levels-and" })
stl.level(1, None, "FencingPass,Fencing", "cts-fencing", "levels-and")
# Create a Dummy agent that always passes for levels-and
if len(stt_nodes):
stt = Resource(self.Factory, "FencingPass", "fence_dummy", "stonith")
stt["pcmk_host_list"] = " ".join(stt_nodes)
# Wait this many seconds before doing anything, handy for letting disks get flushed too
stt["random_sleep_range"] = "30"
stt["mode"] = "pass"
stt.commit()
# Create a Dummy agent that always fails for levels-or
if len(stf_nodes):
stf = Resource(self.Factory, "FencingFail", "fence_dummy", "stonith")
stf["pcmk_host_list"] = " ".join(stf_nodes)
# Wait this many seconds before doing anything, handy for letting disks get flushed too
stf["random_sleep_range"] = "30"
stf["mode"] = "fail"
stf.commit()
# Now commit the levels themselves
stl.commit()
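# Hedged illustration, not part of the patch: roughly how pacemaker-fenced
# consumes the levels committed above. Levels are tried in index order and a
# level succeeds only if every device in it succeeds, so a "levels-or" node
# (level 1 = FencingFail, level 2 = Fencing) exercises the escalation path.
def _fence_sketch(levels, try_device):
    for index in sorted(levels):
        if all(try_device(dev) for dev in levels[index]):
            return True
    return False
assert _fence_sketch({1: ["FencingFail"], 2: ["Fencing"]},
                     lambda dev: dev != "FencingFail")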
o = Option(self.Factory)
o["stonith-enabled"] = self.CM.Env["DoFencing"]
o["start-failure-is-fatal"] = "false"
o["pe-input-series-max"] = "5000"
o["shutdown-escalation"] = "5min"
o["batch-limit"] = "10"
o["dc-deadtime"] = "5s"
o["no-quorum-policy"] = no_quorum
- if self.CM.Env["DoBSC"] == 1:
+ if self.CM.Env["DoBSC"]:
o["ident-string"] = "Linux-HA TEST configuration file - REMOVEME!!"
o.commit()
o = OpDefaults(self.Factory)
o["timeout"] = "90s"
o.commit()
# Commit the nodes section if we defined one
if stn is not None:
stn.commit()
# Add an alerts section if possible
if self.Factory.rsh.exists_on_all(self.CM.Env["notification-agent"], self.CM.Env["nodes"]):
alerts = Alerts(self.Factory)
alerts.add_alert(self.CM.Env["notification-agent"],
self.CM.Env["notification-recipient"])
alerts.commit()
# Add resources?
if self.CM.Env["CIBResource"]:
self.add_resources()
if self.CM.cluster_monitor == 1:
mon = Resource(self.Factory, "cluster_mon", "ocf", "ClusterMon", "pacemaker")
mon.add_op("start", "0", requires="nothing")
mon.add_op("monitor", "5s", requires="nothing")
mon["update"] = "10"
mon["extra_options"] = "-r -n"
mon["user"] = "abeekhof"
mon["htmlfile"] = "/suse/abeekhof/Export/cluster.html"
mon.commit()
#self._create('''location prefer-dc cluster_mon rule -INFINITY: \#is_dc eq false''')
# generate cib
self.cts_cib = self._show()
if self.Factory.tmpfile != BuildOptions.CIB_DIR + "/cib.xml":
self.Factory.rsh(self.Factory.target, "rm -f "+self.Factory.tmpfile)
return self.cts_cib
def add_resources(self):
# Per-node resources
for node in self.CM.Env["nodes"]:
name = "rsc_"+node
r = self.NewIP(name)
r.prefer(node, "100")
r.commit()
# Migrator
# Make this slightly sticky (since we have no other location constraints) to avoid relocation during Reattach
m = Resource(self.Factory, "migrator","Dummy", "ocf", "pacemaker")
m["passwd"] = "whatever"
m.add_meta("resource-stickiness","1")
m.add_meta("allow-migrate", "1")
m.add_op("monitor", "P10S")
m.commit()
# Ping the test exerciser
p = Resource(self.Factory, "ping-1","ping", "ocf", "pacemaker")
p.add_op("monitor", "60s")
p["host_list"] = self.CM.Env["cts-exerciser"]
p["name"] = "connected"
p["debug"] = "true"
c = Clone(self.Factory, "Connectivity", p)
c["globally-unique"] = "false"
c.commit()
# promotable clone resource
s = Resource(self.Factory, "stateful-1", "Stateful", "ocf", "pacemaker")
s.add_op("monitor", "15s", timeout="60s")
s.add_op("monitor", "16s", timeout="60s", role="Promoted")
ms = Clone(self.Factory, "promotable-1", s)
ms["promotable"] = "true"
ms["clone-max"] = self.num_nodes
ms["clone-node-max"] = 1
ms["promoted-max"] = 1
ms["promoted-node-max"] = 1
# Require connectivity to run the promotable clone
r = Rule(self.Factory, "connected", "-INFINITY", op="or")
r.add_child(Expression(self.Factory, "m1-connected-1", "connected", "lt", "1"))
r.add_child(Expression(self.Factory, "m1-connected-2", "connected", "not_defined", None))
ms.prefer("connected", rule=r)
ms.commit()
# Group Resource
g = Group(self.Factory, "group-1")
g.add_child(self.NewIP())
if self.CM.Env["have_systemd"]:
sysd = Resource(self.Factory, "petulant",
"pacemaker-cts-dummyd@10", "service")
sysd.add_op("monitor", "P10S")
g.add_child(sysd)
else:
g.add_child(self.NewIP())
g.add_child(self.NewIP())
# Make group depend on the promotable clone
g.after("promotable-1", first="promote", then="start")
g.colocate("promotable-1", "INFINITY", withrole="Promoted")
g.commit()
# LSB resource
lsb = Resource(self.Factory, "lsb-dummy", "LSBDummy", "lsb")
lsb.add_op("monitor", "5s")
# LSB with group
lsb.after("group-1")
lsb.colocate("group-1")
lsb.commit()
class CIB20(CIB12):
version = "pacemaker-2.5"
class CIB30(CIB12):
version = "pacemaker-3.7"
#class HASI(CIB10):
# def add_resources(self):
# # DLM resource
# self._create('''primitive dlm ocf:pacemaker:controld op monitor interval=120s''')
# self._create('''clone dlm-clone dlm meta globally-unique=false interleave=true''')
# O2CB resource
# self._create('''primitive o2cb ocf:ocfs2:o2cb op monitor interval=120s''')
# self._create('''clone o2cb-clone o2cb meta globally-unique=false interleave=true''')
# self._create('''colocation o2cb-with-dlm INFINITY: o2cb-clone dlm-clone''')
# self._create('''order start-o2cb-after-dlm mandatory: dlm-clone o2cb-clone''')
class ConfigFactory(object):
def __init__(self, CM):
self.CM = CM
self.rsh = self.CM.rsh
self.register("pacemaker12", CIB12, CM, self)
self.register("pacemaker20", CIB20, CM, self)
self.register("pacemaker30", CIB30, CM, self)
# self.register("hae", HASI, CM, self)
if not self.CM.Env["ListTests"]:
self.target = self.CM.Env["nodes"][0]
self.tmpfile = None
def log(self, args):
self.CM.log("cib: %s" % args)
def debug(self, args):
self.CM.debug("cib: %s" % args)
def register(self, methodName, constructor, *args, **kargs):
"""register a constructor"""
_args = [constructor]
_args.extend(args)
setattr(self, methodName, ConfigFactoryItem(*_args, **kargs))
def unregister(self, methodName):
"""unregister a constructor"""
delattr(self, methodName)
def createConfig(self, name="pacemaker-1.0"):
if name == "pacemaker-1.0":
name = "pacemaker10";
elif name == "pacemaker-1.2":
name = "pacemaker12";
elif name == "pacemaker-2.0":
name = "pacemaker20";
elif name.startswith("pacemaker-3."):
name = "pacemaker30";
elif name == "hasi":
name = "hae";
if hasattr(self, name):
return getattr(self, name)()
else:
self.CM.log("Configuration variant '%s' is unknown. Defaulting to latest config" % name)
return self.pacemaker30()
class ConfigFactoryItem(object):
def __init__(self, function, *args, **kargs):
self._function = function
self._args = args
self._kargs = kargs
def __call__(self, *args, **kargs):
"""call function"""
_args = list(self._args)
_args.extend(args)
_kargs = self._kargs.copy()
_kargs.update(kargs)
return self._function(*_args,**_kargs)
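# Hedged aside, not part of the patch: ConfigFactoryItem is essentially
# functools.partial with keyword merging, e.g.:
from functools import partial
class _DemoConfig:  # hypothetical stand-in for CIB12 and friends
    def __init__(self, CM, factory, tmpfile=None):
        self.CM, self.factory, self.tmpfile = CM, factory, tmpfile
_make = partial(_DemoConfig, "cm-object", "factory-object")
_cfg = _make(tmpfile="/tmp/cib.xml")  # same effect as calling a registered item
assert _cfg.tmpfile == "/tmp/cib.xml"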
if __name__ == '__main__':
""" Unit test (pass cluster node names as command line arguments) """
import cts.CTS
import cts.CM_corosync
import sys
if len(sys.argv) < 2:
print("Usage: %s <node> ..." % sys.argv[0])
sys.exit(1)
args = [
"--nodes", " ".join(sys.argv[1:]),
"--clobber-cib",
"--populate-resources",
"--stack", "corosync",
"--test-ip-base", "fe80::1234:56:7890:1000",
"--stonith", "rhcs",
]
env = CTS.CtsLab(args)
cm = CM_corosync.crm_corosync()
CibFactory = ConfigFactory(cm)
cib = CibFactory.createConfig("pacemaker-3.0")
print(cib.contents())
diff --git a/cts/lab/CTSscenarios.py b/cts/lab/CTSscenarios.py
index c7cccdf89c..79d2aab648 100644
--- a/cts/lab/CTSscenarios.py
+++ b/cts/lab/CTSscenarios.py
@@ -1,601 +1,601 @@
""" Test scenario classes for Pacemaker's Cluster Test Suite (CTS)
"""
__copyright__ = "Copyright 2000-2021 the Pacemaker project contributors"
__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
import os
import re
import signal
import sys
import time
from cts.CTStests import CTSTest
from cts.CTSaudits import ClusterAudit
from cts.watcher import LogWatcher
class ScenarioComponent(object):
def __init__(self, Env):
self.Env = Env
def IsApplicable(self):
- '''Return TRUE if the current ScenarioComponent is applicable
+ '''Return True if the current ScenarioComponent is applicable
in the given LabEnvironment given to the constructor.
'''
raise ValueError("Abstract Class member (IsApplicable)")
def SetUp(self, CM):
'''Set up the given ScenarioComponent'''
raise ValueError("Abstract Class member (Setup)")
def TearDown(self, CM):
'''Tear down (undo) the given ScenarioComponent'''
raise ValueError("Abstract Class member (Setup)")
class Scenario(object):
(
'''The basic idea of a scenario is that of an ordered list of
ScenarioComponent objects. Each ScenarioComponent is SetUp() in turn,
and then after the tests have been run, they are torn down using TearDown()
(in reverse order).
A Scenario is applicable to a particular cluster manager iff each
ScenarioComponent is applicable.
A partially set up scenario is torn down if it fails during setup.
''')
def __init__(self, ClusterManager, Components, Audits, Tests):
"Initialize the Scenario from the list of ScenarioComponents"
self.ClusterManager = ClusterManager
self.Components = Components
self.Audits = Audits
self.Tests = Tests
self.BadNews = None
self.TestSets = []
self.Stats = {"success":0, "failure":0, "BadNews":0, "skipped":0}
self.Sets = []
#self.ns=CTS.NodeStatus(self.Env)
for comp in Components:
if not issubclass(comp.__class__, ScenarioComponent):
raise ValueError("Init value must be subclass of ScenarioComponent")
for audit in Audits:
if not issubclass(audit.__class__, ClusterAudit):
raise ValueError("Init value must be subclass of ClusterAudit")
for test in Tests:
if not issubclass(test.__class__, CTSTest):
raise ValueError("Init value must be a subclass of CTSTest")
def IsApplicable(self):
(
'''A Scenario IsApplicable() iff each of its ScenarioComponents IsApplicable()
'''
)
for comp in self.Components:
if not comp.IsApplicable():
return None
- return 1
+ return True
def SetUp(self):
'''Set up the Scenario. Return TRUE on success.'''
self.ClusterManager.prepare()
self.audit() # Also detects remote/local log config
self.ClusterManager.StatsMark(0)
self.ClusterManager.ns.WaitForAllNodesToComeUp(self.ClusterManager.Env["nodes"])
self.audit()
self.ClusterManager.install_support()
self.BadNews = LogWatcher(self.ClusterManager.Env["LogFileName"],
self.ClusterManager.templates.get_patterns("BadNews"),
"BadNews", 0,
kind=self.ClusterManager.Env["LogWatcher"],
hosts=self.ClusterManager.Env["nodes"])
self.BadNews.setwatch() # Call after we've figured out what type of log watching to do in LogAudit
j = 0
while j < len(self.Components):
if not self.Components[j].SetUp(self.ClusterManager):
# OOPS! We failed. Tear partial setups down.
self.audit()
self.ClusterManager.log("Tearing down partial setup")
self.TearDown(j)
return None
j = j + 1
self.audit()
return 1
def TearDown(self, max=None):
'''Tear Down the Scenario - in reverse order.'''
if max == None:
max = len(self.Components)-1
j = max
while j >= 0:
self.Components[j].TearDown(self.ClusterManager)
j = j - 1
self.audit()
self.ClusterManager.StatsExtract()
self.ClusterManager.install_support("uninstall")
def incr(self, name):
'''Increment (or initialize) the value associated with the given name'''
if not name in self.Stats:
self.Stats[name] = 0
self.Stats[name] = self.Stats[name]+1
def run(self, Iterations):
self.ClusterManager.oprofileStart()
try:
self.run_loop(Iterations)
self.ClusterManager.oprofileStop()
except:
self.ClusterManager.oprofileStop()
raise
def run_loop(self, Iterations):
raise ValueError("Abstract Class member (run_loop)")
def run_test(self, test, testcount):
nodechoice = self.ClusterManager.Env.RandomNode()
ret = 1
where = ""
did_run = 0
self.ClusterManager.StatsMark(testcount)
self.ClusterManager.instance_errorstoignore_clear()
self.ClusterManager.log(("Running test %s" % test.name).ljust(35) + (" (%s) " % nodechoice).ljust(15) + "[" + ("%d" % testcount).rjust(3) + "]")
starttime = test.set_timer()
if not test.setup(nodechoice):
self.ClusterManager.log("Setup failed")
ret = 0
elif not test.canrunnow(nodechoice):
self.ClusterManager.log("Skipped")
test.skipped()
else:
did_run = 1
ret = test(nodechoice)
if not test.teardown(nodechoice):
self.ClusterManager.log("Teardown failed")
if self.ClusterManager.Env["continue"]:
answer = "Y"
else:
try:
answer = input('Continue? [nY]')
except EOFError as e:
answer = "n"
if answer and answer == "n":
raise ValueError("Teardown of %s on %s failed" % (test.name, nodechoice))
ret = 0
stoptime = time.time()
self.ClusterManager.oprofileSave(testcount)
elapsed_time = stoptime - starttime
test_time = stoptime - test.get_timer()
if not test["min_time"]:
test["elapsed_time"] = elapsed_time
test["min_time"] = test_time
test["max_time"] = test_time
else:
test["elapsed_time"] = test["elapsed_time"] + elapsed_time
if test_time < test["min_time"]:
test["min_time"] = test_time
if test_time > test["max_time"]:
test["max_time"] = test_time
if ret:
self.incr("success")
test.log_timer()
else:
self.incr("failure")
self.ClusterManager.statall()
did_run = 1 # Force the test count to be incremented anyway so test extraction works
self.audit(test.errorstoignore())
return did_run
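# Hedged sketch, not part of the patch: the per-test timing bookkeeping in
# run_test(), as a pure helper over a stats dict.
def _update_timing_sketch(stats, test_time, elapsed_time):
    if not stats.get("min_time"):
        stats["elapsed_time"] = elapsed_time
        stats["min_time"] = test_time
        stats["max_time"] = test_time
    else:
        stats["elapsed_time"] += elapsed_time
        stats["min_time"] = min(stats["min_time"], test_time)
        stats["max_time"] = max(stats["max_time"], test_time)
    return stats
assert _update_timing_sketch({}, 2.0, 3.0)["max_time"] == 2.0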
def summarize(self):
self.ClusterManager.log("****************")
self.ClusterManager.log("Overall Results:" + repr(self.Stats))
self.ClusterManager.log("****************")
stat_filter = {
"calls":0,
"failure":0,
"skipped":0,
"auditfail":0,
}
self.ClusterManager.log("Test Summary")
for test in self.Tests:
for key in list(stat_filter.keys()):
stat_filter[key] = test.Stats[key]
self.ClusterManager.log(("Test %s: "%test.name).ljust(25) + " %s"%repr(stat_filter))
self.ClusterManager.debug("Detailed Results")
for test in self.Tests:
self.ClusterManager.debug(("Test %s: "%test.name).ljust(25) + " %s"%repr(test.Stats))
self.ClusterManager.log("<<<<<<<<<<<<<<<< TESTS COMPLETED")
def audit(self, LocalIgnore=[]):
errcount = 0
ignorelist = []
ignorelist.append("CTS:")
ignorelist.extend(LocalIgnore)
ignorelist.extend(self.ClusterManager.errorstoignore())
ignorelist.extend(self.ClusterManager.instance_errorstoignore())
# This makes sure everything is stabilized before starting...
failed = 0
for audit in self.Audits:
if not audit():
self.ClusterManager.log("Audit " + audit.name() + " FAILED.")
failed += 1
else:
self.ClusterManager.debug("Audit " + audit.name() + " passed.")
while errcount < 1000:
match = None
if self.BadNews:
match = self.BadNews.look(0)
if match:
add_err = 1
for ignore in ignorelist:
if add_err == 1 and re.search(ignore, match):
add_err = 0
if add_err == 1:
self.ClusterManager.log("BadNews: " + match)
self.incr("BadNews")
errcount = errcount + 1
else:
break
else:
if self.ClusterManager.Env["continue"]:
answer = "Y"
else:
try:
answer = input('Big problems. Continue? [nY]')
except EOFError as e:
answer = "n"
if answer and answer == "n":
self.ClusterManager.log("Shutting down.")
self.summarize()
self.TearDown()
raise ValueError("Looks like we hit a BadNews jackpot!")
if self.BadNews:
self.BadNews.end()
return failed
class AllOnce(Scenario):
'''Every Test Once''' # Accessible as __doc__
def run_loop(self, Iterations):
testcount = 1
for test in self.Tests:
self.run_test(test, testcount)
testcount += 1
class RandomTests(Scenario):
'''Random Test Execution'''
def run_loop(self, Iterations):
testcount = 1
while testcount <= Iterations:
test = self.ClusterManager.Env.RandomGen.choice(self.Tests)
self.run_test(test, testcount)
testcount += 1
class BasicSanity(Scenario):
'''Basic Cluster Sanity'''
def run_loop(self, Iterations):
testcount = 1
while testcount <= Iterations:
test = self.ClusterManager.Env.RandomGen.choice(self.Tests)
self.run_test(test, testcount)
testcount += 1
class Sequence(Scenario):
'''Named Tests in Sequence'''
def run_loop(self, Iterations):
testcount = 1
while testcount <= Iterations:
for test in self.Tests:
self.run_test(test, testcount)
testcount += 1
class Boot(Scenario):
'''Start the Cluster'''
def run_loop(self, Iterations):
testcount = 0
class BootCluster(ScenarioComponent):
(
'''BootCluster is the most basic of ScenarioComponents.
This ScenarioComponent simply starts the cluster manager on all the nodes.
It is fairly robust as it waits for all nodes to come up before starting
as they might have been rebooted or crashed for some reason beforehand.
''')
def __init__(self, Env):
pass
def IsApplicable(self):
'''BootCluster is so generic it is always Applicable'''
- return 1
+ return True
def SetUp(self, CM):
'''Basic Cluster Manager startup. Start everything'''
CM.prepare()
# Clear out the cobwebs ;-)
CM.stopall(verbose=True, force=True)
# Now start the Cluster Manager on all the nodes.
CM.log("Starting Cluster Manager on all nodes.")
return CM.startall(verbose=True, quick=True)
def TearDown(self, CM, force=False):
'''Tear down the given ScenarioComponent'''
# Stop the cluster manager everywhere
CM.log("Stopping Cluster Manager on all nodes")
return CM.stopall(verbose=True, force=force)
class LeaveBooted(BootCluster):
def TearDown(self, CM):
'''Leave the cluster manager running on all nodes'''
# Stop the cluster manager everywhere
CM.log("Leaving Cluster running on all nodes")
return 1
class PingFest(ScenarioComponent):
(
'''PingFest does a flood ping to each node in the cluster from the test machine.
If the LabEnvironment Parameter PingSize is set, it will be used as the size
of ping packet requested (via the -s option). If it is not set, it defaults
to 1024 bytes.
According to the manual page for ping:
Outputs packets as fast as they come back or one hundred times per
second, whichever is more. For every ECHO_REQUEST sent a period ``.''
is printed, while for every ECHO_REPLY received a backspace is printed.
This provides a rapid display of how many packets are being dropped.
Only the super-user may use this option. This can be very hard on a network
and should be used with caution.
''' )
def __init__(self, Env):
self.Env = Env
def IsApplicable(self):
'''PingFests are always applicable ;-)
'''
- return 1
+ return True
def SetUp(self, CM):
'''Start the PingFest!'''
self.PingSize = 1024
if "PingSize" in list(CM.Env.keys()):
self.PingSize = CM.Env["PingSize"]
CM.log("Starting %d byte flood pings" % self.PingSize)
self.PingPids = []
for node in CM.Env["nodes"]:
self.PingPids.append(self._pingchild(node))
CM.log("Ping PIDs: " + repr(self.PingPids))
return 1
def TearDown(self, CM):
'''Stop it right now! My ears are pinging!!'''
for pid in self.PingPids:
if pid != None:
CM.log("Stopping ping process %d" % pid)
os.kill(pid, signal.SIGKILL)
def _pingchild(self, node):
Args = ["ping", "-qfn", "-s", str(self.PingSize), node]
sys.stdin.flush()
sys.stdout.flush()
sys.stderr.flush()
pid = os.fork()
if pid < 0:
self.Env.log("Cannot fork ping child")
return None
if pid > 0:
return pid
# Otherwise, we're the child process.
os.execvp("ping", Args)
self.Env.log("Cannot execvp ping: " + repr(Args))
sys.exit(1)
class PacketLoss(ScenarioComponent):
(
'''
It would be useful to do some testing of CTS with a modest amount of packet
loss enabled, so we could verify that everything runs as it should when a
certain amount of packet loss is present.
''')
def IsApplicable(self):
'''always Applicable'''
- return 1
+ return True
def SetUp(self, CM):
'''Reduce the reliability of communications'''
if float(CM.Env["XmitLoss"]) == 0 and float(CM.Env["RecvLoss"]) == 0 :
return 1
for node in CM.Env["nodes"]:
CM.reducecomm_node(node)
CM.log("Reduce the reliability of communications")
return 1
def TearDown(self, CM):
'''Fix the reliability of communications'''
if float(CM.Env["XmitLoss"]) == 0 and float(CM.Env["RecvLoss"]) == 0 :
return 1
for node in CM.Env["nodes"]:
CM.unisolate_node(node)
CM.log("Fix the reliability of communications")
class BasicSanityCheck(ScenarioComponent):
(
'''
''')
def IsApplicable(self):
return self.Env["DoBSC"]
def SetUp(self, CM):
CM.prepare()
# Clear out the cobwebs
self.TearDown(CM)
# Now start the Cluster Manager on all the nodes.
CM.log("Starting Cluster Manager on BSC node(s).")
return CM.startall()
def TearDown(self, CM):
CM.log("Stopping Cluster Manager on BSC node(s).")
return CM.stopall()
class Benchmark(ScenarioComponent):
(
'''
''')
def IsApplicable(self):
return self.Env["benchmark"]
def SetUp(self, CM):
CM.prepare()
# Clear out the cobwebs
self.TearDown(CM, force=True)
# Now start the Cluster Manager on all the nodes.
CM.log("Starting Cluster Manager on all node(s).")
return CM.startall()
def TearDown(self, CM):
CM.log("Stopping Cluster Manager on all node(s).")
return CM.stopall()
class RollingUpgrade(ScenarioComponent):
(
'''
Test a rolling upgrade between two versions of the stack
''')
def __init__(self, Env):
self.Env = Env
def IsApplicable(self):
if not self.Env["rpm-dir"]:
return None
if not self.Env["current-version"]:
return None
if not self.Env["previous-version"]:
return None
- return 1
+ return True
def install(self, node, version):
target_dir = "/tmp/rpm-%s" % version
src_dir = "%s/%s" % (self.CM.Env["rpm-dir"], version)
self.CM.rsh(node, "mkdir -p %s" % target_dir)
rc = self.CM.cp("%s/*.rpm %s:%s" % (src_dir, node, target_dir))
self.CM.rsh(node, "rpm -Uvh --force %s/*.rpm" % (target_dir))
return self.success()
def upgrade(self, node):
return self.install(node, self.CM.Env["current-version"])
def downgrade(self, node):
return self.install(node, self.CM.Env["previous-version"])
def SetUp(self, CM):
print(repr(self)+"prepare")
CM.prepare()
# Clear out the cobwebs
CM.stopall(force=True)
CM.log("Downgrading all nodes to %s." % self.Env["previous-version"])
for node in self.Env["nodes"]:
if not self.downgrade(node):
CM.log("Couldn't downgrade %s" % node)
return None
return 1
def TearDown(self, CM):
# Stop everything
CM.log("Stopping Cluster Manager on Upgrade nodes.")
CM.stopall()
CM.log("Upgrading all nodes to %s." % self.Env["current-version"])
for node in self.Env["nodes"]:
if not self.upgrade(node):
CM.log("Couldn't upgrade %s" % node)
return None
return 1
diff --git a/python/pacemaker/_cts/environment.py b/python/pacemaker/_cts/environment.py
index 4d2bf99c58..e8b6ff4d87 100644
--- a/python/pacemaker/_cts/environment.py
+++ b/python/pacemaker/_cts/environment.py
@@ -1,638 +1,638 @@
""" Test environment classes for Pacemaker's Cluster Test Suite (CTS)
"""
__copyright__ = "Copyright 2014-2023 the Pacemaker project contributors"
__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
import sys, time, os, socket, random
from pacemaker._cts.logging import LogFactory
from pacemaker._cts.remote import RemoteFactory
class Environment(object):
def __init__(self, args):
self.data = {}
self.Nodes = []
self["DeadTime"] = 300
self["StartTime"] = 300
self["StableTime"] = 30
self["tests"] = []
self["IPagent"] = "IPaddr2"
self["DoFencing"] = True
self["XmitLoss"] = "0.0"
self["RecvLoss"] = "0.0"
self["ClobberCIB"] = False
self["CIBfilename"] = None
self["CIBResource"] = False
- self["DoBSC"] = 0
+ self["DoBSC"] = False
self["oprofile"] = []
self["warn-inactive"] = False
self["ListTests"] = False
- self["benchmark"] = 0
+ self["benchmark"] = False
self["LogWatcher"] = "any"
self["SyslogFacility"] = "daemon"
self["LogFileName"] = "/var/log/messages"
self["Schema"] = "pacemaker-3.0"
self["Stack"] = "corosync"
self["stonith-type"] = "external/ssh"
self["stonith-params"] = "hostlist=all,livedangerously=yes"
self["notification-agent"] = "/var/lib/pacemaker/notify.sh"
self["notification-recipient"] = "/var/lib/pacemaker/notify.log"
self["loop-minutes"] = 60
self["valgrind-procs"] = "pacemaker-attrd pacemaker-based pacemaker-controld pacemaker-execd pacemaker-fenced pacemaker-schedulerd"
self["experimental-tests"] = False
self["container-tests"] = False
self["valgrind-tests"] = False
self["unsafe-tests"] = True
self["loop-tests"] = True
self["scenario"] = "random"
self["stats"] = False
self["continue"] = False
self.RandomGen = random.Random()
self.logger = LogFactory()
self.SeedRandom()
self.rsh = RemoteFactory().getInstance()
self.target = "localhost"
self.parse_args(args)
if not self["ListTests"]:
self.validate()
self.discover()
def SeedRandom(self, seed=None):
if not seed:
seed = int(time.time())
self["RandSeed"] = seed
self.RandomGen.seed(str(seed))
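# Hedged illustration, not part of the patch: because SeedRandom() seeds a
# private Random with the logged RandSeed value, a run's node/test choices
# can be reproduced by passing the same seed back in.
import random
_gen = random.Random()
_gen.seed(str(1700000000))
_first = _gen.choice(["node1", "node2", "node3"])
_gen.seed(str(1700000000))
assert _gen.choice(["node1", "node2", "node3"]) == _first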
def dump(self):
keys = []
for key in list(self.data.keys()):
keys.append(key)
keys.sort()
for key in keys:
self.logger.debug("Environment["+key+"]:\t"+str(self[key]))
def keys(self):
return list(self.data.keys())
def has_key(self, key):
if key == "nodes":
return True
return key in self.data
def __getitem__(self, key):
if str(key) == "0":
raise ValueError("Bad call to 'foo in X', should reference 'foo in X.keys()' instead")
if key == "nodes":
return self.Nodes
elif key == "Name":
return self.get_stack_short()
elif key in self.data:
return self.data[key]
else:
return None
def __setitem__(self, key, value):
if key == "Stack":
self.set_stack(value)
elif key == "node-limit":
self.data[key] = value
self.filter_nodes()
elif key == "nodes":
self.Nodes = []
for node in value:
# I don't think I need the IP address, etc. but this validates
# the node name against /etc/hosts and/or DNS, so it's a
# GoodThing(tm).
try:
n = node.strip()
socket.gethostbyname_ex(n)
self.Nodes.append(n)
except:
self.logger.log(node+" not found in DNS... aborting")
raise
self.filter_nodes()
else:
self.data[key] = value
def RandomNode(self):
'''Choose a random node from the cluster'''
return self.RandomGen.choice(self["nodes"])
def set_stack(self, name):
# Normalize stack names
if name == "corosync" or name == "cs" or name == "mcp":
self.data["Stack"] = "corosync 2+"
else:
raise ValueError("Unknown stack: "+name)
def get_stack_short(self):
# Create the Cluster Manager object
if not "Stack" in self.data:
return "unknown"
elif self.data["Stack"] == "corosync 2+":
return "crm-corosync"
else:
LogFactory().log("Unknown stack: "+self["stack"])
raise ValueError("Unknown stack: "+self["stack"])
def detect_syslog(self):
# Detect syslog variant
if not "syslogd" in self.data:
if self["have_systemd"]:
# Systemd
(_, lines) = self.rsh(self.target, "systemctl list-units | grep syslog.*\.service.*active.*running | sed 's:.service.*::'", verbose=1)
self["syslogd"] = lines[0].strip()
else:
# SYS-V
(_, lines) = self.rsh(self.target, "chkconfig --list | grep syslog.*on | awk '{print $1}' | head -n 1", verbose=1)
self["syslogd"] = lines[0].strip()
if not "syslogd" in self.data or not self["syslogd"]:
# default
self["syslogd"] = "rsyslog"
def disable_service(self, node, service):
if self["have_systemd"]:
# Systemd
(rc, _) = self.rsh(node, "systemctl disable %s" % service)
return rc
else:
# SYS-V
(rc, _) = self.rsh(node, "chkconfig %s off" % service)
return rc
def enable_service(self, node, service):
if self["have_systemd"]:
# Systemd
(rc, _) = self.rsh(node, "systemctl enable %s" % service)
return rc
else:
# SYS-V
(rc, _) = self.rsh(node, "chkconfig %s on" % service)
return rc
def service_is_enabled(self, node, service):
if self["have_systemd"]:
# Systemd
# With "systemctl is-enabled", we should check if the service is
# explicitly "enabled" instead of the return code. For example it returns
# 0 if the service is "static" or "indirect", but they don't really count
# as "enabled".
(rc, _) = self.rsh(node, "systemctl is-enabled %s | grep enabled" % service)
return rc == 0
else:
# SYS-V
(rc, _) = self.rsh(node, "chkconfig --list | grep -e %s.*on" % service)
return rc == 0
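# Hedged local equivalent, not part of the patch: the same check without rsh,
# run on the local host. "systemctl is-enabled" also exits 0 for "static" and
# "indirect" units, hence the comparison against the literal output.
import subprocess
def _service_is_enabled_local_sketch(service):
    out = subprocess.run(["systemctl", "is-enabled", service],
                         capture_output=True, text=True).stdout.strip()
    return out == "enabled"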
def detect_at_boot(self):
# Detect if the cluster starts at boot
if not "at-boot" in self.data:
self["at-boot"] = self.service_is_enabled(self.target, "corosync") \
or self.service_is_enabled(self.target, "pacemaker")
def detect_ip_offset(self):
# Try to determine an offset for IPaddr resources
if self["CIBResource"] and not "IPBase" in self.data:
(_, lines) = self.rsh(self.target, "ip addr | grep inet | grep -v -e link -e inet6 -e '/32' -e ' lo' | awk '{print $2}'", verbose=0)
network = lines[0].strip()
(_, lines) = self.rsh(self.target, "nmap -sn -n %s | grep 'scan report' | awk '{print $NF}' | sed 's:(::' | sed 's:)::' | sort -V | tail -n 1" % network, verbose=0)
self["IPBase"] = lines[0].strip()
if not self["IPBase"]:
self["IPBase"] = " fe80::1234:56:7890:1000"
self.logger.log("Could not determine an offset for IPaddr resources. Perhaps nmap is not installed on the nodes.")
self.logger.log("Defaulting to '%s', use --test-ip-base to override" % self["IPBase"])
elif int(self["IPBase"].split('.')[3]) >= 240:
self.logger.log("Could not determine an offset for IPaddr resources. Upper bound is too high: %s %s"
% (self["IPBase"], self["IPBase"].split('.')[3]))
self["IPBase"] = " fe80::1234:56:7890:1000"
self.logger.log("Defaulting to '%s', use --test-ip-base to override" % self["IPBase"])
def filter_nodes(self):
if self['node-limit'] is not None and self["node-limit"] > 0:
if len(self["nodes"]) > self["node-limit"]:
self.logger.log("Limiting the number of nodes configured=%d (max=%d)"
%(len(self["nodes"]), self["node-limit"]))
while len(self["nodes"]) > self["node-limit"]:
self["nodes"].pop(len(self["nodes"])-1)
def validate(self):
if len(self["nodes"]) < 1:
print("No nodes specified!")
sys.exit(1)
def discover(self):
self.target = random.Random().choice(self["nodes"])
exerciser = socket.gethostname()
# Use the IP where possible to avoid name lookup failures
for ip in socket.gethostbyname_ex(exerciser)[2]:
if ip != "127.0.0.1":
exerciser = ip
break;
self["cts-exerciser"] = exerciser
if not "have_systemd" in self.data:
(rc, _) = self.rsh(self.target, "systemctl list-units", verbose=0)
self["have_systemd"] = rc == 0
self.detect_syslog()
self.detect_at_boot()
self.detect_ip_offset()
def parse_args(self, args):
skipthis=None
if not args:
args=sys.argv[1:]
for i in range(0, len(args)):
if skipthis:
skipthis=None
continue
elif args[i] == "-l" or args[i] == "--limit-nodes":
skipthis=1
self["node-limit"] = int(args[i+1])
elif args[i] == "-r" or args[i] == "--populate-resources":
self["CIBResource"] = True
self["ClobberCIB"] = True
elif args[i] == "--outputfile":
skipthis=1
self["OutputFile"] = args[i+1]
LogFactory().add_file(self["OutputFile"])
elif args[i] == "-L" or args[i] == "--logfile":
skipthis=1
self["LogWatcher"] = "remote"
self["LogAuditDisabled"] = 1
self["LogFileName"] = args[i+1]
elif args[i] == "--ip" or args[i] == "--test-ip-base":
skipthis=1
self["IPBase"] = args[i+1]
self["CIBResource"] = True
self["ClobberCIB"] = True
elif args[i] == "--oprofile":
skipthis=1
self["oprofile"] = args[i+1].split(' ')
elif args[i] == "--trunc":
self["TruncateLog"]=1
elif args[i] == "--list-tests" or args[i] == "--list" :
self["ListTests"] = True
elif args[i] == "--benchmark":
- self["benchmark"]=1
+ self["benchmark"] = True
elif args[i] == "--bsc":
- self["DoBSC"] = 1
+ self["DoBSC"] = True
self["scenario"] = "basic-sanity"
elif args[i] == "--qarsh":
RemoteFactory().enable_qarsh()
elif args[i] == "--yes" or args[i] == "-y":
self["continue"] = True
elif args[i] == "--stonith" or args[i] == "--fencing":
skipthis=1
if args[i+1] == "1" or args[i+1] == "yes":
self["DoFencing"] = True
elif args[i+1] == "0" or args[i+1] == "no":
self["DoFencing"] = False
elif args[i+1] == "rhcs" or args[i+1] == "xvm" or args[i+1] == "virt":
self["DoStonith"]=1
self["stonith-type"] = "fence_xvm"
elif args[i+1] == "scsi":
self["DoStonith"]=1
self["stonith-type"] = "fence_scsi"
elif args[i+1] == "ssh" or args[i+1] == "lha":
self["DoStonith"]=1
self["stonith-type"] = "external/ssh"
self["stonith-params"] = "hostlist=all,livedangerously=yes"
elif args[i+1] == "north":
self["DoStonith"]=1
self["stonith-type"] = "fence_apc"
self["stonith-params"] = "ipaddr=north-apc,login=apc,passwd=apc,pcmk_host_map=north-01:2;north-02:3;north-03:4;north-04:5;north-05:6;north-06:7;north-07:9;north-08:10;north-09:11;north-10:12;north-11:13;north-12:14;north-13:15;north-14:18;north-15:17;north-16:19;"
elif args[i+1] == "south":
self["DoStonith"]=1
self["stonith-type"] = "fence_apc"
self["stonith-params"] = "ipaddr=south-apc,login=apc,passwd=apc,pcmk_host_map=south-01:2;south-02:3;south-03:4;south-04:5;south-05:6;south-06:7;south-07:9;south-08:10;south-09:11;south-10:12;south-11:13;south-12:14;south-13:15;south-14:18;south-15:17;south-16:19;"
elif args[i+1] == "east":
self["DoStonith"]=1
self["stonith-type"] = "fence_apc"
self["stonith-params"] = "ipaddr=east-apc,login=apc,passwd=apc,pcmk_host_map=east-01:2;east-02:3;east-03:4;east-04:5;east-05:6;east-06:7;east-07:9;east-08:10;east-09:11;east-10:12;east-11:13;east-12:14;east-13:15;east-14:18;east-15:17;east-16:19;"
elif args[i+1] == "west":
self["DoStonith"]=1
self["stonith-type"] = "fence_apc"
self["stonith-params"] = "ipaddr=west-apc,login=apc,passwd=apc,pcmk_host_map=west-01:2;west-02:3;west-03:4;west-04:5;west-05:6;west-06:7;west-07:9;west-08:10;west-09:11;west-10:12;west-11:13;west-12:14;west-13:15;west-14:18;west-15:17;west-16:19;"
elif args[i+1] == "openstack":
self["DoStonith"]=1
self["stonith-type"] = "fence_openstack"
print("Obtaining OpenStack credentials from the current environment")
self["stonith-params"] = "region=%s,tenant=%s,auth=%s,user=%s,password=%s" % (
os.environ['OS_REGION_NAME'],
os.environ['OS_TENANT_NAME'],
os.environ['OS_AUTH_URL'],
os.environ['OS_USERNAME'],
os.environ['OS_PASSWORD']
)
elif args[i+1] == "rhevm":
self["DoStonith"]=1
self["stonith-type"] = "fence_rhevm"
print("Obtaining RHEV-M credentials from the current environment")
self["stonith-params"] = "login=%s,passwd=%s,ipaddr=%s,ipport=%s,ssl=1,shell_timeout=10" % (
os.environ['RHEVM_USERNAME'],
os.environ['RHEVM_PASSWORD'],
os.environ['RHEVM_SERVER'],
os.environ['RHEVM_PORT'],
)
else:
self.usage(args[i+1])
elif args[i] == "--stonith-type":
self["stonith-type"] = args[i+1]
skipthis=1
elif args[i] == "--stonith-args":
self["stonith-params"] = args[i+1]
skipthis=1
elif args[i] == "--clobber-cib" or args[i] == "-c":
self["ClobberCIB"] = True
elif args[i] == "--cib-filename":
skipthis=1
self["CIBfilename"] = args[i+1]
elif args[i] == "--xmit-loss":
try:
float(args[i+1])
except ValueError:
print("--xmit-loss parameter should be float")
self.usage(args[i+1])
skipthis=1
self["XmitLoss"] = args[i+1]
elif args[i] == "--recv-loss":
try:
float(args[i+1])
except ValueError:
print("--recv-loss parameter should be float")
self.usage(args[i+1])
skipthis=1
self["RecvLoss"] = args[i+1]
elif args[i] == "--choose":
skipthis=1
self["tests"].append(args[i+1])
self["scenario"] = "sequence"
elif args[i] == "--nodes":
skipthis=1
self["nodes"] = args[i+1].split(' ')
elif args[i] == "-g" or args[i] == "--group" or args[i] == "--dsh-group":
skipthis=1
self["OutputFile"] = "%s/cluster-%s.log" % (os.environ['HOME'], args[i+1])
LogFactory().add_file(self["OutputFile"], "CTS")
dsh_file = "%s/.dsh/group/%s" % (os.environ['HOME'], args[i+1])
# Hacks to make my life easier
if args[i+1] == "virt1":
self["Stack"] = "corosync"
self["DoStonith"]=1
self["stonith-type"] = "fence_xvm"
self["stonith-params"] = "delay=0"
self["IPBase"] = " fe80::1234:56:7890:1000"
elif args[i+1] == "east16" or args[i+1] == "nsew":
self["Stack"] = "corosync"
self["DoStonith"]=1
self["stonith-type"] = "fence_apc"
self["stonith-params"] = "ipaddr=east-apc,login=apc,passwd=apc,pcmk_host_map=east-01:2;east-02:3;east-03:4;east-04:5;east-05:6;east-06:7;east-07:9;east-08:10;east-09:11;east-10:12;east-11:13;east-12:14;east-13:15;east-14:18;east-15:17;east-16:19;"
self["IPBase"] = " fe80::1234:56:7890:2000"
if args[i+1] == "east16":
# Requires newer python than available via nsew
self["IPagent"] = "Dummy"
elif args[i+1] == "corosync8":
self["Stack"] = "corosync"
self["DoStonith"]=1
self["stonith-type"] = "fence_rhevm"
print("Obtaining RHEV-M credentials from the current environment")
self["stonith-params"] = "login=%s,passwd=%s,ipaddr=%s,ipport=%s,ssl=1,shell_timeout=10" % (
os.environ['RHEVM_USERNAME'],
os.environ['RHEVM_PASSWORD'],
os.environ['RHEVM_SERVER'],
os.environ['RHEVM_PORT'],
)
self["IPBase"] = " fe80::1234:56:7890:3000"
if os.path.isfile(dsh_file):
self["nodes"] = []
f = open(dsh_file, 'r')
for line in f:
l = line.strip().rstrip()
if not l.startswith('#'):
self["nodes"].append(l)
f.close()
else:
print("Unknown DSH group: %s" % args[i+1])
elif args[i] == "--syslog-facility" or args[i] == "--facility":
skipthis=1
self["SyslogFacility"] = args[i+1]
elif args[i] == "--seed":
skipthis=1
self.SeedRandom(args[i+1])
elif args[i] == "--warn-inactive":
self["warn-inactive"] = True
elif args[i] == "--schema":
skipthis=1
self["Schema"] = args[i+1]
elif args[i] == "--at-boot" or args[i] == "--cluster-starts-at-boot":
skipthis=1
if args[i+1] == "1" or args[i+1] == "yes":
self["at-boot"] = 1
elif args[i+1] == "0" or args[i+1] == "no":
self["at-boot"] = 0
else:
self.usage(args[i+1])
elif args[i] == "--stack":
if args[i+1] == "fedora" or args[i+1] == "fedora-17" or args[i+1] == "fedora-18":
self["Stack"] = "corosync"
elif args[i+1] == "rhel-7":
self["Stack"] = "corosync"
else:
self["Stack"] = args[i+1]
skipthis=1
elif args[i] == "--once":
self["scenario"] = "all-once"
elif args[i] == "--boot":
self["scenario"] = "boot"
elif args[i] == "--notification-agent":
self["notification-agent"] = args[i+1]
skipthis = 1
elif args[i] == "--notification-recipient":
self["notification-recipient"] = args[i+1]
skipthis = 1
elif args[i] == "--valgrind-tests":
self["valgrind-tests"] = True
elif args[i] == "--valgrind-procs":
self["valgrind-procs"] = args[i+1]
skipthis = 1
elif args[i] == "--no-loop-tests":
self["loop-tests"] = False
elif args[i] == "--loop-minutes":
skipthis=1
try:
self["loop-minutes"]=int(args[i+1])
except ValueError:
self.usage(args[i])
elif args[i] == "--no-unsafe-tests":
self["unsafe-tests"] = False
elif args[i] == "--experimental-tests":
self["experimental-tests"] = True
elif args[i] == "--container-tests":
self["container-tests"] = True
elif args[i] == "--set":
skipthis=1
(name, value) = args[i+1].split('=')
self[name] = value
print("Setting %s = %s" % (name, value))
elif args[i] == "--help":
self.usage(args[i], 0)
elif args[i] == "--":
break
else:
try:
NumIter=int(args[i])
self["iterations"] = NumIter
except ValueError:
self.usage(args[i])
def usage(self, arg, status=1):
if status:
print("Illegal argument %s" % arg)
print("""usage: %s [options] number-of-iterations
Common options:
\t [--nodes 'node list'] list of cluster nodes separated by whitespace
\t [--group | -g 'name'] use the nodes listed in the named DSH group (~/.dsh/groups/$name)
\t [--limit-nodes max] only use the first 'max' cluster nodes supplied with --nodes
\t [--stack corosync] which cluster stack is installed
\t [--list-tests] list the valid tests
\t [--benchmark] add timing information
Options that CTS will usually auto-detect correctly:
\t [--logfile path] where should the test software look for logs from cluster nodes
\t [--syslog-facility name] which syslog facility should the test software log to
\t [--at-boot (1|0)] does the cluster software start at boot time
\t [--test-ip-base ip] offset for generated IP address resources
Options for release testing:
\t [--populate-resources | -r] generate a sample configuration
\t [--choose name] run only the named test
\t [--stonith (1 | 0 | yes | no | rhcs | ssh)]
\t [--once] run all valid tests once
Additional (less common) options:
\t [--clobber-cib | -c ] erase any existing configuration
\t [--outputfile path] optional location for the test software to write logs to
\t [--trunc] truncate logfile before starting
\t [--xmit-loss lost-rate(0.0-1.0)]
\t [--recv-loss lost-rate(0.0-1.0)]
\t [--fencing (1 | 0 | yes | no | rhcs | lha | openstack )]
\t [--stonith-type type]
\t [--stonith-args name=value]
\t [--bsc]
\t [--notification-agent path] script to configure for Pacemaker alerts
\t [--notification-recipient r] recipient to pass to alert script
\t [--no-loop-tests] don't run looping/time-based tests
\t [--no-unsafe-tests] don't run tests that are unsafe for use with ocfs2/drbd
\t [--valgrind-tests] include tests using valgrind
\t [--experimental-tests] include experimental tests
\t [--container-tests] include pacemaker_remote tests that run in lxc container resources
\t [--oprofile 'node list'] list of cluster nodes to run oprofile on
\t [--qarsh] use the QARSH backdoor to access nodes instead of SSH
\t [--seed random_seed]
\t [--set option=value]
\t [--yes | -y] continue running CTS without prompting when it would otherwise ask whether to continue
Example:
\t python %s -g virt1 -r --stonith ssh --schema pacemaker-2.0 500""" % (sys.argv[0], sys.argv[0]))
sys.exit(status)
class EnvFactory(object):
instance = None
def __init__(self):
pass
def getInstance(self, args=None):
if not EnvFactory.instance:
EnvFactory.instance = Environment(args)
return EnvFactory.instance
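# Hedged usage sketch, not part of the patch: EnvFactory caches a single
# Environment, so later getInstance() calls ignore their arguments and return
# the instance built by the first call (node names here are hypothetical).
# env = EnvFactory().getInstance(["--nodes", "node1 node2", "--stonith", "no"])
# assert EnvFactory().getInstance() is env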
