
diff --git a/cts/CTStests.py.in b/cts/CTStests.py.in
index 1097bfe78b..4785849ba8 100644
--- a/cts/CTStests.py.in
+++ b/cts/CTStests.py.in
@@ -1,1974 +1,1977 @@
#!@PYTHON@
'''CTS: Cluster Testing System: Tests module
There are a few things we want to do here:
'''
__copyright__='''
Copyright (C) 2000, 2001 Alan Robertson <alanr@unix.sh>
Licensed under the GNU GPL.
Add ResourceRecover testcase: Zhao Kai <zhaokai@cn.ibm.com>
'''
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# SPECIAL NOTE:
#
# Tests may NOT implement any cluster-manager-specific code in them.
# EXTEND the ClusterManager object to provide the base capabilities
# the test needs if you need to do something that the current CM classes
# do not. Otherwise you screw up the whole point of the object structure
# in CTS.
#
# Thank you.
#
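#
# Illustrative sketch only (not part of the test API): a test that needs to
# bounce a node should go through the ClusterManager methods already used in
# this file, for example
#
#     self.CM.StopaCM(node)
#     self.CM.StartaCM(node)
#
# rather than running a cluster-manager-specific init script itself.
#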
import CTS
import CTSaudits
import time, os, re, types, string, tempfile, sys
from CTSaudits import *
from stat import *
# List of all class objects for tests which we ought to
# consider running.
class RandomTests:
'''
A collection of tests which are run at random.
'''
def __init__(self, scenario, cm, tests, Audits):
self.CM = cm
self.Env = cm.Env
self.Scenario = scenario
self.Tests = []
self.Audits = []
self.ns=CTS.NodeStatus(self.Env)
self.Stats = {"success":0, "failure":0, "BadNews":0}
self.IndividualStats= {}
for audit in Audits:
if not issubclass(audit.__class__, ClusterAudit):
raise ValueError("Init value must be a subclass of ClusterAudit")
if audit.is_applicable():
self.Audits.append(audit)
for test in tests:
if not issubclass(test.__class__, CTSTest):
raise ValueError("Init value must be a subclass of CTSTest")
if test.is_applicable():
self.Tests.append(test)
if not scenario.IsApplicable():
raise ValueError("Scenario not applicable in"
" given Environment")
def incr(self, name):
'''Increment (or initialize) the value associated with the given name'''
if not self.Stats.has_key(name):
self.Stats[name]=0
self.Stats[name] = self.Stats[name]+1
def audit(self, BadNews, test):
errcount=0
BadNewsDebug=0
#BadNews.debug=1
ignorelist = []
ignorelist.append(" CTS: ")
ignorelist.append("BadNews:")
ignorelist.extend(self.CM.errorstoignore())
if test:
ignorelist.extend(test.errorstoignore())
while errcount < 1000:
if BadNewsDebug: print "Looking for BadNews"
match=BadNews.look(0)
if match:
if BadNewsDebug: print "BadNews found: "+match
add_err = 1
for ignore in ignorelist:
if add_err == 1 and re.search(ignore, match):
if BadNewsDebug: print "Ignoring based on pattern: ("+ignore+")"
add_err = 0
if add_err == 1:
self.CM.log("BadNews: " + match)
self.incr("BadNews")
errcount=errcount+1
else:
break
else:
answer = raw_input('Big problems. Continue? [nY]')
if answer and answer == "n":
self.CM.log("Shutting down.")
self.CM.stopall()
self.summarize()
raise ValueError("Looks like we hit a BadNews jackpot!")
for audit in self.Audits:
if not audit():
self.CM.log("Audit " + audit.name() + " FAILED.")
self.incr("auditfail")
if test:
test.incr("auditfail")
def summarize(self):
self.CM.log("****************")
self.CM.log("Overall Results:" + repr(self.Stats))
self.CM.log("****************")
self.CM.log("Detailed Results")
for test in self.Tests:
self.CM.log("Test %s: \t%s" %(test.name, repr(test.Stats)))
self.CM.log("<<<<<<<<<<<<<<<< TESTS COMPLETED")
def run(self, max=1):
'''
Set up the given scenario, then run the selected tests at
random for the selected number of iterations.
'''
BadNews=CTS.LogWatcher(self.CM["LogFileName"], self.CM["BadRegexes"]
, timeout=0)
BadNews.setwatch()
self.CM.ns.WaitForAllNodesToComeUp(self.CM.Env["nodes"])
self.CM.oprofileStop()
self.CM.oprofileStart()
if not self.CM.Env["DoBSC"]:
audit = LogAudit(self.CM)
if not audit():
self.CM.log("Audit " + audit.name() + " FAILED.")
return (None, None)
else:
self.CM.log("Audit " + audit.name() + " passed.")
audit = DiskAudit(self.CM)
if not audit():
self.CM.log("Audit " + audit.name() + " FAILED.")
return (None, None)
else:
self.CM.log("Audit " + audit.name() + " passed.")
if not self.Scenario.SetUp(self.CM):
return (None, None)
self.CM.oprofileSave(0)
testcount=1
time.sleep(30)
# This makes sure everything is stabilized before starting...
self.audit(BadNews, None)
while testcount <= max:
test = self.Env.RandomGen.choice(self.Tests)
# Some tests want a node as an argument.
nodechoice = self.Env.RandomNode()
self.CM.log(("Running test %s" % test.name).ljust(35) + (" (%s) " % nodechoice).ljust(15) +"["+ ("%d" % testcount).rjust(3) +"]")
starttime=time.time()
test.starttime=starttime
where = " - Setup"
ret = test.setup(nodechoice)
if ret:
where = ""
ret = test(nodechoice)
if not test.teardown(nodechoice):
ret = 0
where = " - Teardown"
stoptime=time.time()
self.CM.oprofileSave(testcount)
testcount = testcount + 1
if ret:
self.incr("success")
else:
# Better get the current info from the cluster...
self.CM.log("Test %s (%s) \t[FAILED%s]" %(test.name,nodechoice,where))
self.incr("failure")
self.CM.statall()
elapsed_time = stoptime - starttime
test_time = stoptime - test.starttime
if not test.has_key("min_time"):
test["elapsed_time"] = elapsed_time
test["min_time"] = test_time
test["max_time"] = test_time
else:
test["elapsed_time"] = test["elapsed_time"] + elapsed_time
if test_time < test["min_time"]:
test["min_time"] = test_time
if test_time > test["max_time"]:
test["max_time"] = test_time
self.audit(BadNews, test)
self.Scenario.TearDown(self.CM)
self.CM.oprofileSave(testcount)
self.CM.oprofileStop()
self.audit(BadNews, None)
for test in self.Tests:
self.IndividualStats[test.name] = test.Stats
return self.Stats, self.IndividualStats
AllTestClasses = [ ]
class CTSTest:
'''
A Cluster test.
We implement the basic set of properties and behaviors for a generic
cluster test.
Cluster tests track their own statistics.
We keep each of the kinds of counts we track as separate {name,value}
pairs.
'''
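# Usage sketch (illustrative only): the statistics are plain dictionary
# entries, so a concrete test typically does something like
#
#     self.incr("calls")              # bump (or create) a counter
#     self["min_time"] = test_time    # __setitem__ writes straight to Stats
#     if self.has_key("min_time"): ...
#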
def __init__(self, cm):
#self.name="the unnamed test"
self.Stats = {"calls":0
, "success":0
, "failure":0
, "skipped":0
, "auditfail":0}
# if not issubclass(cm.__class__, ClusterManager):
# raise ValueError("Must be a ClusterManager object")
self.CM = cm
self.Audits = []
self.timeout=120
self.starttime=0
self.passed = 1
def has_key(self, key):
return self.Stats.has_key(key)
def __setitem__(self, key, value):
self.Stats[key] = value
def __getitem__(self, key):
return self.Stats[key]
def incr(self, name):
'''Increment (or initialize) the value associated with the given name'''
if not self.Stats.has_key(name):
self.Stats[name]=0
self.Stats[name] = self.Stats[name]+1
# Reset the test passed boolean
if name == "calls":
self.passed = 1
def failure(self, reason="none"):
'''Increment the failure count'''
self.passed = 0
self.incr("failure")
self.CM.log("Test " + self.name + " failed [reason:" + reason + "]")
return None
def success(self):
'''Increment the success count'''
self.incr("success")
return 1
def skipped(self):
'''Increment the skipped count'''
self.incr("skipped")
return 1
def __call__(self, node):
'''Perform the given test'''
raise ValueError("Abstract Class member (__call__)")
self.incr("calls")
return self.failure()
def audit(self):
passed = 1
if len(self.Audits) > 0:
for audit in self.Audits:
if not audit():
self.CM.log("Internal %s Audit %s FAILED." % (self.name, audit.name()))
self.incr("auditfail")
passed = 0
return passed
def setup(self, node):
'''Setup the given test'''
return self.success()
def teardown(self, node):
'''Tear down the given test'''
return self.success()
def local_badnews(self, prefix, watch, local_ignore=[]):
errcount = 0
if not prefix:
prefix = "LocalBadNews:"
ignorelist = []
ignorelist.append(" CTS: ")
ignorelist.append(prefix)
ignorelist.extend(local_ignore)
while errcount < 100:
match=watch.look(0)
if match:
add_err = 1
for ignore in ignorelist:
if add_err == 1 and re.search(ignore, match):
add_err = 0
if add_err == 1:
self.CM.log(prefix + " " + match)
errcount=errcount+1
else:
break
else:
self.CM.log("Too many errors!")
return errcount
def is_applicable(self):
'''Return TRUE if we are applicable in the current test configuration'''
#raise ValueError("Abstract Class member (is_applicable)")
return 1
def canrunnow(self):
'''Return TRUE if we can meaningfully run right now'''
return 1
def errorstoignore(self):
'''Return list of errors which are 'normal' and should be ignored'''
return []
###################################################################
class StopTest(CTSTest):
###################################################################
'''Stop (deactivate) the cluster manager on a node'''
def __init__(self, cm):
CTSTest.__init__(self, cm)
self.name="Stop"
def __call__(self, node):
'''Perform the 'stop' test. '''
self.incr("calls")
if self.CM.ShouldBeStatus[node] != "up":
return self.skipped()
patterns = []
# Technically we should always be able to notice ourselves stopping
patterns.append(self.CM["Pat:We_stopped"] % node)
if self.CM.Env["use_logd"]:
patterns.append(self.CM["Pat:Logd_stopped"] % node)
# Any active node needs to notice this one left
# NOTE: This won't work if we have multiple partitions
for other in self.CM.Env["nodes"]:
if self.CM.ShouldBeStatus[other] == "up" and other != node:
patterns.append(self.CM["Pat:They_stopped"] %(other, node))
#self.debug("Checking %s will notice %s left"%(other, node))
watch = CTS.LogWatcher(
self.CM["LogFileName"], patterns, self.CM["DeadTime"])
watch.setwatch()
if node == self.CM.OurNode:
self.incr("us")
else:
if self.CM.upcount() <= 1:
self.incr("all")
else:
self.incr("them")
self.CM.StopaCM(node)
watch_result = watch.lookforall()
failreason=None
UnmatchedList = "||"
if watch.unmatched:
(rc, output) = self.CM.rsh(node, "/bin/ps axf", None)
for line in output:
self.CM.debug(line)
for regex in watch.unmatched:
self.CM.log ("ERROR: Shutdown pattern not found: %s" % (regex))
UnmatchedList += regex + "||";
failreason="Missing shutdown pattern"
self.CM.cluster_stable(self.CM["DeadTime"])
if not watch.unmatched or self.CM.upcount() == 0:
return self.success()
if len(watch.unmatched) >= self.CM.upcount():
return self.failure("no match against (%s)" % UnmatchedList)
if failreason == None:
return self.success()
else:
return self.failure(failreason)
#
# We don't register StopTest because it's better when called by
# another test...
#
###################################################################
class StartTest(CTSTest):
###################################################################
'''Start (activate) the cluster manager on a node'''
def __init__(self, cm, debug=None):
CTSTest.__init__(self,cm)
self.name="start"
self.debug = debug
def __call__(self, node):
'''Perform the 'start' test. '''
self.incr("calls")
if self.CM.upcount() == 0:
self.incr("us")
else:
self.incr("them")
if self.CM.ShouldBeStatus[node] != "down":
return self.skipped()
elif self.CM.StartaCM(node):
return self.success()
else:
return self.failure("Startup %s on node %s failed"
%(self.CM["Name"], node))
#
# We don't register StartTest because it's better when called by
# another test...
#
###################################################################
class FlipTest(CTSTest):
###################################################################
'''If it's running, stop it. If it's stopped, start it.
Overthrow the status quo...
'''
def __init__(self, cm):
CTSTest.__init__(self,cm)
self.name="Flip"
self.start = StartTest(cm)
self.stop = StopTest(cm)
def __call__(self, node):
'''Perform the 'Flip' test. '''
self.incr("calls")
if self.CM.ShouldBeStatus[node] == "up":
self.incr("stopped")
ret = self.stop(node)
type="up->down"
# Give the cluster time to recognize it's gone...
time.sleep(self.CM["StableTime"])
elif self.CM.ShouldBeStatus[node] == "down":
self.incr("started")
ret = self.start(node)
type="down->up"
else:
return self.skipped()
self.incr(type)
if ret:
return self.success()
else:
return self.failure("%s failure" % type)
# Register FlipTest as a good test to run
AllTestClasses.append(FlipTest)
###################################################################
class RestartTest(CTSTest):
###################################################################
'''Stop and restart a node'''
def __init__(self, cm):
CTSTest.__init__(self,cm)
self.name="Restart"
self.start = StartTest(cm)
self.stop = StopTest(cm)
def __call__(self, node):
'''Perform the 'restart' test. '''
self.incr("calls")
self.incr("node:" + node)
ret1 = 1
if self.CM.StataCM(node):
self.incr("WasStopped")
if not self.start(node):
return self.failure("start (setup) failure: "+node)
self.starttime=time.time()
if not self.stop(node):
return self.failure("stop failure: "+node)
if not self.start(node):
return self.failure("start failure: "+node)
return self.success()
# Register RestartTest as a good test to run
AllTestClasses.append(RestartTest)
###################################################################
class StonithdTest(CTSTest):
###################################################################
def __init__(self, cm):
CTSTest.__init__(self, cm)
self.name="Stonithd"
self.startall = SimulStartLite(cm)
def __call__(self, node):
self.incr("calls")
if len(self.CM.Env["nodes"]) < 2:
return self.skipped()
ret = self.startall(None)
if not ret:
return self.failure("Setup failed")
watchpats = []
watchpats.append("Forcing node %s to be terminated" % node)
watchpats.append("Scheduling Node %s for STONITH" % node)
watchpats.append("Executing .* fencing operation")
watchpats.append("sending fencing op RESET for %s" % node)
if not self.CM.is_node_dc(node):
# Won't be found if the DC is shot (and there's no equivalent message from stonithd)
watchpats.append("tengine_stonith_callback: .*result=0")
if self.CM.Env["at-boot"] == 0:
self.CM.debug("Expecting %s to stay down" % node)
self.CM.ShouldBeStatus[node]="down"
else:
self.CM.debug("Expecting %s to come up again %d" % (node, self.CM.Env["at-boot"]))
watchpats.append("%s crmd: .* S_STARTING -> S_PENDING" % node)
watchpats.append("%s crmd: .* S_PENDING -> S_NOT_DC" % node)
watch = CTS.LogWatcher(self.CM["LogFileName"], watchpats,
self.CM["DeadTime"] + self.CM["StableTime"] + self.CM["StartTime"])
watch.setwatch()
self.CM.rsh(node, "@sbindir@/crm_attribute --node %s --type status --attr-name terminate --attr-value true" % node)
matched = watch.lookforall()
if matched:
self.CM.debug("Found: "+ repr(matched))
else:
self.CM.log("Patterns not found: " + repr(watch.unmatched))
self.CM.debug("Waiting for the cluster to recover")
self.CM.cluster_stable()
self.CM.debug("Waiting STONITHd node to come back up")
self.CM.ns.WaitForAllNodesToComeUp(self.CM.Env["nodes"], 600)
self.CM.debug("Waiting for the cluster to re-stabilize with all nodes")
is_stable = self.CM.cluster_stable(self.CM["StartTime"])
if not matched:
return self.failure("Didn't find all expected patterns")
elif not is_stable:
return self.failure("Cluster did not become stable")
return self.success()
def is_applicable(self):
if self.CM.Env.has_key("DoStonith"):
return self.CM.Env["DoStonith"]
return 1
AllTestClasses.append(StonithdTest)
###################################################################
class StartOnebyOne(CTSTest):
###################################################################
'''Start all the nodes ~ one by one'''
def __init__(self, cm):
CTSTest.__init__(self,cm)
self.name="StartOnebyOne"
self.stopall = SimulStopLite(cm)
self.start = StartTest(cm)
self.ns=CTS.NodeStatus(cm.Env)
def __call__(self, dummy):
'''Perform the 'StartOnebyOne' test. '''
self.incr("calls")
# We ignore the "node" parameter...
# Shut down all the nodes...
ret = self.stopall(None)
if not ret:
return self.failure("Test setup failed")
failed=[]
self.starttime=time.time()
for node in self.CM.Env["nodes"]:
if not self.start(node):
failed.append(node)
if len(failed) > 0:
return self.failure("Some node failed to start: " + repr(failed))
return self.success()
# Register StartOnebyOne as a good test to run
AllTestClasses.append(StartOnebyOne)
###################################################################
class SimulStart(CTSTest):
###################################################################
'''Start all the nodes ~ simultaneously'''
def __init__(self, cm):
CTSTest.__init__(self,cm)
self.name="SimulStart"
self.stopall = SimulStopLite(cm)
self.startall = SimulStartLite(cm)
def __call__(self, dummy):
'''Perform the 'SimulStart' test. '''
self.incr("calls")
# We ignore the "node" parameter...
# Shut down all the nodes...
ret = self.stopall(None)
if not ret:
return self.failure("Setup failed")
self.CM.clear_all_caches()
if not self.startall(None):
return self.failure("Startall failed")
return self.success()
# Register SimulStart as a good test to run
AllTestClasses.append(SimulStart)
###################################################################
class SimulStop(CTSTest):
###################################################################
'''Stop all the nodes ~ simultaneously'''
def __init__(self, cm):
CTSTest.__init__(self,cm)
self.name="SimulStop"
self.startall = SimulStartLite(cm)
self.stopall = SimulStopLite(cm)
def __call__(self, dummy):
'''Perform the 'SimulStop' test. '''
self.incr("calls")
# We ignore the "node" parameter...
# Start up all the nodes...
ret = self.startall(None)
if not ret:
return self.failure("Setup failed")
if not self.stopall(None):
return self.failure("Stopall failed")
return self.success()
# Register SimulStop as a good test to run
AllTestClasses.append(SimulStop)
###################################################################
class StopOnebyOne(CTSTest):
###################################################################
'''Stop all the nodes in order'''
def __init__(self, cm):
CTSTest.__init__(self,cm)
self.name="StopOnebyOne"
self.startall = SimulStartLite(cm)
self.stop = StopTest(cm)
def __call__(self, dummy):
'''Perform the 'StopOnebyOne' test. '''
self.incr("calls")
# We ignore the "node" parameter...
# Start up all the nodes...
ret = self.startall(None)
if not ret:
return self.failure("Setup failed")
failed=[]
self.starttime=time.time()
for node in self.CM.Env["nodes"]:
if not self.stop(node):
failed.append(node)
if len(failed) > 0:
return self.failure("Some node failed to stop: " + repr(failed))
self.CM.clear_all_caches()
return self.success()
# Register StopOnebyOne as a good test to run
AllTestClasses.append(StopOnebyOne)
###################################################################
class RestartOnebyOne(CTSTest):
###################################################################
'''Restart all the nodes in order'''
def __init__(self, cm):
CTSTest.__init__(self,cm)
self.name="RestartOnebyOne"
self.startall = SimulStartLite(cm)
def __call__(self, dummy):
'''Perform the 'RestartOnebyOne' test. '''
self.incr("calls")
# We ignore the "node" parameter...
# Start up all the nodes...
ret = self.startall(None)
if not ret:
return self.failure("Setup failed")
did_fail=[]
self.starttime=time.time()
self.restart = RestartTest(self.CM)
for node in self.CM.Env["nodes"]:
if not self.restart(node):
did_fail.append(node)
if did_fail:
return self.failure("Could not restart %d nodes: %s"
%(len(did_fail), repr(did_fail)))
return self.success()
# Register RestartOnebyOne as a good test to run
AllTestClasses.append(RestartOnebyOne)
###################################################################
class PartialStart(CTSTest):
###################################################################
'''Start a node - but tell it to stop before it finishes starting up'''
def __init__(self, cm):
CTSTest.__init__(self,cm)
self.name="PartialStart"
self.startall = SimulStartLite(cm)
self.stopall = SimulStopLite(cm)
def __call__(self, node):
'''Perform the 'PartialStart' test. '''
self.incr("calls")
ret = self.stopall(None)
if not ret:
return self.failure("Setup failed")
# FIXME! This should use the CM class to get the pattern
# then it would be applicable in general
watchpats = []
watchpats.append("Starting crmd")
watch = CTS.LogWatcher(self.CM["LogFileName"], watchpats,
timeout=self.CM["DeadTime"]+10)
watch.setwatch()
self.CM.StartaCMnoBlock(node)
ret = watch.lookforall()
if not ret:
self.CM.log("Patterns not found: " + repr(watch.unmatched))
return self.failure("Setup of %s failed" % node)
ret = self.stopall(None)
if not ret:
return self.failure("%s did not stop in time" % node)
return self.success()
# Register PartialStart as a good test to run
AllTestClasses.append(PartialStart)
#######################################################################
class StandbyTest(CTSTest):
#######################################################################
'''Standby with CRM of HA release 2'''
def __init__(self, cm):
CTSTest.__init__(self,cm)
self.name="standby2"
self.start = StartTest(cm)
self.startall = SimulStartLite(cm)
# make sure the node is active
# set the node to standby mode
# check resources, no resources should be running on the node
# set the node to active mode
# check resources, resources should have been migrated back (SHOULD THEY?)
def __call__(self, node):
self.incr("calls")
ret=self.startall(None)
if not ret:
return self.failure("Start all nodes failed")
self.CM.debug("Make sure node %s is active" % node)
if self.CM.StandbyStatus(node) != "off":
if not self.CM.SetStandbyMode(node, "off"):
return self.failure("can't set node %s to active mode" % node)
self.CM.cluster_stable()
status = self.CM.StandbyStatus(node)
if status != "off":
return self.failure("standby status of %s is [%s] but we expect [off]" % (node, status))
self.CM.debug("Getting resources running on node %s" % node)
rsc_on_node = self.CM.active_resources(node)
self.CM.debug("Setting node %s to standby mode" % node)
if not self.CM.SetStandbyMode(node, "on"):
return self.failure("can't set node %s to standby mode" % node)
time.sleep(30) # Allow time for the update to be applied and cause something
self.CM.cluster_stable()
status = self.CM.StandbyStatus(node)
if status != "on":
return self.failure("standby status of %s is [%s] but we expect [on]" % (node, status))
self.CM.debug("Checking resources")
bad_run = self.CM.active_resources(node)
if len(bad_run) > 0:
- return self.failure("%s set to standby, %s is still running on it" % (node, repr(bad_run)))
+ rc = self.failure("%s set to standby, %s is still running on it" % (node, repr(bad_run)))
+ self.CM.debug("Setting node %s to active mode" % node)
+ self.CM.SetStandbyMode(node, "off")
+ return rc
self.CM.debug("Setting node %s to active mode" % node)
if not self.CM.SetStandbyMode(node, "off"):
return self.failure("can't set node %s to active mode" % node)
time.sleep(30) # Allow time for the update to be applied and cause something
self.CM.cluster_stable()
status = self.CM.StandbyStatus(node)
if status != "off":
return self.failure("standby status of %s is [%s] but we expect [off]" % (node, status))
return self.success()
AllTestClasses.append(StandbyTest)
##############################################################################
class BandwidthTest(CTSTest):
##############################################################################
# Tests should not be cluster-manager-specific
# If you need to find out cluster manager configuration to do this, then
# it should be added to the generic cluster manager API.
'''Test the bandwidth which heartbeat uses'''
def __init__(self, cm):
CTSTest.__init__(self, cm)
self.name = "Bandwidth"
self.start = StartTest(cm)
self.__setitem__("min",0)
self.__setitem__("max",0)
self.__setitem__("totalbandwidth",0)
self.tempfile = tempfile.mktemp(".cts")
self.startall = SimulStartLite(cm)
def __call__(self, node):
'''Perform the Bandwidth test'''
self.incr("calls")
if self.CM.upcount()<1:
return self.skipped()
Path = self.CM.InternalCommConfig()
if "ip" not in Path["mediatype"]:
return self.skipped()
port = Path["port"][0]
port = int(port)
ret = self.startall(None)
if not ret:
return self.failure("Test setup failed")
time.sleep(5) # We get extra messages right after startup.
fstmpfile = "/var/run/band_estimate"
dumpcmd = "tcpdump -p -n -c 102 -i any udp port %d > %s 2>&1" \
% (port, fstmpfile)
rc = self.CM.rsh(node, dumpcmd)
if rc == 0:
farfile = "root@%s:%s" % (node, fstmpfile)
self.CM.rsh.cp(farfile, self.tempfile)
Bandwidth = self.countbandwidth(self.tempfile)
if not Bandwidth:
self.CM.log("Could not compute bandwidth.")
return self.success()
intband = int(Bandwidth + 0.5)
self.CM.log("...bandwidth: %d bits/sec" % intband)
self.Stats["totalbandwidth"] = self.Stats["totalbandwidth"] + Bandwidth
if self.Stats["min"] == 0:
self.Stats["min"] = Bandwidth
if Bandwidth > self.Stats["max"]:
self.Stats["max"] = Bandwidth
if Bandwidth < self.Stats["min"]:
self.Stats["min"] = Bandwidth
self.CM.rsh(node, "rm -f %s" % fstmpfile)
os.unlink(self.tempfile)
return self.success()
else:
return self.failure("no response from tcpdump command [%d]!" % rc)
def countbandwidth(self, file):
fp = open(file, "r")
fp.seek(0)
count = 0
sum = 0
while 1:
line = fp.readline()
if not line:
return None
if re.search("udp",line) or re.search("UDP,", line):
count=count+1
linesplit = string.split(line," ")
for j in range(len(linesplit)-1):
if linesplit[j]=="udp": break
if linesplit[j]=="length:": break
try:
sum = sum + int(linesplit[j+1])
except ValueError:
self.CM.log("Invalid tcpdump line: %s" % line)
return None
T1 = linesplit[0]
timesplit = string.split(T1,":")
time2split = string.split(timesplit[2],".")
time1 = (long(timesplit[0])*60+long(timesplit[1]))*60+long(time2split[0])+long(time2split[1])*0.000001
break
while count < 100:
line = fp.readline()
if not line:
return None
if re.search("udp",line) or re.search("UDP,", line):
count = count+1
linessplit = string.split(line," ")
for j in range(len(linessplit)-1):
if linessplit[j] =="udp": break
if linessplit[j]=="length:": break
try:
sum=int(linessplit[j+1])+sum
except ValueError:
self.CM.log("Invalid tcpdump line: %s" % line)
return None
T2 = linessplit[0]
timesplit = string.split(T2,":")
time2split = string.split(timesplit[2],".")
time2 = (long(timesplit[0])*60+long(timesplit[1]))*60+long(time2split[0])+long(time2split[1])*0.000001
time = time2-time1
if (time <= 0):
return 0
return (sum*8)/time
def is_applicable(self):
'''BandwidthTest never applicable'''
return 0
AllTestClasses.append(BandwidthTest)
###################################################################
class ResourceRecover(CTSTest):
###################################################################
def __init__(self, cm):
CTSTest.__init__(self,cm)
self.name="ResourceRecover"
self.start = StartTest(cm)
self.startall = SimulStartLite(cm)
self.max=30
self.rid=None
# these are the values used for the new LRM API call
self.action = "asyncmon"
self.interval = 0
def __call__(self, node):
'''Perform the 'ResourceRecover' test. '''
self.incr("calls")
ret = self.startall(None)
if not ret:
return self.failure("Setup failed")
resourcelist = self.CM.active_resources(node)
# if there are no active resources, skip the test
if len(resourcelist)==0:
self.CM.log("No active resources on %s" % node)
return self.skipped()
self.rid = self.CM.Env.RandomGen.choice(resourcelist)
self.CM.debug("Shooting %s..." % self.rid)
pats = []
pats.append("crmd:.* Performing .* op=%s_stop_0" % self.rid)
pats.append("crmd:.* Performing .* op=%s_start_0" % self.rid)
pats.append("crmd:.* LRM operation %s_start_0.*complete" % self.rid)
pats.append("Updating failcount for %s on .* after .* %s"
% (self.rid, self.action))
watch = CTS.LogWatcher(self.CM["LogFileName"], pats, timeout=60)
watch.setwatch()
# fail a resource by calling an action it doesn't support
self.CM.rsh(node, "@sbindir@/crm_resource -F -r %s -H %s &>/dev/null" % (self.rid, node))
watch.lookforall()
self.CM.cluster_stable()
recovernode=self.CM.ResourceLocation(self.rid)
if len(recovernode)==1:
self.CM.debug("Recovered: %s is running on %s"
%(self.rid, recovernode[0]))
if not watch.unmatched:
return self.success()
else:
return self.failure("Patterns not found: %s"
% repr(watch.unmatched))
elif len(recovernode)==0:
return self.failure("%s was not recovered and is inactive"
% self.rid)
else:
return self.failure("%s is now active on more than one node: %s"
%(self.rid, str(recovernode)))
def errorstoignore(self):
'''Return list of errors which should be ignored'''
return [ """Updating failcount for %s""" % self.rid,
"""Unknown operation: fail""",
"""ERROR: sending stonithRA op to stonithd failed.""",
"""ERROR: process_lrm_event: LRM operation %s_%s_%d""" % (self.rid, self.action, self.interval),
"""ERROR: process_graph_event: Action %s_%s_%d .* initiated outside of a transition""" % (self.rid, self.action, self.interval),
]
AllTestClasses.append(ResourceRecover)
###################################################################
class ComponentFail(CTSTest):
###################################################################
def __init__(self, cm):
CTSTest.__init__(self,cm)
self.name="ComponentFail"
self.startall = SimulStartLite(cm)
self.complist = cm.Components()
self.patterns = []
self.okerrpatterns = []
def __call__(self, node):
'''Perform the 'ComponentFail' test. '''
self.incr("calls")
self.patterns = []
self.okerrpatterns = []
# start all nodes
ret = self.startall(None)
if not ret:
return self.failure("Setup failed")
if not self.CM.cluster_stable(self.CM["StableTime"]):
return self.failure("Setup failed - unstable")
node_is_dc = self.CM.is_node_dc(node, None)
# select a component to kill
chosen = self.CM.Env.RandomGen.choice(self.complist)
while chosen.dc_only == 1 and node_is_dc == 0:
chosen = self.CM.Env.RandomGen.choice(self.complist)
self.CM.debug("...component %s (dc=%d,boot=%d)" % (chosen.name, node_is_dc,chosen.triggersreboot))
self.incr(chosen.name)
if chosen.name != "aisexec":
if self.CM["Name"] != "crm-lha" or chosen.name != "pengine":
self.patterns.append(self.CM["Pat:ChildKilled"] %(node, chosen.name))
self.patterns.append(self.CM["Pat:ChildRespawn"] %(node, chosen.name))
self.patterns.extend(chosen.pats)
if node_is_dc:
self.patterns.extend(chosen.dc_pats)
# Make sure the node goes down and then comes back up if it should reboot...
if chosen.triggersreboot:
for other in self.CM.Env["nodes"]:
if other != node:
self.patterns.append(self.CM["Pat:They_stopped"] %(other, node))
self.patterns.append(self.CM["Pat:Slave_started"] % node)
self.patterns.append(self.CM["Pat:Local_started"] % node)
# In an ideal world, this next stuff should be in the "chosen" object as a member function
if self.CM["Name"] == "crm-lha":
if chosen.triggersreboot:
if chosen.dc_only:
# Sometimes these will be in the log, and sometimes they won't...
self.okerrpatterns.append("%s crmd:.*Process %s:.* exited" %(node, chosen.name))
self.okerrpatterns.append("%s crmd:.*I_ERROR.*crmdManagedChildDied" %node)
self.okerrpatterns.append("%s crmd:.*The %s subsystem terminated unexpectedly" %(node, chosen.name))
self.okerrpatterns.append("ERROR: Client .* exited with return code")
else:
# Sometimes this won't be in the log...
self.okerrpatterns.append(self.CM["Pat:ChildKilled"] %(node, chosen.name))
self.okerrpatterns.append(self.CM["Pat:ChildRespawn"] %(node, chosen.name))
self.okerrpatterns.append(self.CM["Pat:ChildExit"])
# supply a copy so self.patterns doesn't end up empty
tmpPats = []
tmpPats.extend(self.patterns)
self.patterns.extend(chosen.badnews_ignore)
# Look for STONITH ops, depending on Env["at-boot"] we might need to change the nodes status
stonithPats = []
stonithPats.append("sending fencing op RESET for %s" % node)
stonith = CTS.LogWatcher(self.CM["LogFileName"], stonithPats, 0)
stonith.setwatch()
# set the watch for stable
watch = CTS.LogWatcher(
self.CM["LogFileName"], tmpPats,
self.CM["DeadTime"] + self.CM["StableTime"] + self.CM["StartTime"])
watch.setwatch()
# kill the component
chosen.kill(node)
# check to see if Heartbeat noticed
matched = watch.lookforall()
if matched:
self.CM.debug("Found: "+ repr(matched))
else:
self.CM.log("Patterns not found: " + repr(watch.unmatched))
if self.CM.Env["at-boot"] == 0:
self.CM.debug("Checking if %s was shot" % node)
shot = stonith.look(60)
if shot:
self.CM.debug("Found: "+ repr(shot))
self.CM.ShouldBeStatus[node]="down"
self.CM.debug("Waiting for the cluster to recover")
self.CM.cluster_stable()
self.CM.debug("Waiting for any STONITHd node to come back up")
self.CM.ns.WaitForAllNodesToComeUp(self.CM.Env["nodes"], 600)
self.CM.debug("Waiting for the cluster to re-stabilize with all nodes")
is_stable = self.CM.cluster_stable(self.CM["StartTime"])
if not matched:
return self.failure("Didn't find all expected patterns")
elif not is_stable:
return self.failure("Cluster did not become stable")
return self.success()
def errorstoignore(self):
'''Return list of errors which should be ignored'''
# Note that okerrpatterns refers to the last time we ran this test
# The good news is that this works fine for us...
self.okerrpatterns.extend(self.patterns)
return self.okerrpatterns
AllTestClasses.append(ComponentFail)
####################################################################
class SplitBrainTest(CTSTest):
####################################################################
'''Test split-brain: when the path between the two nodes breaks,
check whether both nodes take over the resource'''
def __init__(self,cm):
CTSTest.__init__(self,cm)
self.name = "SplitBrain"
self.start = StartTest(cm)
self.startall = SimulStartLite(cm)
def isolate_partition(self, partition):
other_nodes = []
other_nodes.extend(self.CM.Env["nodes"])
for node in partition:
try:
other_nodes.remove(node)
except ValueError:
self.CM.log("Node "+node+" not in " + repr(self.CM.Env["nodes"]) + " from " +repr(partition))
if len(other_nodes) == 0:
return 1
self.CM.debug("Creating partition: " + repr(partition))
self.CM.debug("Everyone else: " + repr(other_nodes))
for node in partition:
if not self.CM.isolate_node(node, other_nodes):
self.CM.log("Could not isolate %s" % node)
return 0
return 1
def heal_partition(self, partition):
other_nodes = []
other_nodes.extend(self.CM.Env["nodes"])
for node in partition:
try:
other_nodes.remove(node)
except ValueError:
self.CM.log("Node "+node+" not in " + repr(self.CM.Env["nodes"]))
if len(other_nodes) == 0:
return 1
self.CM.debug("Healing partition: " + repr(partition))
self.CM.debug("Everyone else: " + repr(other_nodes))
for node in partition:
self.CM.unisolate_node(node, other_nodes)
def __call__(self, node):
'''Perform split-brain test'''
self.incr("calls")
self.passed = 1
partitions = {}
ret = self.startall(None)
if not ret:
return self.failure("Setup failed")
while 1:
# Retry until we get multiple partitions
partitions = {}
p_max = len(self.CM.Env["nodes"])
for node in self.CM.Env["nodes"]:
p = self.CM.Env.RandomGen.randint(1, p_max)
if not partitions.has_key(p):
partitions[p]= []
partitions[p].append(node)
p_max = len(partitions.keys())
if p_max > 1:
break
# else, try again
self.CM.debug("Created %d partitions" % p_max)
for key in partitions.keys():
self.CM.debug("Partition["+str(key)+"]:\t"+repr(partitions[key]))
# Disabling STONITH to reduce test complexity for now
self.CM.rsh(node, "@sbindir@/crm_attribute -n stonith-enabled -v false")
for key in partitions.keys():
self.isolate_partition(partitions[key])
count = 30
while count > 0:
if len(self.CM.find_partitions()) != p_max:
time.sleep(10)
count -= 1
else:
break
else:
self.failure("Expected partitions were not created")
# Target number of partitions formed - wait for stability
if not self.CM.cluster_stable():
self.failure("Partitioned cluster not stable")
# Now audit the cluster state
self.CM.partitions_expected = p_max
if not self.audit():
self.failure("Audits failed")
self.CM.partitions_expected = 1
# And heal them again
for key in partitions.keys():
self.heal_partition(partitions[key])
# Wait for a single partition to form
count = 30
while count > 0:
if len(self.CM.find_partitions()) != 1:
time.sleep(10)
count -= 1
else:
break
else:
self.failure("Cluster did not reform")
# Wait for it to have the right number of members
count = 30
while count > 0:
members = []
partitions = self.CM.find_partitions()
if len(partitions) > 0:
members = partitions[0].split()
if len(members) != len(self.CM.Env["nodes"]):
time.sleep(10)
count -= 1
else:
break
else:
self.failure("Cluster did not completely reform")
# Wait up to 20 minutes - the delay is preferable to
# trying to continue in a messed-up state
if not self.CM.cluster_stable(1200):
self.failure("Reformed cluster not stable")
answer = raw_input('Continue? [nY]')
if answer and answer == "n":
raise ValueError("Reformed cluster not stable")
# Turn fencing back on
if self.CM.Env["DoStonith"]:
self.CM.rsh(node, "@sbindir@/crm_attribute -D -n stonith-enabled")
self.CM.cluster_stable()
if self.passed:
return self.success()
return self.failure("See previous errors")
def errorstoignore(self):
'''Return list of errors which are 'normal' and should be ignored'''
return [
"Another DC detected:",
"ERROR: attrd_cib_callback: .*Application of an update diff failed",
"crmd_ha_msg_callback:.*not in our membership list",
"CRIT:.*node.*returning after partition",
]
def is_applicable(self):
'''Applicable only on clusters of more than two nodes with experimental tests enabled'''
return len(self.CM.Env["nodes"]) > 2 and self.CM.Env["experimental-tests"]
AllTestClasses.append(SplitBrainTest)
####################################################################
class MemoryTest(CTSTest):
####################################################################
'''Check to see if anyone is leaking memory'''
def __init__(self, cm):
CTSTest.__init__(self,cm)
self.name="Memory"
# self.test = ElectionMemoryTest(cm)
self.test = ResourceRecover(cm)
self.startall = SimulStartLite(cm)
self.before = {}
self.after = {}
def __call__(self, node):
self.incr("calls")
ps_command='''ps -eo ucomm,pid,pmem,tsiz,dsiz,rss,vsize | grep -e ccm -e ha_logd -e cib -e crmd -e lrmd -e tengine -e pengine'''
memory_error = [
"", "", "",
"Code",
"Data",
"Resident",
"Total"
]
ret = self.startall(None)
if not ret:
return self.failure("Test setup failed")
time.sleep(10)
for node in self.CM.Env["nodes"]:
self.before[node] = {}
(rc, results) = self.CM.rsh(node, ps_command, None)
for result in results:
tokens = result.split()
self.before[node][tokens[1]] = result
# run the underlying test to generate some cluster activity...
if not self.test(node):
return self.failure("Underlying test failed")
time.sleep(10)
for node in self.CM.Env["nodes"]:
self.after[node] = {}
(rc, results) = self.CM.rsh(node, ps_command, None)
for result in results:
tokens = result.split()
self.after[node][tokens[1]] = result
failed_nodes = []
for node in self.CM.Env["nodes"]:
failed = 0
for process in self.before[node]:
messages = []
before_line = self.before[node][process]
before_tokens = before_line.split()
after_line = self.after[node].get(process)
if not after_line:
self.CM.log("%s %s[%s] exited during the test"
%(node, before_tokens[0], before_tokens[1]))
continue
after_tokens = after_line.split()
# 3 : Code size
# 4 : Data size
# 5 : Resident size
# 6 : Total size
for index in [ 3, 4, 6 ]:
mem_before = int(before_tokens[index])
mem_after = int(after_tokens[index])
mem_diff = mem_after - mem_before
mem_allow = mem_before * 0.01
# for now...
mem_allow = 0
if mem_diff > mem_allow:
failed = 1
messages.append("%s size grew by %dkB (%dkB)"
%(memory_error[index], mem_diff, mem_after))
elif mem_diff < 0:
messages.append("%s size shrank by %dkB (%dkB)"
%(memory_error[index], mem_diff, mem_after))
if len(messages) > 0:
self.CM.log("Process %s[%s] on %s: %s"
%(before_tokens[0], before_tokens[1], node,
repr(messages)))
self.CM.debug("%s Before: %s[%s] (%s%%):\tcode=%skB, data=%skB, resident=%skB, total=%skB"
%(node, before_tokens[0], before_tokens[1],
before_tokens[2], before_tokens[3],
before_tokens[4], before_tokens[5],
before_tokens[6]))
self.CM.debug("%s After: %s[%s] (%s%%):\tcode=%skB, data=%skB, resident=%skB, total=%skB"
%(node, after_tokens[0], after_tokens[1],
after_tokens[2], after_tokens[3],
after_tokens[4], after_tokens[5],
after_tokens[6]))
if failed == 1:
failed_nodes.append(node)
if len(failed_nodes) > 0:
return self.failure("Memory leaked on: " + repr(failed_nodes))
return self.success()
def errorstoignore(self):
'''Return list of errors which should be ignored'''
return [ """ERROR: .* LRM operation.*monitor on .*: not running""",
"""pengine:.*Handling failed """]
#AllTestClasses.append(MemoryTest)
####################################################################
class ElectionMemoryTest(CTSTest):
####################################################################
'''Force an election; intended for use by the memory test'''
def __init__(self, cm):
CTSTest.__init__(self,cm)
self.name="Election"
def __call__(self, node):
self.incr("calls")
self.CM.rsh(node, self.CM["ElectionCmd"]%node)
if self.CM.cluster_stable():
return self.success()
return self.failure("Cluster not stable")
def is_applicable(self):
'''Never applicable, only for use by the memory test'''
return 0
AllTestClasses.append(ElectionMemoryTest)
####################################################################
class Reattach(CTSTest):
####################################################################
'''Restart the cluster with resource management disabled and verify that no resources are stopped or started'''
def __init__(self, cm):
CTSTest.__init__(self,cm)
self.name="Reattach"
self.startall = SimulStartLite(cm)
self.restart1 = RestartTest(cm)
self.stopall = SimulStopLite(cm)
def __call__(self, node):
self.incr("calls")
ret = self.startall(None)
if not ret:
return self.failure("Test setup failed")
pats = []
managed = CTS.LogWatcher(self.CM["LogFileName"], ["is-managed-default"], timeout=60)
managed.setwatch()
self.CM.debug("Disable resource management")
self.CM.rsh(node, "@sbindir@/crm_attribute -n is-managed-default -v false")
if not managed.lookforall():
self.CM.log("Patterns not found: " + repr(managed.unmatched))
return self.failure("Resource management not disabled")
pats = []
pats.append("crmd:.*Performing.*_stop_0")
pats.append("crmd:.*Performing.*_start_0")
pats.append("crmd:.*Performing.*_promote_0")
pats.append("crmd:.*Performing.*_demote_0")
pats.append("crmd:.*Performing.*_migrate_.*_0")
watch = CTS.LogWatcher(self.CM["LogFileName"], pats, timeout=60)
watch.setwatch()
self.CM.debug("Shutting down the cluster")
ret = self.stopall(None)
if not ret:
self.CM.debug("Re-enable resource management")
self.CM.rsh(node, "@sbindir@/crm_attribute -D -n is-managed-default")
return self.failure("Couldn't shut down the cluster")
self.CM.debug("Bringing the cluster back up")
ret = self.startall(None)
if not ret:
self.CM.debug("Re-enable resource management")
self.CM.rsh(node, "@sbindir@/crm_attribute -D -n is-managed-default")
return self.failure("Couldn't restart the cluster")
if self.local_badnews("ResourceActivity:", watch):
self.CM.debug("Re-enable resource management")
self.CM.rsh(node, "@sbindir@/crm_attribute -D -n is-managed-default")
return self.failure("Resources stopped or started during cluster restart")
watch = CTS.LogWatcher(self.CM["LogFileName"], pats, timeout=60)
watch.setwatch()
managed = CTS.LogWatcher(self.CM["LogFileName"], ["is-managed-default"], timeout=60)
managed.setwatch()
self.CM.debug("Re-enable resource management")
self.CM.rsh(node, "@sbindir@/crm_attribute -D -n is-managed-default")
if not managed.lookforall():
self.CM.log("Patterns not found: " + repr(managed.unmatched))
return self.failure("Resource management not enabled")
self.CM.cluster_stable()
# Ignore actions for STONITH resources
ignore = []
(rc, lines) = self.CM.rsh(node, "@sbindir@/crm_resource -c", None)
for line in lines:
if re.search("^Resource", line):
r = AuditResource(self.CM, line)
if r.rclass == "stonith":
self.CM.debug("Ignoring: crmd:.*Performing.*op=%s_.*_0" % r.id)
ignore.append("crmd:.*Performing.*op=%s_.*_0" % r.id)
if self.local_badnews("ResourceActivity:", watch, ignore):
return self.failure("Resources stopped or started after resource management was re-enabled")
return ret
def errorstoignore(self):
'''Return list of errors which should be ignored'''
return [
"You may ignore this error if it is unmanaged.",
"pingd: .*ERROR: send_ipc_message:",
"pingd: .*ERROR: send_update:",
]
AllTestClasses.append(Reattach)
####################################################################
class SpecialTest1(CTSTest):
####################################################################
'''Set up a custom test to cause quorum failure issues for Andrew'''
def __init__(self, cm):
CTSTest.__init__(self,cm)
self.name="SpecialTest1"
self.startall = SimulStartLite(cm)
self.restart1 = RestartTest(cm)
self.stopall = SimulStopLite(cm)
def __call__(self, node):
'''Perform the 'SpecialTest1' test for Andrew. '''
self.incr("calls")
# Shut down all the nodes...
ret = self.stopall(None)
if not ret:
return ret
# Start the selected node
ret = self.restart1(node)
if not ret:
return ret
# Start all remaining nodes
ret = self.startall(None)
return ret
AllTestClasses.append(SpecialTest1)
###################################################################
class NearQuorumPointTest(CTSTest):
###################################################################
'''
This test brings larger clusters near the quorum point (50%).
In addition, it will test doing starts and stops at the same time.
Here is how I think it should work:
- loop over the nodes and decide randomly which will be up and which
will be down. Use a 50% probability for each of up/down.
- figure out what to do to get into that state from the current state
- in parallel, bring up those going up and bring those going down.
'''
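# Illustrative example (hypothetical node names): with five nodes one pass
# might randomly choose startset = [n1, n4] and stopset = [n2, n3, n5],
# so two nodes are started while three are stopped in parallel.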
def __init__(self, cm):
CTSTest.__init__(self,cm)
self.name="NearQuorumPoint"
def __call__(self, dummy):
'''Perform the 'NearQuorumPoint' test. '''
self.incr("calls")
startset = []
stopset = []
#decide what to do with each node
for node in self.CM.Env["nodes"]:
action = self.CM.Env.RandomGen.choice(["start","stop"])
#action = self.CM.Env.RandomGen.choice(["start","stop","no change"])
if action == "start" :
startset.append(node)
elif action == "stop" :
stopset.append(node)
self.CM.debug("start nodes:" + repr(startset))
self.CM.debug("stop nodes:" + repr(stopset))
#add search patterns
watchpats = [ ]
for node in stopset:
if self.CM.ShouldBeStatus[node] == "up":
watchpats.append(self.CM["Pat:We_stopped"] % node)
for node in startset:
if self.CM.ShouldBeStatus[node] == "down":
#watchpats.append(self.CM["Pat:Slave_started"] % node)
watchpats.append(self.CM["Pat:Local_started"] % node)
if len(watchpats) == 0:
return self.skipped()
if len(startset) != 0:
watchpats.append(self.CM["Pat:DC_IDLE"])
watch = CTS.LogWatcher(self.CM["LogFileName"], watchpats
, timeout=self.CM["DeadTime"]+10)
watch.setwatch()
#begin actions
for node in stopset:
if self.CM.ShouldBeStatus[node] == "up":
self.CM.StopaCMnoBlock(node)
for node in startset:
if self.CM.ShouldBeStatus[node] == "down":
self.CM.StartaCMnoBlock(node)
#get the result
if watch.lookforall():
self.CM.cluster_stable()
return self.success()
self.CM.log("Warn: Patterns not found: " + repr(watch.unmatched))
#get the "bad" nodes
upnodes = []
for node in stopset:
if self.CM.StataCM(node) == 1:
upnodes.append(node)
downnodes = []
for node in startset:
if self.CM.StataCM(node) == 0:
downnodes.append(node)
if upnodes == [] and downnodes == []:
self.CM.cluster_stable()
return self.success()
if len(upnodes) > 0:
self.CM.log("Warn: Unstoppable nodes: " + repr(upnodes))
if len(downnodes) > 0:
self.CM.log("Warn: Unstartable nodes: " + repr(downnodes))
return self.failure()
AllTestClasses.append(NearQuorumPointTest)
###################################################################
class BSC_AddResource(CTSTest):
###################################################################
'''Add a resource to the cluster'''
def __init__(self, cm):
CTSTest.__init__(self, cm)
self.name="AddResource"
self.resource_offset = 0
self.cib_cmd="""@sbindir@/cibadmin -C -o %s -X '%s' """
def __call__(self, node):
self.incr("calls")
self.resource_offset = self.resource_offset + 1
r_id = "bsc-rsc-%s-%d" % (node, self.resource_offset)
start_pat = "crmd.*%s_start_0.*complete"
patterns = []
patterns.append(start_pat % r_id)
watch = CTS.LogWatcher(
self.CM["LogFileName"], patterns, self.CM["DeadTime"])
watch.setwatch()
fields = string.split(self.CM.Env["IPBase"], '.')
fields[3] = str(int(fields[3])+1)
ip = string.join(fields, '.')
self.CM.Env["IPBase"] = ip
if not self.make_ip_resource(node, r_id, "ocf", "IPaddr", ip):
return self.failure("Make resource %s failed" % r_id)
failed = 0
watch_result = watch.lookforall()
if watch.unmatched:
for regex in watch.unmatched:
self.CM.log ("Warn: Pattern not found: %s" % (regex))
failed = 1
if failed:
return self.failure("Resource pattern(s) not found")
if not self.CM.cluster_stable(self.CM["DeadTime"]):
return self.failure("Unstable cluster")
return self.success()
def make_ip_resource(self, node, id, rclass, type, ip):
self.CM.log("Creating %s::%s:%s (%s) on %s" % (rclass,type,id,ip,node))
rsc_xml="""
<primitive id="%s" class="%s" type="%s" provider="heartbeat">
<instance_attributes id="%s"><attributes>
<nvpair id="%s" name="ip" value="%s"/>
</attributes></instance_attributes>
</primitive>""" % (id, rclass, type, id, id, ip)
node_constraint="""
<rsc_location id="run_%s" rsc="%s">
<rule id="pref_run_%s" score="100">
<expression id="%s_loc_expr" attribute="#uname" operation="eq" value="%s"/>
</rule>
</rsc_location>""" % (id, id, id, id, node)
rc = 0
(rc, lines) = self.CM.rsh(node, self.cib_cmd % ("constraints", node_constraint), None)
if rc != 0:
self.CM.log("Constraint creation failed: %d" % rc)
return None
(rc, lines) = self.CM.rsh(node, self.cib_cmd % ("resources", rsc_xml), None)
if rc != 0:
self.CM.log("Resource creation failed: %d" % rc)
return None
return 1
def is_applicable(self):
if self.CM.Env["DoBSC"]:
return 1
return None
###################################################################
class SimulStopLite(CTSTest):
###################################################################
'''Stop any active nodes ~ simultaneously'''
def __init__(self, cm):
CTSTest.__init__(self,cm)
self.name="SimulStopLite"
def __call__(self, dummy):
'''Perform the 'SimulStopLite' setup work. '''
self.incr("calls")
self.CM.debug("Setup: " + self.name)
# We ignore the "node" parameter...
watchpats = [ ]
for node in self.CM.Env["nodes"]:
if self.CM.ShouldBeStatus[node] == "up":
self.incr("WasStarted")
watchpats.append(self.CM["Pat:All_stopped"] % node)
if self.CM.Env["use_logd"]:
watchpats.append(self.CM["Pat:Logd_stopped"] % node)
if len(watchpats) == 0:
self.CM.clear_all_caches()
return self.skipped()
# Stop all the nodes - at about the same time...
watch = CTS.LogWatcher(self.CM["LogFileName"], watchpats
, timeout=self.CM["DeadTime"]+10)
watch.setwatch()
self.starttime=time.time()
for node in self.CM.Env["nodes"]:
if self.CM.ShouldBeStatus[node] == "up":
self.CM.StopaCMnoBlock(node)
if watch.lookforall():
self.CM.clear_all_caches()
return self.success()
did_fail=0
up_nodes = []
for node in self.CM.Env["nodes"]:
if self.CM.StataCM(node) == 1:
did_fail=1
up_nodes.append(node)
if did_fail:
return self.failure("Active nodes exist: " + repr(up_nodes))
self.CM.log("Warn: All nodes stopped but CTS didnt detect: "
+ repr(watch.unmatched))
self.CM.clear_all_caches()
return self.failure("Missing log message: "+repr(watch.unmatched))
def is_applicable(self):
'''SimulStopLite is a setup test and never applicable'''
return 0
###################################################################
class SimulStartLite(CTSTest):
###################################################################
'''Start any stopped nodes ~ simultaneously'''
def __init__(self, cm):
CTSTest.__init__(self,cm)
self.name="SimulStartLite"
def __call__(self, dummy):
'''Perform the 'SimulStartLite' setup work. '''
self.incr("calls")
self.CM.debug("Setup: " + self.name)
# We ignore the "node" parameter...
watchpats = [ ]
uppat = self.CM["Pat:Slave_started"]
if self.CM.upcount() == 0:
uppat = self.CM["Pat:Local_started"]
for node in self.CM.Env["nodes"]:
if self.CM.ShouldBeStatus[node] == "down":
self.incr("WasStopped")
watchpats.append(uppat % node)
if len(watchpats) == 0:
return self.skipped()
watchpats.append(self.CM["Pat:DC_IDLE"])
# Start all the nodes - at about the same time...
watch = CTS.LogWatcher(self.CM["LogFileName"], watchpats
, timeout=self.CM["DeadTime"]+10)
watch.setwatch()
self.starttime=time.time()
for node in self.CM.Env["nodes"]:
if self.CM.ShouldBeStatus[node] == "down":
self.CM.StartaCMnoBlock(node)
if watch.lookforall():
for attempt in (1, 2, 3, 4, 5):
if self.CM.cluster_stable():
return self.success()
return self.failure("Cluster did not stabilize")
did_fail=0
unstable = []
for node in self.CM.Env["nodes"]:
if self.CM.StataCM(node) == 0:
did_fail=1
unstable.append(node)
if did_fail:
return self.failure("Unstarted nodes exist: " + repr(unstable))
unstable = []
for node in self.CM.Env["nodes"]:
if not self.CM.node_stable(node):
did_fail=1
unstable.append(node)
if did_fail:
return self.failure("Unstable cluster nodes exist: "
+ repr(unstable))
self.CM.log("ERROR: All nodes started but CTS didnt detect: "
+ repr(watch.unmatched))
return self.failure()
def is_applicable(self):
'''SimulStartLite is a setup test and never applicable'''
return 0
def TestList(cm, audits):
result = []
for testclass in AllTestClasses:
bound_test = testclass(cm)
if bound_test.is_applicable():
bound_test.Audits = audits
result.append(bound_test)
return result
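# Usage sketch (illustrative only; the audit and scenario construction are
# assumptions, they are not defined in this module):
#
#     audits = CTSaudits.AuditList(cm)
#     tests = TestList(cm, audits)
#     RandomTests(scenario, cm, tests, audits).run(500)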
