# NOTE(review): the following lines are unified-diff header residue left over
# from a patch paste; preserved as comments so no content is lost.
#   diff --git a/cts/CTStests.py b/cts/CTStests.py
#   index 8b792ae7d1..84ddf67dd9 100644
#   --- a/cts/CTStests.py
#   +++ b/cts/CTStests.py
#   @@ -1,2378 +1,2388 @@
'''CTS: Cluster Testing System: Tests module

There are a few things we want to do here:
'''

__copyright__='''
Copyright (C) 2000, 2001 Alan Robertson
Licensed under the GNU GPL.

Add RecourceRecover testcase Zhao Kai
'''

#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
#
# SPECIAL NOTE:
#
#       Tests may NOT implement any cluster-manager-specific code in them.
#       EXTEND the ClusterManager object to provide the base capabilities
#       the test needs if you need to do something that the current CM classes
#       do not.  Otherwise you screw up the whole point of the object structure
#       in CTS.
#
#               Thank you.
#

import CTS
import CTSaudits
import time, os, re, types, string, tempfile, sys
from CTSaudits import *
from stat import *

#       List of all class objects for tests which we ought to
#       consider running.

class AllTests:
    '''
    A collection of tests which are run at random.

    Drives a test session: validates the supplied audits/tests against the
    current environment, runs each test with setup/teardown, scans the
    cluster log for "BadNews" between tests, and accumulates statistics.
    '''
    def __init__(self, scenario, cm, tests, Audits):
        # scenario: the Scenario object controlling overall setup/teardown.
        # cm:       the ClusterManager under test.
        # tests:    candidate CTSTest instances (filtered by applicability).
        # Audits:   candidate ClusterAudit instances (filtered likewise).
        self.CM = cm
        self.Env = cm.Env
        self.Scenario = scenario
        self.Tests = []
        self.Audits = []
        self.ns=CTS.NodeStatus(self.Env)
        self.Stats = {"success":0, "failure":0, "BadNews":0, "skipped":0}
        self.IndividualStats= {}

        # Only keep audits/tests that both pass the type check and declare
        # themselves applicable in this environment.
        for audit in Audits:
            if not issubclass(audit.__class__, ClusterAudit):
                raise ValueError("Init value must be a subclass of ClusterAudit")
            if audit.is_applicable():
                self.Audits.append(audit)

        for test in tests:
            if not issubclass(test.__class__, CTSTest):
                raise ValueError("Init value must be a subclass of CTSTest")
            if test.is_applicable():
                self.Tests.append(test)

        if not scenario.IsApplicable():
                raise ValueError("Scenario not applicable in"
                                 " given Environment")

    def incr(self, name):
        '''Increment (or initialize) the value associated with the given name'''
        if not self.Stats.has_key(name):
            self.Stats[name]=0
        self.Stats[name] = self.Stats[name]+1

    def audit(self, BadNews, test):
        '''Scan the BadNews log watcher for unexpected errors, then run all
        configured cluster audits.

        BadNews: a CTS.LogWatcher over the cluster log's "bad" regexes.
        test:    the test just run (or None); contributes its own ignore
                 patterns and gets its "auditfail" stat bumped on failure.
        '''
        errcount=0
        BadNewsDebug=0
        #BadNews.debug=1
        ignorelist = []
        ignorelist.append(" CTS: ")
        ignorelist.append("BadNews:")
        ignorelist.extend(self.CM.errorstoignore())
        if test:
            ignorelist.extend(test.errorstoignore())

        # Drain up to 1000 log matches; anything not on the ignore list is
        # logged and counted as BadNews.
        while errcount < 1000:
            if BadNewsDebug: print "Looking for BadNews"
            match=BadNews.look(0)
            if match:
                if BadNewsDebug: print "BadNews found: "+match
                add_err = 1
                for ignore in ignorelist:
                    if add_err == 1 and re.search(ignore, match):
                        if BadNewsDebug: print "Ignoring based on pattern: ("+ignore+")"
                        add_err = 0
                if add_err == 1:
                    self.CM.log("BadNews: " + match)
                    self.incr("BadNews")
                    errcount=errcount+1
            else:
                break
        else:
            # while/else: only reached if the 1000-error cap was hit without
            # a break, i.e. the log is flooding with problems.
            answer = raw_input('Big problems.  Continue? [nY]')
            if answer and answer == "n":
                self.CM.log("Shutting down.")
                self.CM.stopall()
                self.summarize()
                raise ValueError("Looks like we hit a BadNews jackpot!")

        for audit in self.Audits:
            if not audit():
                self.CM.log("Audit " + audit.name() + " FAILED.")
                self.incr("auditfail")
                if test:
                    test.incr("auditfail")

    def summarize(self):
        '''Log overall and per-test statistics for the session.'''
        self.CM.log("****************")
        self.CM.log("Overall Results:" + repr(self.Stats))
        self.CM.log("****************")

        stat_filter = {
            "calls":0,
            "failure":0,
            "skipped":0,
            "auditfail":0,
            }
        self.CM.log("Test Summary")
        for test in self.Tests:
            for key in stat_filter.keys():
                stat_filter[key] = test.Stats[key]
            self.CM.log(("Test %s: "%test.name).ljust(25) + " %s"%repr(stat_filter))

        self.CM.debug("Detailed Results")
        for test in self.Tests:
            self.CM.debug(("Test %s: "%test.name).ljust(25) + " %s"%repr(test.Stats))

        self.CM.log("<<<<<<<<<<<<<<<<  TESTS COMPLETED")

    def test_loop(self, BadNews, max):
        '''Run every configured test exactly once, in order.
        Returns the next test count (1 + number of tests that ran).
        The "max" parameter is unused here; subclasses honor it.'''
        testcount=1
        self.CM.log("Executing all tests once")
        for test in self.Tests:
            if self.run_test(BadNews, test, testcount):
                testcount += 1
        return testcount

    def run_test(self, BadNews, test, testcount):
        '''Run one test on a randomly-chosen node: setup, the test proper,
        teardown, timing bookkeeping, and a post-test audit.
        Returns 1 if the test counted as having run, else 0.'''
        nodechoice = self.Env.RandomNode()

        ret = 1
        where = ""
        did_run = 0

        self.CM.log(("Running test %s" % test.name).ljust(35) + (" (%s) " % nodechoice).ljust(15) +"["+ ("%d" % testcount).rjust(3) +"]")

        starttime=test.set_starttime()
        if not test.setup(nodechoice):
            self.CM.log("Setup failed")
            ret = 0
        elif not test.canrunnow(nodechoice):
            self.CM.log("Skipped")
            test.skipped()
        else:
            did_run = 1
            ret = test(nodechoice)

        if not test.teardown(nodechoice):
            self.CM.log("Teardown failed")
            ret = 0

        self.CM.debug("MARK: test %s stop" % test.name)
        stoptime=time.time()
        self.CM.oprofileSave(testcount)

        # elapsed_time includes setup/teardown; test_time is measured from
        # the test's own starttime (which setup() may have reset).
        elapsed_time = stoptime - starttime
        test_time = stoptime - test.starttime
        if not test.has_key("min_time"):
            test["elapsed_time"] = elapsed_time
            test["min_time"] = test_time
            test["max_time"] = test_time
        else:
            test["elapsed_time"] = test["elapsed_time"] + elapsed_time
            if test_time < test["min_time"]:
                test["min_time"] = test_time
            if test_time > test["max_time"]:
                test["max_time"] = test_time

        if ret:
            self.incr("success")
            self.CM.debug("Test %s runtime: %.2f" % (test.name, test_time))
        else:
            self.incr("failure")
            self.CM.statall()
            did_run = 1  # Force the test count to be incremented anyway so test extraction works

        self.audit(BadNews, test)
        return did_run

    def run(self, max=1):
        (
'''
Set up the given scenario, then run the selected tests at
random for the selected number of iterations.
''')
        BadNews=CTS.LogWatcher(self.CM["LogFileName"], self.CM["BadRegexes"],
                               timeout=0)
        BadNews.setwatch()

        self.CM.ns.WaitForAllNodesToComeUp(self.CM.Env["nodes"])

        self.CM.oprofileStop()
        self.CM.oprofileStart()

        # Pre-flight audits (skipped under BSC mode): bail out early if the
        # logging setup or disk space is already unusable.
        if not self.CM.Env["DoBSC"]:
            audit = LogAudit(self.CM)
            if not audit():
                self.CM.log("Audit " + audit.name() + " FAILED.")
                return (None, None)
            else:
                self.CM.log("Audit " + audit.name() + " passed.")

            audit = DiskAudit(self.CM)
            if not audit():
                self.CM.log("Audit " + audit.name() + " FAILED.")
                return (None, None)
            else:
                self.CM.log("Audit " + audit.name() + " passed.")

        if not self.Scenario.SetUp(self.CM):
            return (None, None)

        self.CM.oprofileSave(0)

        time.sleep(30)  # This makes sure everything is stabilized before starting...
        self.audit(BadNews, None)

        testcount = self.test_loop(BadNews, max)

        self.Scenario.TearDown(self.CM)
        self.CM.oprofileSave(testcount)
        self.CM.oprofileStop()

        self.audit(BadNews, None)

        for test in self.Tests:
            self.IndividualStats[test.name] = test.Stats

        return self.Stats, self.IndividualStats

class RandomTests(AllTests):
    '''Like AllTests, but picks tests at random until "max" have run.'''
    def test_loop(self, BadNews, max):
        testcount=1
        self.CM.log("Executing tests at random")
        while testcount <= max:
            test = self.Env.RandomGen.choice(self.Tests)
            if self.run_test(BadNews, test, testcount):
                testcount += 1
        return testcount

class BenchTests(AllTests):
    '''
    Nothing (yet) here.
    '''

# Populated below as each concrete test class is defined; the CTS driver
# instantiates every class in this list.
AllTestClasses = [ ]

class CTSTest:
    '''
    A Cluster test.
    We implement the basic set of properties and behaviors for a
    generic cluster test.

    Cluster tests track their own statistics.
    We keep each of the kinds of counts we track as separate
    {name,value} pairs.
    '''

    def __init__(self, cm):
        #self.name="the unnamed test"
        self.Stats = {"calls":0
        , "success":0
        , "failure":0
        , "skipped":0
        , "auditfail":0}

#        if not issubclass(cm.__class__, ClusterManager):
#            raise ValueError("Must be a ClusterManager object")
        self.CM = cm
        self.Audits = []
        self.timeout=120
        self.starttime=0
        self.passed = 1
        # Applicability flags; subclasses set these to 1 to opt into the
        # corresponding --loop/--unsafe/--experimental/--valgrind test sets.
        self.is_loop = 0
        self.is_unsafe = 0
        self.is_experimental = 0
        self.is_valgrind = 0
        self.benchmark = 0  # which tests to benchmark

    def has_key(self, key):
        # Dict-like access delegates to the per-test statistics table.
        return self.Stats.has_key(key)

    def __setitem__(self, key, value):
        self.Stats[key] = value

    def __getitem__(self, key):
        return self.Stats[key]

    def set_starttime(self):
        '''Record (and return) the test start time and mark it in the log.'''
        self.starttime=time.time()
        self.CM.debug("MARK: test %s start" % self.name)
        return self.starttime

    def incr(self, name):
        '''Increment (or initialize) the value associated with the given name'''
        if not self.Stats.has_key(name):
            self.Stats[name]=0
        self.Stats[name] = self.Stats[name]+1

        # Reset the test passed boolean
        if name == "calls":
            self.passed = 1

    def failure(self, reason="none"):
        '''Increment the failure count'''
        self.passed = 0
        self.incr("failure")
        self.CM.log(("Test %s" % self.name).ljust(35) +" FAILED: %s" % reason)
        return None

    def success(self):
        '''Increment the success count'''
        self.incr("success")
        return 1

    def skipped(self):
        '''Increment the skipped count'''
        self.incr("skipped")
        return 1

    def __call__(self, node):
        '''Perform the given test'''
        # Abstract: subclasses must override.  The statements after the
        # raise are intentionally unreachable boilerplate.
        raise ValueError("Abstract Class member (__call__)")
        self.incr("calls")
        return self.failure()

    def audit(self):
        '''Run this test's internal audits; return 1 if all passed.'''
        passed = 1
        if len(self.Audits) > 0:
            for audit in self.Audits:
                if not audit():
                    self.CM.log("Internal %s Audit %s FAILED." % (self.name, audit.name()))
                    self.incr("auditfail")
                    passed = 0
        return passed

    def setup(self, node):
        '''Setup the given test'''
        return self.success()

    def teardown(self, node):
        '''Tear down the given test'''
        return self.success()

    def local_badnews(self, prefix, watch, local_ignore=[]):
        '''Drain "watch" for unexpected log lines, logging each one with
        "prefix" and returning how many were found (capped at 100).
        NOTE(review): local_ignore is a mutable default argument -- safe
        here only because it is never mutated.'''
        errcount = 0
        if not prefix:
            prefix = "LocalBadNews:"

        ignorelist = []
        ignorelist.append(" CTS: ")
        ignorelist.append(prefix)
        ignorelist.extend(local_ignore)

        while errcount < 100:
            match=watch.look(0)
            if match:
                add_err = 1
                for ignore in ignorelist:
                    if add_err == 1 and re.search(ignore, match):
                        add_err = 0
                if add_err == 1:
                    self.CM.log(prefix + " " + match)
                    errcount=errcount+1
            else:
                break
        else:
            # Hit the 100-error cap without running out of matches.
            self.CM.log("Too many errors!")

        return errcount

    def is_applicable(self):
        return self.is_applicable_common()

    def is_applicable_common(self):
        '''Return TRUE if we are applicable in the current test configuration'''
        #raise ValueError("Abstract Class member (is_applicable)")

        if self.is_loop and not self.CM.Env["loop-tests"]:
            return 0
        elif self.is_unsafe and not self.CM.Env["unsafe-tests"]:
            return 0
        elif self.is_valgrind and not self.CM.Env["valgrind-tests"]:
            return 0
        elif self.is_experimental and not self.CM.Env["experimental-tests"]:
            return 0

        return 1

    def find_ocfs2_resources(self, node):
        '''Discover the o2cb resource and any OCFS2 filesystems colocated
        with it by parsing "crm_resource -c" output on "node".
        Populates self.r_o2cb / self.r_ocfs2; returns the filesystem count.'''
        self.r_o2cb = None
        self.r_ocfs2 = []

        (rc, lines) = self.CM.rsh(node, "crm_resource -c", None)
        for line in lines:
            if re.search("^Resource", line):
                r = AuditResource(self.CM, line)
                if r.rtype == "o2cb" and r.parent != "NA":
                    # NOTE(review): this logs before r_o2cb is assigned, so
                    # it prints the previous value (usually None).
                    self.CM.debug("Found o2cb: %s" % self.r_o2cb)
                    self.r_o2cb = r.parent
            if re.search("^Constraint", line):
                c = AuditConstraint(self.CM, line)
                if c.type == "rsc_colocation" and c.target == self.r_o2cb:
                    self.r_ocfs2.append(c.rsc)

        self.CM.debug("Found ocfs2 filesystems: %s" % repr(self.r_ocfs2))
        return len(self.r_ocfs2)

    def canrunnow(self, node):
        '''Return TRUE if we can meaningfully run right now'''
        return 1

    def errorstoignore(self):
        '''Return list of errors which are 'normal' and should be ignored'''
        return []

###################################################################
class StopTest(CTSTest):
###################################################################
    '''Stop (deactivate) the cluster manager on a node'''
    def __init__(self, cm):
        CTSTest.__init__(self, cm)
        self.name="Stop"

    def __call__(self, node):
        '''Perform the 'stop' test. '''
        self.incr("calls")
        if self.CM.ShouldBeStatus[node] != "up":
            return self.skipped()

        patterns = []
        # Technically we should always be able to notice ourselves stopping
        patterns.append(self.CM["Pat:We_stopped"] % node)

        #if self.CM.Env["use_logd"]:
        #    patterns.append(self.CM["Pat:Logd_stopped"] % node)

        # Any active node needs to notice this one left
        # NOTE: This wont work if we have multiple partitions
        for other in self.CM.Env["nodes"]:
            if self.CM.ShouldBeStatus[other] == "up" and other != node:
                patterns.append(self.CM["Pat:They_stopped"] %(other, node))
                #self.debug("Checking %s will notice %s left"%(other, node))

        watch = CTS.LogWatcher(self.CM["LogFileName"], patterns, self.CM["DeadTime"])
        watch.setwatch()

        # Classify the stop for the stats: ourselves, the last node, or a peer.
        if node == self.CM.OurNode:
            self.incr("us")
        else:
            if self.CM.upcount() <= 1:
                self.incr("all")
            else:
                self.incr("them")

        self.CM.StopaCM(node)
        watch_result = watch.lookforall()

        failreason=None
        UnmatchedList = "||"
        if watch.unmatched:
            # Dump the process table to help diagnose what failed to stop.
            (rc, output) = self.CM.rsh(node, "/bin/ps axf", None)
            for line in output:
                self.CM.debug(line)

            for regex in watch.unmatched:
                self.CM.log ("ERROR: Shutdown pattern not found: %s" % (regex))
                UnmatchedList +=  regex + "||";
                failreason="Missing shutdown pattern"

        self.CM.cluster_stable(self.CM["DeadTime"])

        if not watch.unmatched or self.CM.upcount() == 0:
            return self.success()

        # If every remaining node missed the pattern, treat it as a hard fail.
        if len(watch.unmatched) >= self.CM.upcount():
            return self.failure("no match against (%s)" % UnmatchedList)

        if failreason == None:
            return self.success()
        else:
            return self.failure(failreason)

#
# We don't register StopTest because it's better when called by
# another test...
#

###################################################################
class StartTest(CTSTest):
###################################################################
    '''Start (activate) the cluster manager on a node'''
    def __init__(self, cm, debug=None):
        CTSTest.__init__(self,cm)
        self.name="start"
        self.debug = debug

    def __call__(self, node):
        '''Perform the 'start' test. '''
        self.incr("calls")

        # Classify: first node up counts as "us", otherwise "them".
        if self.CM.upcount() == 0:
            self.incr("us")
        else:
            self.incr("them")

        if self.CM.ShouldBeStatus[node] != "down":
            return self.skipped()
        elif self.CM.StartaCM(node):
            return self.success()
        else:
            return self.failure("Startup %s on node %s failed" %(self.CM["Name"], node))

#
# We don't register StartTest because it's better when called by
# another test...
#

###################################################################
class FlipTest(CTSTest):
###################################################################
    '''If it's running, stop it.  If it's stopped start it.
       Overthrow the status quo...
    '''
    def __init__(self, cm):
        CTSTest.__init__(self,cm)
        self.name="Flip"
        self.start = StartTest(cm)
        self.stop = StopTest(cm)

    def __call__(self, node):
        '''Perform the 'Flip' test. '''
        self.incr("calls")
        if self.CM.ShouldBeStatus[node] == "up":
            self.incr("stopped")
            ret = self.stop(node)
            type="up->down"
            # Give the cluster time to recognize it's gone...
            time.sleep(self.CM["StableTime"])
        elif self.CM.ShouldBeStatus[node] == "down":
            self.incr("started")
            ret = self.start(node)
            type="down->up"
        else:
            return self.skipped()

        self.incr(type)
        if ret:
            return self.success()
        else:
            return self.failure("%s failure" % type)

#       Register FlipTest as a good test to run
AllTestClasses.append(FlipTest)

###################################################################
class RestartTest(CTSTest):
###################################################################
    '''Stop and restart a node'''
    def __init__(self, cm):
        CTSTest.__init__(self,cm)
        self.name="Restart"
        self.start = StartTest(cm)
        self.stop = StopTest(cm)
        self.benchmark = 1

    def __call__(self, node):
        '''Perform the 'restart' test. '''
        self.incr("calls")

        self.incr("node:" + node)

        ret1 = 1
        # If the node is already down, bring it up first so the timed part
        # below measures a full stop+start cycle.
        if self.CM.StataCM(node):
            self.incr("WasStopped")
            if not self.start(node):
                return self.failure("start (setup) failure: "+node)

        self.set_starttime()
        if not self.stop(node):
            return self.failure("stop failure: "+node)
        if not self.start(node):
            return self.failure("start failure: "+node)
        return self.success()

#       Register RestartTest as a good test to run
AllTestClasses.append(RestartTest)

###################################################################
class StonithdTest(CTSTest):
###################################################################
    '''Ask the cluster to fence "node" (via the terminate attribute) and
    verify the expected STONITH log patterns and cluster recovery.'''
    def __init__(self, cm):
        CTSTest.__init__(self, cm)
        self.name="Stonithd"
        self.startall = SimulStartLite(cm)
        self.benchmark = 1

    def __call__(self, node):
        self.incr("calls")
        # Fencing a single-node cluster is meaningless.
        if len(self.CM.Env["nodes"]) < 2:
            return self.skipped()

        ret = self.startall(None)
        if not ret:
            return self.failure("Setup failed")

        watchpats = []
        watchpats.append("Forcing node %s to be terminated" % node)
        watchpats.append("Scheduling Node %s for STONITH" % node)
        watchpats.append("Executing .* fencing operation")
        watchpats.append("stonith-ng:.*Operation .* for host '%s' with device .* returned: 0" % node)

        if not self.CM.is_node_dc(node):
            # Won't be found if the DC is shot (and there's no
            # equivalent message from stonithd)
            watchpats.append("tengine_stonith_callback: .*result=0")

        # TODO else: look for the notification on a peer once implimented

        if self.CM.Env["at-boot"] == 0:
            self.CM.debug("Expecting %s to stay down" % node)
            self.CM.ShouldBeStatus[node]="down"
        else:
            self.CM.debug("Expecting %s to come up again %d" % (node, self.CM.Env["at-boot"]))
            watchpats.append("%s crmd: .* S_STARTING -> S_PENDING" % node)
            watchpats.append("%s crmd: .* S_PENDING -> S_NOT_DC" % node)

        watch = CTS.LogWatcher(self.CM["LogFileName"], watchpats,
                               self.CM["DeadTime"] + self.CM["StableTime"] + self.CM["StartTime"])
        watch.setwatch()

        # Setting the transient "terminate" attribute triggers fencing.
        self.CM.rsh(node, "crm_attribute --node %s --type status --attr-name terminate --attr-value true" % node)

        matched = watch.lookforall()
        if matched:
            self.CM.debug("Found: "+ repr(matched))
        else:
            self.CM.log("Patterns not found: " + repr(watch.unmatched))

        self.CM.debug("Waiting for the cluster to recover")
        self.CM.cluster_stable()

        self.CM.debug("Waiting STONITHd node to come back up")
        self.CM.ns.WaitForAllNodesToComeUp(self.CM.Env["nodes"], 600)

        self.CM.debug("Waiting for the cluster to re-stabilize with all nodes")
        is_stable = self.CM.cluster_stable(self.CM["StartTime"])

        if not matched:
            return self.failure("Didn't find all expected patterns")
        elif not is_stable:
            return self.failure("Cluster did not become stable")

        return self.success()

    def errorstoignore(self):
        # Fencing messages are expected noise for this test.
        return [ "Executing .* fencing operation" ]

    def is_applicable(self):
        if not self.is_applicable_common():
            return 0

        if self.CM.Env.has_key("DoStonith"):
            return self.CM.Env["DoStonith"]

        return 1

AllTestClasses.append(StonithdTest)

###################################################################
class StartOnebyOne(CTSTest):
###################################################################
    '''Start all the nodes ~ one by one'''
    def __init__(self, cm):
        CTSTest.__init__(self,cm)
        self.name="StartOnebyOne"
        self.stopall = SimulStopLite(cm)
        self.start = StartTest(cm)
        self.ns=CTS.NodeStatus(cm.Env)

    def __call__(self, dummy):
        '''Perform the 'StartOnebyOne' test. '''
        self.incr("calls")

        #        We ignore the "node" parameter...

        #        Shut down all the nodes...
        ret = self.stopall(None)
        if not ret:
            return self.failure("Test setup failed")

        failed=[]
        self.set_starttime()
        for node in self.CM.Env["nodes"]:
            if not self.start(node):
                failed.append(node)

        if len(failed) > 0:
            return self.failure("Some node failed to start: " + repr(failed))

        return self.success()

#       Register StartOnebyOne as a good test to run
AllTestClasses.append(StartOnebyOne)

###################################################################
class SimulStart(CTSTest):
###################################################################
    '''Start all the nodes ~ simultaneously'''
    def __init__(self, cm):
        CTSTest.__init__(self,cm)
        self.name="SimulStart"
        self.stopall = SimulStopLite(cm)
        self.startall = SimulStartLite(cm)

    def __call__(self, dummy):
        '''Perform the 'SimulStart' test. '''
        self.incr("calls")

        #        We ignore the "node" parameter...

        #        Shut down all the nodes...
        ret = self.stopall(None)
        if not ret:
            return self.failure("Setup failed")

        self.CM.clear_all_caches()

        if not self.startall(None):
            return self.failure("Startall failed")

        return self.success()

#       Register SimulStart as a good test to run
AllTestClasses.append(SimulStart)

###################################################################
class SimulStop(CTSTest):
###################################################################
    '''Stop all the nodes ~ simultaneously'''
    def __init__(self, cm):
        CTSTest.__init__(self,cm)
        self.name="SimulStop"
        self.startall = SimulStartLite(cm)
        self.stopall = SimulStopLite(cm)

    def __call__(self, dummy):
        '''Perform the 'SimulStop' test. '''
        self.incr("calls")

        #     We ignore the "node" parameter...

        #     Start up all the nodes...
        ret = self.startall(None)
        if not ret:
            return self.failure("Setup failed")

        if not self.stopall(None):
            return self.failure("Stopall failed")

        return self.success()

#       Register SimulStop as a good test to run
AllTestClasses.append(SimulStop)

###################################################################
class StopOnebyOne(CTSTest):
###################################################################
    '''Stop all the nodes in order'''
    def __init__(self, cm):
        CTSTest.__init__(self,cm)
        self.name="StopOnebyOne"
        self.startall = SimulStartLite(cm)
        self.stop = StopTest(cm)

    def __call__(self, dummy):
        '''Perform the 'StopOnebyOne' test. '''
        self.incr("calls")

        #     We ignore the "node" parameter...

        #     Start up all the nodes...
        ret = self.startall(None)
        if not ret:
            return self.failure("Setup failed")

        failed=[]
        self.set_starttime()
        for node in self.CM.Env["nodes"]:
            if not self.stop(node):
                failed.append(node)

        if len(failed) > 0:
            return self.failure("Some node failed to stop: " + repr(failed))

        self.CM.clear_all_caches()
        return self.success()

#       Register StopOnebyOne as a good test to run
AllTestClasses.append(StopOnebyOne)

###################################################################
class RestartOnebyOne(CTSTest):
###################################################################
    '''Restart all the nodes in order'''
    def __init__(self, cm):
        CTSTest.__init__(self,cm)
        self.name="RestartOnebyOne"
        self.startall = SimulStartLite(cm)

    def __call__(self, dummy):
        '''Perform the 'RestartOnebyOne' test. '''
        self.incr("calls")

        #     We ignore the "node" parameter...

        #     Start up all the nodes...
        ret = self.startall(None)
        if not ret:
            return self.failure("Setup failed")

        did_fail=[]
        self.set_starttime()
        self.restart = RestartTest(self.CM)
        for node in self.CM.Env["nodes"]:
            if not self.restart(node):
                did_fail.append(node)

        if did_fail:
            return self.failure("Could not restart %d nodes: %s"
                                %(len(did_fail), repr(did_fail)))
        return self.success()

#       Register StopOnebyOne as a good test to run
AllTestClasses.append(RestartOnebyOne)

###################################################################
class PartialStart(CTSTest):
###################################################################
    '''Start a node - but tell it to stop before it finishes starting up'''
    def __init__(self, cm):
        CTSTest.__init__(self,cm)
        self.name="PartialStart"
        self.startall = SimulStartLite(cm)
        self.stopall = SimulStopLite(cm)
        #self.is_unsafe = 1

    def __call__(self, node):
        '''Perform the 'PartialStart' test. '''
        self.incr("calls")

        ret = self.stopall(None)
        if not ret:
            return self.failure("Setup failed")

#        FIXME!  This should use the CM class to get the pattern
#                then it would be applicable in general
        watchpats = []
        watchpats.append("Starting crmd")
        watch = CTS.LogWatcher(self.CM["LogFileName"], watchpats,
                               timeout=self.CM["DeadTime"]+10)
        watch.setwatch()

        # Kick off a non-blocking start, wait only until crmd begins...
        self.CM.StartaCMnoBlock(node)
        ret = watch.lookforall()
        if not ret:
            self.CM.log("Patterns not found: " + repr(watch.unmatched))
            return self.failure("Setup of %s failed" % node)

        # ...then immediately ask everything to stop again.
        ret = self.stopall(None)
        if not ret:
            return self.failure("%s did not stop in time" % node)

        return self.success()

#       Register StopOnebyOne as a good test to run
AllTestClasses.append(PartialStart)

#######################################################################
class StandbyTest(CTSTest):
#######################################################################
    '''Cycle a node through standby mode and verify resource placement.'''
    def __init__(self, cm):
        CTSTest.__init__(self,cm)
        self.name="Standby"
        self.benchmark = 1
        self.start = StartTest(cm)
        self.startall = SimulStartLite(cm)

    # make sure the node is active
    # set the node to standby mode
    # check resources, none resource should be running on the node
    # set the node to active mode
    # check resouces, resources should have been migrated back (SHOULD THEY?)

    def __call__(self, node):

        self.incr("calls")
        ret=self.startall(None)
        if not ret:
            return self.failure("Start all nodes failed")

        self.CM.debug("Make sure node %s is active" % node)
        if self.CM.StandbyStatus(node) != "off":
            if not self.CM.SetStandbyMode(node, "off"):
                return self.failure("can't set node %s to active mode" % node)

        self.CM.cluster_stable()

        status = self.CM.StandbyStatus(node)
        if status != "off":
            return self.failure("standby status of %s is [%s] but we expect [off]" % (node, status))

        self.CM.debug("Getting resources running on node %s" % node)
        rsc_on_node = self.CM.active_resources(node)

        self.CM.debug("Setting node %s to standby mode" % node)
        if not self.CM.SetStandbyMode(node, "on"):
            return self.failure("can't set node %s to standby mode" % node)

        time.sleep(30)  # Allow time for the update to be applied and cause something
        self.CM.cluster_stable()

        status = self.CM.StandbyStatus(node)
        if status != "on":
            return self.failure("standby status of %s is [%s] but we expect [on]" % (node, status))

        self.CM.debug("Checking resources")
        bad_run = self.CM.active_resources(node)
        if len(bad_run) > 0:
            rc = self.failure("%s set to standby, %s is still running on it" % (node, repr(bad_run)))
            # Best effort: put the node back to active before reporting failure.
            self.CM.debug("Setting node %s to active mode" % node)
            self.CM.SetStandbyMode(node, "off")
            return rc

        self.CM.debug("Setting node %s to active mode" % node)
        if not self.CM.SetStandbyMode(node, "off"):
            return self.failure("can't set node %s to active mode" % node)

        time.sleep(30)  # Allow time for the update to be applied and cause something
        self.CM.cluster_stable()

        status = self.CM.StandbyStatus(node)
        if status != "off":
            return self.failure("standby status of %s is [%s] but we expect [off]" % (node, status))

        return self.success()

AllTestClasses.append(StandbyTest)
####################################################################### class ValgrindTest(CTSTest): ####################################################################### '''Check for memory leaks''' def __init__(self, cm): CTSTest.__init__(self,cm) self.name="Valgrind" self.stopall = SimulStopLite(cm) self.startall = SimulStartLite(cm) self.is_valgrind = 1 self.is_loop = 1 def setup(self, node): self.incr("calls") ret=self.stopall(None) if not ret: return self.failure("Stop all nodes failed") # Enable valgrind self.logPat = "/tmp/%s-*.valgrind" % self.name self.CM.Env["valgrind-prefix"] = self.name self.CM.rsh(node, "rm -f %s" % self.logPat, None) ret=self.startall(None) if not ret: return self.failure("Start all nodes failed") for node in self.CM.Env["nodes"]: (rc, output) = self.CM.rsh(node, "ps u --ppid `pidofproc aisexec`", None) for line in output: self.CM.debug(line) return self.success() def teardown(self, node): # Disable valgrind self.CM.Env["valgrind-prefix"] = None # Return all nodes to normal ret=self.stopall(None) if not ret: return self.failure("Stop all nodes failed") return self.success() def find_leaks(self): # Check for leaks leaked = [] self.stop = StopTest(self.CM) for node in self.CM.Env["nodes"]: (rc, ps_out) = self.CM.rsh(node, "ps u --ppid `pidofproc aisexec`", None) rc = self.stop(node) if not rc: self.failure("Couldn't shut down %s" % node) rc = self.CM.rsh(node, "grep -e indirectly.*lost:.*[1-9] -e definitely.*lost:.*[1-9] -e ERROR.*SUMMARY:.*[1-9].*errors %s" % self.logPat, 0) if rc != 1: leaked.append(node) self.failure("Valgrind errors detected on %s" % node) for line in ps_out: self.CM.log(line) (rc, output) = self.CM.rsh(node, "grep -e lost: -e SUMMARY: %s" % self.logPat, None) for line in output: self.CM.log(line) (rc, output) = self.CM.rsh(node, "cat %s" % self.logPat, None) for line in output: self.CM.debug(line) self.CM.rsh(node, "rm -f %s" % self.logPat, None) return leaked def __call__(self, node): leaked = self.find_leaks() 
if len(leaked) > 0: return self.failure("Nodes %s leaked" % repr(leaked)) return self.success() def errorstoignore(self): '''Return list of errors which should be ignored''' return [ """cib:.*readCibXmlFile:""", """HA_VALGRIND_ENABLED""" ] ####################################################################### class StandbyLoopTest(ValgrindTest): ####################################################################### '''Check for memory leaks by putting a node in and out of standby for an hour''' def __init__(self, cm): ValgrindTest.__init__(self,cm) self.name="StandbyLoop" def __call__(self, node): lpc = 0 delay = 2 failed = 0 done=time.time() + self.CM.Env["loop-minutes"]*60 while time.time() <= done and not failed: lpc = lpc + 1 time.sleep(delay) if not self.CM.SetStandbyMode(node, "on"): self.failure("can't set node %s to standby mode" % node) failed = lpc time.sleep(delay) if not self.CM.SetStandbyMode(node, "off"): self.failure("can't set node %s to active mode" % node) failed = lpc leaked = self.find_leaks() if failed: return self.failure("Iteration %d failed" % failed) elif len(leaked) > 0: return self.failure("Nodes %s leaked" % repr(leaked)) return self.success() AllTestClasses.append(StandbyLoopTest) ############################################################################## class BandwidthTest(CTSTest): ############################################################################## # Tests should not be cluster-manager-specific # If you need to find out cluster manager configuration to do this, then # it should be added to the generic cluster manager API. 
'''Test the bandwidth which heartbeat uses''' def __init__(self, cm): CTSTest.__init__(self, cm) self.name = "Bandwidth" self.start = StartTest(cm) self.__setitem__("min",0) self.__setitem__("max",0) self.__setitem__("totalbandwidth",0) self.tempfile = tempfile.mktemp(".cts") self.startall = SimulStartLite(cm) def __call__(self, node): '''Perform the Bandwidth test''' self.incr("calls") if self.CM.upcount()<1: return self.skipped() Path = self.CM.InternalCommConfig() if "ip" not in Path["mediatype"]: return self.skipped() port = Path["port"][0] port = int(port) ret = self.startall(None) if not ret: return self.failure("Test setup failed") time.sleep(5) # We get extra messages right after startup. fstmpfile = "/var/run/band_estimate" dumpcmd = "tcpdump -p -n -c 102 -i any udp port %d > %s 2>&1" \ % (port, fstmpfile) rc = self.CM.rsh(node, dumpcmd) if rc == 0: farfile = "root@%s:%s" % (node, fstmpfile) self.CM.rsh.cp(farfile, self.tempfile) Bandwidth = self.countbandwidth(self.tempfile) if not Bandwidth: self.CM.log("Could not compute bandwidth.") return self.success() intband = int(Bandwidth + 0.5) self.CM.log("...bandwidth: %d bits/sec" % intband) self.Stats["totalbandwidth"] = self.Stats["totalbandwidth"] + Bandwidth if self.Stats["min"] == 0: self.Stats["min"] = Bandwidth if Bandwidth > self.Stats["max"]: self.Stats["max"] = Bandwidth if Bandwidth < self.Stats["min"]: self.Stats["min"] = Bandwidth self.CM.rsh(node, "rm -f %s" % fstmpfile) os.unlink(self.tempfile) return self.success() else: return self.failure("no response from tcpdump command [%d]!" 
% rc) def countbandwidth(self, file): fp = open(file, "r") fp.seek(0) count = 0 sum = 0 while 1: line = fp.readline() if not line: return None if re.search("udp",line) or re.search("UDP,", line): count=count+1 linesplit = string.split(line," ") for j in range(len(linesplit)-1): if linesplit[j]=="udp": break if linesplit[j]=="length:": break try: sum = sum + int(linesplit[j+1]) except ValueError: self.CM.log("Invalid tcpdump line: %s" % line) return None T1 = linesplit[0] timesplit = string.split(T1,":") time2split = string.split(timesplit[2],".") time1 = (long(timesplit[0])*60+long(timesplit[1]))*60+long(time2split[0])+long(time2split[1])*0.000001 break while count < 100: line = fp.readline() if not line: return None if re.search("udp",line) or re.search("UDP,", line): count = count+1 linessplit = string.split(line," ") for j in range(len(linessplit)-1): if linessplit[j] =="udp": break if linesplit[j]=="length:": break try: sum=int(linessplit[j+1])+sum except ValueError: self.CM.log("Invalid tcpdump line: %s" % line) return None T2 = linessplit[0] timesplit = string.split(T2,":") time2split = string.split(timesplit[2],".") time2 = (long(timesplit[0])*60+long(timesplit[1]))*60+long(time2split[0])+long(time2split[1])*0.000001 time = time2-time1 if (time <= 0): return 0 return (sum*8)/time def is_applicable(self): '''BandwidthTest never applicable''' return 0 AllTestClasses.append(BandwidthTest) ################################################################### class ResourceRecover(CTSTest): ################################################################### def __init__(self, cm): CTSTest.__init__(self,cm) self.name="ResourceRecover" self.start = StartTest(cm) self.startall = SimulStartLite(cm) self.max=30 self.rid=None #self.is_unsafe = 1 self.benchmark = 1 # these are the values used for the new LRM API call self.action = "asyncmon" self.interval = 0 def __call__(self, node): '''Perform the 'ResourceRecover' test. 
''' self.incr("calls") ret = self.startall(None) if not ret: return self.failure("Setup failed") resourcelist = self.CM.active_resources(node) # if there are no resourcelist, return directly if len(resourcelist)==0: self.CM.log("No active resources on %s" % node) return self.skipped() self.rid = self.CM.Env.RandomGen.choice(resourcelist) rsc = None (rc, lines) = self.CM.rsh(node, "crm_resource -c", None) for line in lines: if re.search("^Resource", line): tmp = AuditResource(self.CM, line) if tmp.id == self.rid: rsc = tmp # Handle anonymous clones that get renamed self.rid = rsc.clone_id break if not rsc: return self.failure("Could not find %s in the resource list" % self.rid) self.CM.debug("Shooting %s aka. %s" % (rsc.clone_id, rsc.id)) pats = [] pats.append("Updating failcount for %s on .* after .* %s" % (self.rid, self.action)) if rsc.managed(): pats.append("crmd:.* Performing .* op=%s_stop_0" % self.rid) if rsc.unique(): pats.append("crmd:.* Performing .* op=%s_start_0" % self.rid) pats.append("crmd:.* LRM operation %s_start_0.*confirmed.*ok" % self.rid) else: # Anonymous clones may get restarted with a different clone number pats.append("crmd:.* Performing .* op=.*_start_0") pats.append("crmd:.* LRM operation .*_start_0.*confirmed.*ok") watch = CTS.LogWatcher(self.CM["LogFileName"], pats, timeout=60) watch.setwatch() self.CM.rsh(node, "crm_resource -F -r %s -H %s &>/dev/null" % (self.rid, node)) watch.lookforall() self.CM.cluster_stable() recovered=self.CM.ResourceLocation(self.rid) if watch.unmatched: return self.failure("Patterns not found: %s" % repr(watch.unmatched)) elif rsc.unique() and len(recovered) > 1: return self.failure("%s is now active on more than one node: %s"%(self.rid, repr(recovered))) elif len(recovered) > 0: self.CM.debug("%s is running on: %s" %(self.rid, repr(recovered))) elif rsc.managed(): return self.failure("%s was not recovered and is inactive" % self.rid) return self.success() def errorstoignore(self): '''Return list of errors 
which should be ignored''' return [ """Updating failcount for %s""" % self.rid, """Unknown operation: fail""", """ERROR: sending stonithRA op to stonithd failed.""", """ERROR: process_lrm_event: LRM operation %s_%s_%d""" % (self.rid, self.action, self.interval), """ERROR: process_graph_event: Action %s_%s_%d .* initiated outside of a transition""" % (self.rid, self.action, self.interval), ] AllTestClasses.append(ResourceRecover) ################################################################### class ComponentFail(CTSTest): ################################################################### def __init__(self, cm): CTSTest.__init__(self,cm) self.name="ComponentFail" self.startall = SimulStartLite(cm) self.complist = cm.Components() self.patterns = [] self.okerrpatterns = [] self.is_unsafe = 1 def __call__(self, node): '''Perform the 'ComponentFail' test. ''' self.incr("calls") self.patterns = [] self.okerrpatterns = [] # start all nodes ret = self.startall(None) if not ret: return self.failure("Setup failed") if not self.CM.cluster_stable(self.CM["StableTime"]): return self.failure("Setup failed - unstable") node_is_dc = self.CM.is_node_dc(node, None) # select a component to kill chosen = self.CM.Env.RandomGen.choice(self.complist) while chosen.dc_only == 1 and node_is_dc == 0: chosen = self.CM.Env.RandomGen.choice(self.complist) self.CM.debug("...component %s (dc=%d,boot=%d)" % (chosen.name, node_is_dc,chosen.triggersreboot)) self.incr(chosen.name) if chosen.name != "aisexec": if self.CM["Name"] != "crm-lha" or chosen.name != "pengine": self.patterns.append(self.CM["Pat:ChildKilled"] %(node, chosen.name)) self.patterns.append(self.CM["Pat:ChildRespawn"] %(node, chosen.name)) self.patterns.extend(chosen.pats) if node_is_dc: self.patterns.extend(chosen.dc_pats) # In an ideal world, this next stuff should be in the "chosen" object as a member function if self.CM["Name"] == "crm-lha" and chosen.triggersreboot: # Make sure the node goes down and then comes back up if 
it should reboot... for other in self.CM.Env["nodes"]: if other != node: self.patterns.append(self.CM["Pat:They_stopped"] %(other, node)) self.patterns.append(self.CM["Pat:Slave_started"] % node) self.patterns.append(self.CM["Pat:Local_started"] % node) if chosen.dc_only: # Sometimes these will be in the log, and sometimes they won't... self.okerrpatterns.append("%s crmd:.*Process %s:.* exited" %(node, chosen.name)) self.okerrpatterns.append("%s crmd:.*I_ERROR.*crmdManagedChildDied" %node) self.okerrpatterns.append("%s crmd:.*The %s subsystem terminated unexpectedly" %(node, chosen.name)) self.okerrpatterns.append("ERROR: Client .* exited with return code") else: # Sometimes this won't be in the log... self.okerrpatterns.append(self.CM["Pat:ChildKilled"] %(node, chosen.name)) self.okerrpatterns.append(self.CM["Pat:ChildRespawn"] %(node, chosen.name)) self.okerrpatterns.append(self.CM["Pat:ChildExit"]) # supply a copy so self.patterns doesnt end up empty tmpPats = [] tmpPats.extend(self.patterns) self.patterns.extend(chosen.badnews_ignore) # Look for STONITH ops, depending on Env["at-boot"] we might need to change the nodes status stonithPats = [] stonithPats.append("stonith-ng:.*Operation .* for host '%s' with device .* returned: 0" % node) stonith = CTS.LogWatcher(self.CM["LogFileName"], stonithPats, 0) stonith.setwatch() # set the watch for stable watch = CTS.LogWatcher( self.CM["LogFileName"], tmpPats, self.CM["DeadTime"] + self.CM["StableTime"] + self.CM["StartTime"]) watch.setwatch() # kill the component chosen.kill(node) # check to see Heartbeat noticed matched = watch.lookforall(allow_multiple_matches=1) if matched: self.CM.debug("Found: "+ repr(matched)) else: self.CM.log("Patterns not found: " + repr(watch.unmatched)) if self.CM.Env["at-boot"] == 0: self.CM.debug("Checking if %s was shot" % node) shot = stonith.look(60) if shot: self.CM.debug("Found: "+ repr(shot)) self.CM.ShouldBeStatus[node]="down" self.CM.debug("Waiting for the cluster to recover") 
self.CM.cluster_stable() self.CM.debug("Waiting for any STONITHd node to come back up") self.CM.ns.WaitForAllNodesToComeUp(self.CM.Env["nodes"], 600) self.CM.debug("Waiting for the cluster to re-stabilize with all nodes") is_stable = self.CM.cluster_stable(self.CM["StartTime"]) if not matched: return self.failure("Didn't find all expected patterns") elif not is_stable: return self.failure("Cluster did not become stable") return self.success() def errorstoignore(self): '''Return list of errors which should be ignored''' # Note that okerrpatterns refers to the last time we ran this test # The good news is that this works fine for us... self.okerrpatterns.extend(self.patterns) return self.okerrpatterns AllTestClasses.append(ComponentFail) #################################################################### class SplitBrainTest(CTSTest): #################################################################### '''It is used to test split-brain. when the path between the two nodes break check the two nodes both take over the resource''' def __init__(self,cm): CTSTest.__init__(self,cm) self.name = "SplitBrain" self.start = StartTest(cm) self.startall = SimulStartLite(cm) self.is_experimental = 1 def isolate_partition(self, partition): other_nodes = [] other_nodes.extend(self.CM.Env["nodes"]) for node in partition: try: other_nodes.remove(node) except ValueError: self.CM.log("Node "+node+" not in " + repr(self.CM.Env["nodes"]) + " from " +repr(partition)) if len(other_nodes) == 0: return 1 self.CM.debug("Creating partition: " + repr(partition)) self.CM.debug("Everyone else: " + repr(other_nodes)) for node in partition: if not self.CM.isolate_node(node, other_nodes): self.CM.log("Could not isolate %s" % node) return 0 return 1 def heal_partition(self, partition): other_nodes = [] other_nodes.extend(self.CM.Env["nodes"]) for node in partition: try: other_nodes.remove(node) except ValueError: self.CM.log("Node "+node+" not in " + repr(self.CM.Env["nodes"])) if len(other_nodes) == 
0: return 1 self.CM.debug("Healing partition: " + repr(partition)) self.CM.debug("Everyone else: " + repr(other_nodes)) for node in partition: self.CM.unisolate_node(node, other_nodes) def __call__(self, node): '''Perform split-brain test''' self.incr("calls") self.passed = 1 partitions = {} ret = self.startall(None) if not ret: return self.failure("Setup failed") while 1: # Retry until we get multiple partitions partitions = {} p_max = len(self.CM.Env["nodes"]) for node in self.CM.Env["nodes"]: p = self.CM.Env.RandomGen.randint(1, p_max) if not partitions.has_key(p): partitions[p]= [] partitions[p].append(node) p_max = len(partitions.keys()) if p_max > 1: break # else, try again self.CM.debug("Created %d partitions" % p_max) for key in partitions.keys(): self.CM.debug("Partition["+str(key)+"]:\t"+repr(partitions[key])) # Disabling STONITH to reduce test complexity for now self.CM.rsh(node, "crm_attribute -n stonith-enabled -v false") for key in partitions.keys(): self.isolate_partition(partitions[key]) count = 30 while count > 0: if len(self.CM.find_partitions()) != p_max: time.sleep(10) else: break else: self.failure("Expected partitions were not created") # Target number of partitions formed - wait for stability if not self.CM.cluster_stable(): self.failure("Partitioned cluster not stable") # Now audit the cluster state self.CM.partitions_expected = p_max if not self.audit(): self.failure("Audits failed") self.CM.partitions_expected = 1 # And heal them again for key in partitions.keys(): self.heal_partition(partitions[key]) # Wait for a single partition to form count = 30 while count > 0: if len(self.CM.find_partitions()) != 1: time.sleep(10) count -= 1 else: break else: self.failure("Cluster did not reform") # Wait for it to have the right number of members count = 30 while count > 0: members = [] partitions = self.CM.find_partitions() if len(partitions) > 0: members = partitions[0].split() if len(members) != len(self.CM.Env["nodes"]): time.sleep(10) count -= 1 
else: break else: self.failure("Cluster did not completely reform") # Wait up to 20 minutes - the delay is more preferable than # trying to continue with in a messed up state if not self.CM.cluster_stable(1200): self.failure("Reformed cluster not stable") answer = raw_input('Continue? [nY]') if answer and answer == "n": raise ValueError("Reformed cluster not stable") # Turn fencing back on if self.CM.Env["DoStonith"]: self.CM.rsh(node, "crm_attribute -D -n stonith-enabled") self.CM.cluster_stable() if self.passed: return self.success() return self.failure("See previous errors") def errorstoignore(self): '''Return list of errors which are 'normal' and should be ignored''' return [ "Another DC detected:", "ERROR: attrd_cib_callback: .*Application of an update diff failed", "crmd_ha_msg_callback:.*not in our membership list", "CRIT:.*node.*returning after partition", ] def is_applicable(self): if not self.is_applicable_common(): return 0 return len(self.CM.Env["nodes"]) > 2 AllTestClasses.append(SplitBrainTest) #################################################################### class Reattach(CTSTest): #################################################################### def __init__(self, cm): CTSTest.__init__(self,cm) self.name="Reattach" self.startall = SimulStartLite(cm) self.restart1 = RestartTest(cm) self.stopall = SimulStopLite(cm) self.is_unsafe = 0 # Handled by canrunnow() def setup(self, node): return self.startall(None) def canrunnow(self, node): '''Return TRUE if we can meaningfully run right now''' if self.find_ocfs2_resources(node): self.CM.log("Detach/Reattach scenarios are not possible with OCFS2 services present") return 0 return 1 def __call__(self, node): self.incr("calls") pats = [] managed = CTS.LogWatcher(self.CM["LogFileName"], ["is-managed-default"], timeout=60) managed.setwatch() self.CM.debug("Disable resource management") self.CM.rsh(node, "crm_attribute -n is-managed-default -v false") if not managed.lookforall(): self.CM.log("Patterns not 
found: " + repr(managed.unmatched)) return self.failure("Resource management not disabled") pats = [] pats.append("crmd:.*Performing.*_stop_0") pats.append("crmd:.*Performing.*_start_0") pats.append("crmd:.*Performing.*_promote_0") pats.append("crmd:.*Performing.*_demote_0") pats.append("crmd:.*Performing.*_migrate_.*_0") watch = CTS.LogWatcher(self.CM["LogFileName"], pats, timeout=60) watch.setwatch() self.CM.debug("Shutting down the cluster") ret = self.stopall(None) if not ret: self.CM.debug("Re-enable resource management") self.CM.rsh(node, "crm_attribute -D -n is-managed-default") return self.failure("Couldn't shut down the cluster") self.CM.debug("Bringing the cluster back up") ret = self.startall(None) if not ret: self.CM.debug("Re-enable resource management") self.CM.rsh(node, "crm_attribute -D -n is-managed-default") return self.failure("Couldn't restart the cluster") if self.local_badnews("ResourceActivity:", watch): self.CM.debug("Re-enable resource management") self.CM.rsh(node, "crm_attribute -D -n is-managed-default") return self.failure("Resources stopped or started during cluster restart") watch = CTS.LogWatcher(self.CM["LogFileName"], pats, timeout=60) watch.setwatch() managed = CTS.LogWatcher(self.CM["LogFileName"], ["is-managed-default"], timeout=60) managed.setwatch() self.CM.debug("Re-enable resource management") self.CM.rsh(node, "crm_attribute -D -n is-managed-default") if not managed.lookforall(): self.CM.log("Patterns not found: " + repr(managed.unmatched)) return self.failure("Resource management not enabled") self.CM.cluster_stable() # Ignore actions for STONITH resources ignore = [] (rc, lines) = self.CM.rsh(node, "crm_resource -c", None) for line in lines: if re.search("^Resource", line): r = AuditResource(self.CM, line) if r.rclass == "stonith": self.CM.debug("Ignoring: crmd:.*Performing.*op=%s_.*_0" % r.id) ignore.append("crmd:.*Performing.*op=%s_.*_0" % r.id) if self.local_badnews("ResourceActivity:", watch, ignore): return 
self.failure("Resources stopped or started after resource management was re-enabled") return ret def errorstoignore(self): '''Return list of errors which should be ignored''' return [ "You may ignore this error if it is unmanaged.", "pingd: .*ERROR: send_ipc_message:", "pingd: .*ERROR: send_update:", ] def is_applicable(self): if self.CM["Name"] == "crm-lha": return None return 1 AllTestClasses.append(Reattach) #################################################################### class SpecialTest1(CTSTest): #################################################################### '''Set up a custom test to cause quorum failure issues for Andrew''' def __init__(self, cm): CTSTest.__init__(self,cm) self.name="SpecialTest1" self.startall = SimulStartLite(cm) self.restart1 = RestartTest(cm) self.stopall = SimulStopLite(cm) def __call__(self, node): '''Perform the 'SpecialTest1' test for Andrew. ''' self.incr("calls") # Shut down all the nodes... ret = self.stopall(None) if not ret: return ret # Start the selected node ret = self.restart1(node) if not ret: return ret # Start all remaining nodes ret = self.startall(None) return ret AllTestClasses.append(SpecialTest1) #################################################################### class HAETest(CTSTest): #################################################################### '''Set up a custom test to cause quorum failure issues for Andrew''' def __init__(self, cm): CTSTest.__init__(self,cm) self.name="HAETest" self.stopall = SimulStopLite(cm) self.startall = SimulStartLite(cm) self.is_loop = 1 def setup(self, node): # Start all remaining nodes ret = self.startall(None) if not ret: return self.failure("Couldn't start all nodes") return self.success() def teardown(self, node): # Stop everything ret = self.stopall(None) if not ret: return self.failure("Couldn't stop all nodes") return self.success() def wait_on_state(self, node, resource, expected_clones, attempts=240): while attempts > 0: active=0 (rc, lines) = 
self.CM.rsh(node, "crm_resource -r %s -W -Q" % resource, stdout=None) # Hack until crm_resource does the right thing if rc == 0 and lines: active = len(lines) if len(lines) == expected_clones: return 1 elif rc == 1: self.CM.debug("Resource %s is still inactive" % resource) elif rc == 234: self.CM.log("Unknown resource %s" % resource) return 0 elif rc == 246: self.CM.log("Cluster is inactive") return 0 elif rc != 0: self.CM.log("Call to crm_resource failed, rc=%d" % rc) return 0 else: self.CM.debug("Resource %s is active on %d times instead of %d" % (resource, active, expected_clones)) attempts -= 1 time.sleep(1) return 0 def find_dlm(self, node): self.r_dlm = None (rc, lines) = self.CM.rsh(node, "crm_resource -c", None) for line in lines: if re.search("^Resource", line): r = AuditResource(self.CM, line) if r.rtype == "controld" and r.parent != "NA": self.CM.debug("Found dlm: %s" % self.r_dlm) self.r_dlm = r.parent return 1 return 0 def find_hae_resources(self, node): self.r_dlm = None self.r_o2cb = None self.r_ocfs2 = [] if self.find_dlm(node): self.find_ocfs2_resources(node) def is_applicable(self): if not self.is_applicable_common(): return 0 if self.CM.Env["Schema"] == "hae": return 1 return None #################################################################### class HAERoleTest(HAETest): #################################################################### def __init__(self, cm): '''Lars' mount/unmount test for the HA extension. 
''' HAETest.__init__(self,cm) self.name="HAERoleTest" def change_state(self, node, resource, target): rc = self.CM.rsh(node, "crm_resource -r %s -p target-role -v %s --meta" % (resource, target)) return rc def __call__(self, node): self.incr("calls") lpc = 0 failed = 0 delay = 2 done=time.time() + self.CM.Env["loop-minutes"]*60 self.find_hae_resources(node) clone_max = len(self.CM.Env["nodes"]) while time.time() <= done and not failed: lpc = lpc + 1 self.change_state(node, self.r_dlm, "Stopped") if not self.wait_on_state(node, self.r_dlm, 0): self.failure("%s did not go down correctly" % self.r_dlm) failed = lpc self.change_state(node, self.r_dlm, "Started") if not self.wait_on_state(node, self.r_dlm, clone_max): self.failure("%s did not come up correctly" % self.r_dlm) failed = lpc if not self.wait_on_state(node, self.r_o2cb, clone_max): self.failure("%s did not come up correctly" % self.r_o2cb) failed = lpc for fs in self.r_ocfs2: if not self.wait_on_state(node, fs, clone_max): self.failure("%s did not come up correctly" % fs) failed = lpc if failed: return self.failure("iteration %d failed" % failed) return self.success() AllTestClasses.append(HAERoleTest) #################################################################### class HAEStandbyTest(HAETest): #################################################################### '''Set up a custom test to cause quorum failure issues for Andrew''' def __init__(self, cm): HAETest.__init__(self,cm) self.name="HAEStandbyTest" def change_state(self, node, resource, target): rc = self.CM.rsh(node, "crm_standby -l reboot -v %s" % (target)) return rc def __call__(self, node): self.incr("calls") lpc = 0 failed = 0 done=time.time() + self.CM.Env["loop-minutes"]*60 self.find_hae_resources(node) clone_max = len(self.CM.Env["nodes"]) while time.time() <= done and not failed: lpc = lpc + 1 self.change_state(node, self.r_dlm, "true") if not self.wait_on_state(node, self.r_dlm, clone_max-1): self.failure("%s did not go down correctly" 
% self.r_dlm) failed = lpc self.change_state(node, self.r_dlm, "false") if not self.wait_on_state(node, self.r_dlm, clone_max): self.failure("%s did not come up correctly" % self.r_dlm) failed = lpc if not self.wait_on_state(node, self.r_o2cb, clone_max): self.failure("%s did not come up correctly" % self.r_o2cb) failed = lpc for fs in self.r_ocfs2: if not self.wait_on_state(node, fs, clone_max): self.failure("%s did not come up correctly" % fs) failed = lpc if failed: return self.failure("iteration %d failed" % failed) return self.success() AllTestClasses.append(HAEStandbyTest) ################################################################### class NearQuorumPointTest(CTSTest): ################################################################### ''' This test brings larger clusters near the quorum point (50%). In addition, it will test doing starts and stops at the same time. Here is how I think it should work: - loop over the nodes and decide randomly which will be up and which will be down Use a 50% probability for each of up/down. - figure out what to do to get into that state from the current state - in parallel, bring up those going up and bring those going down. ''' def __init__(self, cm): CTSTest.__init__(self,cm) self.name="NearQuorumPoint" def __call__(self, dummy): '''Perform the 'NearQuorumPoint' test. 
''' self.incr("calls") startset = [] stopset = [] #decide what to do with each node for node in self.CM.Env["nodes"]: action = self.CM.Env.RandomGen.choice(["start","stop"]) #action = self.CM.Env.RandomGen.choice(["start","stop","no change"]) if action == "start" : startset.append(node) elif action == "stop" : stopset.append(node) self.CM.debug("start nodes:" + repr(startset)) self.CM.debug("stop nodes:" + repr(stopset)) #add search patterns watchpats = [ ] for node in stopset: if self.CM.ShouldBeStatus[node] == "up": watchpats.append(self.CM["Pat:We_stopped"] % node) for node in startset: if self.CM.ShouldBeStatus[node] == "down": #watchpats.append(self.CM["Pat:Slave_started"] % node) watchpats.append(self.CM["Pat:Local_started"] % node) else: for stopping in stopset: if self.CM.ShouldBeStatus[stopping] == "up": watchpats.append(self.CM["Pat:They_stopped"] % (node, stopping)) if len(watchpats) == 0: return self.skipped() if len(startset) != 0: watchpats.append(self.CM["Pat:DC_IDLE"]) watch = CTS.LogWatcher(self.CM["LogFileName"], watchpats , timeout=self.CM["DeadTime"]+10) watch.setwatch() #begin actions for node in stopset: if self.CM.ShouldBeStatus[node] == "up": self.CM.StopaCMnoBlock(node) for node in startset: if self.CM.ShouldBeStatus[node] == "down": self.CM.StartaCMnoBlock(node) #get the result if watch.lookforall(): self.CM.cluster_stable() return self.success() self.CM.log("Warn: Patterns not found: " + repr(watch.unmatched)) #get the "bad" nodes upnodes = [] for node in stopset: if self.CM.StataCM(node) == 1: upnodes.append(node) downnodes = [] for node in startset: if self.CM.StataCM(node) == 0: downnodes.append(node) if upnodes == [] and downnodes == []: self.CM.cluster_stable() + + # Make sure they're completely down with no residule + for node in stopset: + self.CM.rsh(node, self["StopCmd"]) + return self.success() if len(upnodes) > 0: self.CM.log("Warn: Unstoppable nodes: " + repr(upnodes)) if len(downnodes) > 0: self.CM.log("Warn: Unstartable 
nodes: " + repr(downnodes)) return self.failure() AllTestClasses.append(NearQuorumPointTest) ################################################################### class RollingUpgradeTest(CTSTest): ################################################################### '''Perform a rolling upgrade of the cluster''' def __init__(self, cm): CTSTest.__init__(self,cm) self.name="RollingUpgrade" self.start = StartTest(cm) self.stop = StopTest(cm) self.stopall = SimulStopLite(cm) self.startall = SimulStartLite(cm) def setup(self, node): # Start all remaining nodes ret = self.stopall(None) if not ret: return self.failure("Couldn't stop all nodes") for node in self.CM.Env["nodes"]: if not self.downgrade(node, None): return self.failure("Couldn't downgrade %s" % node) ret = self.startall(None) if not ret: return self.failure("Couldn't start all nodes") return self.success() def teardown(self, node): # Stop everything ret = self.stopall(None) if not ret: return self.failure("Couldn't stop all nodes") for node in self.CM.Env["nodes"]: if not self.upgrade(node, None): return self.failure("Couldn't upgrade %s" % node) return self.success() def install(self, node, version, start=1, flags="--force"): target_dir = "/tmp/rpm-%s" % version src_dir = "%s/%s" % (self.CM.Env["rpm-dir"], version) self.CM.log("Installing %s on %s with %s" % (version, node, flags)) if not self.stop(node): return self.failure("stop failure: "+node) rc = self.CM.rsh(node, "mkdir -p %s" % target_dir) rc = self.CM.rsh(node, "rm -f %s/*.rpm" % target_dir) (rc, lines) = self.CM.rsh(node, "ls -1 %s/*.rpm" % src_dir, None) for line in lines: line = line[:-1] rc = self.CM.rsh.cp("%s" % (line), "%s:%s/" % (node, target_dir)) rc = self.CM.rsh(node, "rpm -Uvh %s %s/*.rpm" % (flags, target_dir)) if start and not self.start(node): return self.failure("start failure: "+node) return self.success() def upgrade(self, node, start=1): return self.install(node, self.CM.Env["current-version"], start) def downgrade(self, node, 
start=1): return self.install(node, self.CM.Env["previous-version"], start, "--force --nodeps") def __call__(self, node): '''Perform the 'Rolling Upgrade' test. ''' self.incr("calls") for node in self.CM.Env["nodes"]: if self.upgrade(node): return self.failure("Couldn't upgrade %s" % node) self.CM.cluster_stable() return self.success() def is_applicable(self): if not self.is_applicable_common(): return None if not self.CM.Env.has_key("rpm-dir"): return None if not self.CM.Env.has_key("current-version"): return None if not self.CM.Env.has_key("previous-version"): return None return 1 # Register RestartTest as a good test to run AllTestClasses.append(RollingUpgradeTest) ################################################################### class BSC_AddResource(CTSTest): ################################################################### '''Add a resource to the cluster''' def __init__(self, cm): CTSTest.__init__(self, cm) self.name="AddResource" self.resource_offset = 0 self.cib_cmd="""cibadmin -C -o %s -X '%s' """ def __call__(self, node): self.incr("calls") self.resource_offset = self.resource_offset + 1 r_id = "bsc-rsc-%s-%d" % (node, self.resource_offset) start_pat = "crmd.*%s_start_0.*confirmed.*ok" patterns = [] patterns.append(start_pat % r_id) watch = CTS.LogWatcher( self.CM["LogFileName"], patterns, self.CM["DeadTime"]) watch.setwatch() fields = string.split(self.CM.Env["IPBase"], '.') fields[3] = str(int(fields[3])+1) ip = string.join(fields, '.') self.CM.Env["IPBase"] = ip if not self.make_ip_resource(node, r_id, "ocf", "IPaddr", ip): return self.failure("Make resource %s failed" % r_id) failed = 0 watch_result = watch.lookforall() if watch.unmatched: for regex in watch.unmatched: self.CM.log ("Warn: Pattern not found: %s" % (regex)) failed = 1 if failed: return self.failure("Resource pattern(s) not found") if not self.CM.cluster_stable(self.CM["DeadTime"]): return self.failure("Unstable cluster") return self.success() def make_ip_resource(self, node, id, 
rclass, type, ip): self.CM.log("Creating %s::%s:%s (%s) on %s" % (rclass,type,id,ip,node)) rsc_xml=""" """ % (id, rclass, type, id, id, ip) node_constraint=""" """ % (id, id, id, id, node) rc = 0 (rc, lines) = self.CM.rsh(node, self.cib_cmd % ("constraints", node_constraint), None) if rc != 0: self.CM.log("Constraint creation failed: %d" % rc) return None (rc, lines) = self.CM.rsh(node, self.cib_cmd % ("resources", rsc_xml), None) if rc != 0: self.CM.log("Resource creation failed: %d" % rc) return None return 1 def is_applicable(self): if self.CM.Env["DoBSC"]: return 1 return None class SimulStopLite(CTSTest): ################################################################### '''Stop any active nodes ~ simultaneously''' def __init__(self, cm): CTSTest.__init__(self,cm) self.name="SimulStopLite" def __call__(self, dummy): '''Perform the 'SimulStopLite' setup work. ''' self.incr("calls") self.CM.debug("Setup: " + self.name) # We ignore the "node" parameter... watchpats = [ ] for node in self.CM.Env["nodes"]: if self.CM.ShouldBeStatus[node] == "up": self.incr("WasStarted") watchpats.append(self.CM["Pat:We_stopped"] % node) #if self.CM.Env["use_logd"]: # watchpats.append(self.CM["Pat:Logd_stopped"] % node) if len(watchpats) == 0: self.CM.clear_all_caches() return self.success() # Stop all the nodes - at about the same time... 
watch = CTS.LogWatcher(self.CM["LogFileName"], watchpats , timeout=self.CM["DeadTime"]+10) watch.setwatch() self.set_starttime() for node in self.CM.Env["nodes"]: if self.CM.ShouldBeStatus[node] == "up": self.CM.StopaCMnoBlock(node) if watch.lookforall(): self.CM.clear_all_caches() + + # Make sure they're completely down with no residule + for node in self.CM.Env["nodes"]: + self.CM.rsh(node, self["StopCmd"]) + return self.success() did_fail=0 up_nodes = [] for node in self.CM.Env["nodes"]: if self.CM.StataCM(node) == 1: did_fail=1 up_nodes.append(node) if did_fail: return self.failure("Active nodes exist: " + repr(up_nodes)) self.CM.log("Warn: All nodes stopped but CTS didnt detect: " + repr(watch.unmatched)) self.CM.clear_all_caches() return self.failure("Missing log message: "+repr(watch.unmatched)) def is_applicable(self): '''SimulStopLite is a setup test and never applicable''' return 0 ################################################################### class SimulStartLite(CTSTest): ################################################################### '''Start any stopped nodes ~ simultaneously''' def __init__(self, cm): CTSTest.__init__(self,cm) self.name="SimulStartLite" def __call__(self, dummy): '''Perform the 'SimulStartList' setup work. ''' self.incr("calls") self.CM.debug("Setup: " + self.name) # We ignore the "node" parameter... watchpats = [ ] uppat = self.CM["Pat:Slave_started"] if self.CM.upcount() == 0: uppat = self.CM["Pat:Local_started"] for node in self.CM.Env["nodes"]: if self.CM.ShouldBeStatus[node] == "down": self.incr("WasStopped") watchpats.append(uppat % node) if len(watchpats) == 0: return self.success() watchpats.append(self.CM["Pat:DC_IDLE"]) # Start all the nodes - at about the same time... 
watch = CTS.LogWatcher(self.CM["LogFileName"], watchpats , timeout=self.CM["DeadTime"]+10) watch.setwatch() self.set_starttime() for node in self.CM.Env["nodes"]: if self.CM.ShouldBeStatus[node] == "down": self.CM.StartaCMnoBlock(node) if watch.lookforall(): for attempt in (1, 2, 3, 4, 5): if self.CM.cluster_stable(): return self.success() return self.failure("Cluster did not stabilize") did_fail=0 unstable = [] for node in self.CM.Env["nodes"]: if self.CM.StataCM(node) == 0: did_fail=1 unstable.append(node) if did_fail: return self.failure("Unstarted nodes exist: " + repr(unstable)) unstable = [] for node in self.CM.Env["nodes"]: if not self.CM.node_stable(node): did_fail=1 unstable.append(node) if did_fail: return self.failure("Unstable cluster nodes exist: " + repr(unstable)) self.CM.log("ERROR: All nodes started but CTS didnt detect: " + repr(watch.unmatched)) return self.failure() def is_applicable(self): '''SimulStartLite is a setup test and never applicable''' return 0 def TestList(cm, audits): result = [] for testclass in AllTestClasses: bound_test = testclass(cm) if bound_test.is_applicable(): bound_test.Audits = audits result.append(bound_test) return result def BenchTestList(cm, audits): all = TestList(cm, audits) result = [] for test in all: if test.benchmark: result.append(test) return result diff --git a/doc/Pacemaker_Explained/en-US/Ap-OCF.xml b/doc/Pacemaker_Explained/en-US/Ap-OCF.xml index 79a5bd4e91..f86e2d7378 100644 --- a/doc/Pacemaker_Explained/en-US/Ap-OCF.xml +++ b/doc/Pacemaker_Explained/en-US/Ap-OCF.xml @@ -1,209 +1,209 @@ More About OCF Resource Agents
Location of Custom Scripts - OCF Resource Agents are found in /usr/lib/ocf/resource.d/{provider}. + OCF Resource Agents are found in /usr/lib/ocf/resource.d/provider. When creating your own agents, you are encouraged to create a new directory under /usr/lib/ocf/resource.d/ so that they are not confused with (or overwritten by) the agents shipped with Heartbeat. So, for example, if you chose the provider name of bigCorp and wanted a new resource named bigApp, you would create a script called /usr/lib/ocf/resource.d/bigCorp/bigApp and define a resource: <primitive id="custom-app" class="ocf" provider="bigCorp" type="bigApp"/>
Actions All OCF Resource Agents are required to implement the following actions Required Actions for OCF Agents Action Description Instructions start Start the resource Return 0 on success and an appropriate error code otherwise. Must not report success until the resource is fully active. stop Stop the resource Return 0 on success and an appropriate error code otherwise. Must not report success until the resource is fully stopped. monitor Check the resource's state Exit 0 if the resource is running, 7 if it is stopped and anything else if it is failed. NOTE: The monitor script should test the state of the resource on the local machine only. meta-data Describe the resource Provide information about this resource as an XML snippet. Exit with 0. NOTE: This is not performed as root. validate-all Verify the supplied parameters are correct Exit with 0 if parameters are valid, 2 if not valid, 6 if resource is not configured.
Additional requirements (not part of the OCF specs) are placed on agents that will be used for advanced concepts like clones and multi-state resources. Optional Actions for OCF Agents Action Description Instructions promote Promote the local instance of a multi-state resource to the master/primary state Return 0 on success demote Demote the local instance of a multi-state resource to the slave/secondary state Return 0 on success notify Used by the cluster to send the agent pre and post notification events telling the resource what is or did just take place Must not fail. Must exit 0
Some actions specified in the OCF specs are not currently used by the cluster reload - reload the configuration of the resource instance without disrupting the service recover - a variant of the start action, this should try to recover a resource locally. Remember to use ocf-tester to verify that your new agent complies with the OCF standard properly.
How Does the Cluster Interpret the OCF Return Codes? The first thing the cluster does is check the return code against the expected result. If the result does not match the expected value, then the operation is considered to have failed and recovery action is initiated. There are three types of failure recovery: Types of recovery performed by the cluster Recovery Type Description Action Taken by the Cluster soft A transient error occurred Restart the resource or move it to a new location hard A non-transient error that may be specific to the current node occurred Move the resource elsewhere and prevent it from being retried on the current node fatal A non-transient error that will be common to all cluster nodes (I.e. a bad configuration was specified) Stop the resource and prevent it from being started on any cluster node
Assuming an action is considered to have failed, the following table outlines the different OCF return codes and the type of recovery the cluster will initiate when it is received. OCF Return Codes and How They are Handled OCF Return Code OCF Alias Description Recovery Type 0 OCF_SUCCESS Success. The command complete successfully. This is the expected result for all start, stop, promote and demote commands. soft 1 OCF_ERR_GENERIC Generic "there was a problem" error code. soft 2 OCF_ERR_ARGS The resource's configuration is not valid on this machine. Eg. Refers to a location/tool not found on the node. hard 3 OCF_ERR_UNIMPLEMENTED The requested action is not implemented. hard 4 OCF_ERR_PERM The resource agent does not have sufficient privileges to complete the task. hard 5 OCF_ERR_INSTALLED The tools required by the resource are not installed on this machine. hard 6 OCF_ERR_CONFIGURED The resource's configuration is invalid. Eg. A required parameters are missing. fatal 7 OCF_NOT_RUNNING The resource is safely stopped. The cluster will not attempt to stop a resource that returns this for any action. N/A 8 OCF_RUNNING_MASTER The resource is running in Master mode. soft 9 OCF_FAILED_MASTER The resource is in Master mode but has failed. The resource will be demoted, stopped and then started (and possibly promoted) again. soft other NA Custom error code. soft
Although counterintuitive, even actions that return 0 (aka. OCF_SUCCESS) can be considered to have failed. This can happen when a resource that is expected to be in the Master state is found running as a Slave, or when a resource is found active on multiple machines.
Exceptions Non-recurring monitor actions (probes) that find a resource active (or in Master mode) will not result in recovery action unless it is also found active elsewhere The recovery action taken when a resource is found active more than once is determined by the multiple-active property of the resource Recurring actions that return OCF_ERR_UNIMPLEMENTED do not cause any type of recovery
diff --git a/doc/Pacemaker_Explained/en-US/Ch-Basics.xml b/doc/Pacemaker_Explained/en-US/Ch-Basics.xml index 43b979eeb4..789ecf6224 100644 --- a/doc/Pacemaker_Explained/en-US/Ch-Basics.xml +++ b/doc/Pacemaker_Explained/en-US/Ch-Basics.xml @@ -1,308 +1,308 @@ Configuration Basics
Configuration Layout The cluster is written using XML notation and divided into two main sections; configuration and status. The status section contains the history of each resource on each node and based on this data, the cluster can construct the complete current state of the cluster. The authoritative source for the status section is the local resource manager (lrmd) process on each cluster node and the cluster will occasionally repopulate the entire section. For this reason it is never written to disk and admin's are advised against modifying it in any way. The configuration section contains the more traditional information like cluster options, lists of resources and indications of where they should be placed. The configuration section is the primary focus of this document. The configuration section itself is divided into four parts: Configuration options (called crm_config) Nodes Resources Resource relationships (called constraints) An empty configuration ]]>
The Current State of the Cluster Before one starts to configure a cluster, it is worth explaining how to view the finished product. For this purpose we have created the crm_mon utility that will display the current state of an active cluster. It can show the cluster status by node or by resource and can be used in either single-shot or dynamically-updating mode. There are also modes for displaying a list of the operations performed (grouped by node and resource) as well as information about failures. Using this tool, you can examine the state of the cluster for irregularities and see how it responds when you cause or simulate failures. Details on all the available options can be obtained using the crm_mon --help command.
Sample output from crm_mon # crm_mon ============ Last updated: Fri Nov 23 15:26:13 2007 Current DC: sles-3 (2298606a-6a8c-499a-9d25-76242f7006ec) 3 Nodes configured. 5 Resources configured. ============ Node: sles-1 (1186dc9a-324d-425a-966e-d757e693dc86): online 192.168.100.181 (heartbeat::ocf:IPaddr): Started sles-1 192.168.100.182 (heartbeat:IPaddr): Started sles-1 192.168.100.183 (heartbeat::ocf:IPaddr): Started sles-1 rsc_sles-1 (heartbeat::ocf:IPaddr): Started sles-1 child_DoFencing:2 (stonith:external/vmware): Started sles-1 Node: sles-2 (02fb99a8-e30e-482f-b3ad-0fb3ce27d088): standby Node: sles-3 (2298606a-6a8c-499a-9d25-76242f7006ec): online rsc_sles-2 (heartbeat::ocf:IPaddr): Started sles-3 rsc_sles-3 (heartbeat::ocf:IPaddr): Started sles-3 child_DoFencing:0 (stonith:external/vmware): Started sles-3
Sample output from crm_mon -n # crm_mon -n ============ Last updated: Fri Nov 23 15:26:13 2007 Current DC: sles-3 (2298606a-6a8c-499a-9d25-76242f7006ec) 3 Nodes configured. 5 Resources configured. ============ Node: sles-1 (1186dc9a-324d-425a-966e-d757e693dc86): online Node: sles-2 (02fb99a8-e30e-482f-b3ad-0fb3ce27d088): standby Node: sles-3 (2298606a-6a8c-499a-9d25-76242f7006ec): online Resource Group: group-1 192.168.100.181 (heartbeat::ocf:IPaddr): Started sles-1 192.168.100.182 (heartbeat:IPaddr): Started sles-1 192.168.100.183 (heartbeat::ocf:IPaddr): Started sles-1 rsc_sles-1 (heartbeat::ocf:IPaddr): Started sles-1 rsc_sles-2 (heartbeat::ocf:IPaddr): Started sles-3 rsc_sles-3 (heartbeat::ocf:IPaddr): Started sles-3 Clone Set: DoFencing child_DoFencing:0 (stonith:external/vmware): Started sles-3 child_DoFencing:1 (stonith:external/vmware): Stopped child_DoFencing:2 (stonith:external/vmware): Started sles-1
The DC (Designated Controller) node is where all the decisions are made and if the current DC fails a new one is elected from the remaining cluster nodes. The choice of DC is of no significance to an administrator beyond the fact that its logs will generally be more interesting.
How Should the Configuration be Updated? There are three basic rules for updating the cluster configuration: Rule 1 - Never edit the cib.xml file manually. Ever. I'm not making this up. Rule 2 - Read Rule 1 again. Rule 3 - The cluster will notice if you ignored rules 1 & 2 and refuse to use the configuration. Now that it is clear how NOT to update the configuration, we can begin to explain how you should. The most powerful tool for modifying the configuration is the cibadmin command which talks to a running cluster. With cibadmin, the user can query, add, remove, update or replace any part of the configuration and all changes take effect immediately so there is no need to perform a reload-like operation. The simplest way of using cibadmin is to use it to save the current configuration to a temporary file, edit that file with your favorite text or XML editor and then upload the revised configuration.
Safely using an editor to modify the cluster configuration cibadmin --query > tmp.xml vi tmp.xml cibadmin --replace --xml-file tmp.xml
Some of the better XML editors can make use of a Relax NG schema to help make sure any changes you make are valid. The schema describing the configuration can normally be found in /usr/lib/heartbeat/pacemaker.rng on most systems. If you only wanted to modify the resources section, you could instead do
Safely using an editor to modify a subsection of the cluster configuration cibadmin --query --obj_type resources > tmp.xml vi tmp.xml cibadmin --replace --obj_type resources --xml-file tmp.xml
to avoid modifying any other part of the configuration.
Quickly Deleting Part of the Configuration Identify the object you wish to delete. eg. Next identify the resource's tag name and id (in this case we'll chose primitive and child_DoFencing). Then simply execute: cibadmin --delete --crm_xml ‘<primitive id="child_DoFencing"/>'
Updating the Configuration Without Using XML Some common tasks can also be performed with one of the higher level tools that avoid the need to read or edit XML. To enable stonith for example, one could run: crm_attribute --attr-name stonith-enabled --attr-value true Or to see if somenode is allowed to run resources, there is: crm_standby --get-value --node-uname somenode Or to find the current location of my-test-rsc one can use: crm_resource --locate --resource my-test-rsc
Making Configuration Changes in a Sandbox Often it is desirable to preview the effects of a series of changes before updating the configuration atomically. For this purpose we have created crm_shadow which creates a "shadow" copy of the configuration and arranges for all the command line tools to use it. To begin, simply invoke crm_shadow and give it the name of a configuration to create Shadow copies are identified with a name, making it possible to have more than one and be sure to follow the simple on-screen instructions. Read the above carefully, failure to do so could result in you destroying the cluster's active configuration
Creating and displaying the active sandbox # crm_shadow --create test Setting up shadow instance Type Ctrl-D to exit the crm_shadow shell shadow[test]: shadow[test] # crm_shadow --which test
From this point on, all cluster commands will automatically use the shadow copy instead of talking to the cluster's active configuration. Once you have finished experimenting, you can either commit the changes, or discard them as shown below. Again, be sure to follow the on-screen instructions carefully. For a full list of crm_shadow options and commands, invoke it with the --help option. Using a sandbox to make multiple changes atomically shadow[test] # crm_failcount -G -r rsc_c001n01 name=fail-count-rsc_c001n01 value=0 shadow[test] # crm_standby -v on -n c001n02 shadow[test] # crm_standby -G -n c001n02 name=c001n02 scope=nodes value=on shadow[test] # cibadmin --erase --force shadow[test] # cibadmin --query ]]> shadow[test] # crm_shadow --delete test --force Now type Ctrl-D to exit the crm_shadow shell shadow[test] # exit # crm_shadow --which No shadow instance provided # cibadmin -Q ]]> Making changes in a sandbox and verifying the real configuration is untouched
Testing Your Configuration Changes We saw previously how to make a series of changes to a "shadow" copy of the configuration. Before loading the changes back into the cluster (eg. crm_shadow --commit mytest --force), it is often advisable to simulate the effect of the changes with ptest. Eg. ptest --live-check -VVVVV --save-graph tmp.graph --save-dotfile tmp.dot The tool uses the same library as the live cluster to show what it would have done given the supplied input. It's output, in addition to a significant amount of logging, is stored in two files tmp.graph and tmp.dot, both are representations of the same thing -- the cluster's response to your changes. In the graph file is stored the complete transition, containing a list of all the actions, their parameters and their pre-requisites. Because the transition graph is not terribly easy to read, the tool also generates a Graphviz dot-file representing the same information.
Small Cluster Transition An example transition graph as represented by Graphviz
Interpreting the Graphviz output Arrows indicate ordering dependencies Dashed-arrows indicate dependencies that are not present in the transition graph Actions with a dashed border of any color do not form part of the transition graph Actions with a green border form part of the transition graph Actions with a red border are ones the cluster would like to execute but are unrunnable Actions with a blue border are ones the cluster does not feel need to be executed Actions with orange text are pseudo/pretend actions that the cluster uses to simplify the graph Actions with black text are sent to the LRM - Resource actions have text of the form {rsc}_{action}_{interval} {node} + Resource actions have text of the form rsc_action_interval node Any action depending on an action with a red border will not be able to execute. Loops are really bad. Please report them to the development team. In the above example, it appears that a new node, node2, has come online and that the cluster is checking to make sure rsc1, rsc2 and rsc3 are not already running there (Indicated by the *_monitor_0 entries). Once it did that, and assuming the resources were not active there, it would have liked to stop rsc1 and rsc2 on node1 and move them to node2. However, there appears to be some problem and the cluster cannot or is not permitted to perform the stop actions which implies it also cannot perform the start actions. For some reason the cluster does not want to start rsc3 anywhere. For information on the options supported by ptest, use ptest --help
Complex Cluster Transition Another, slightly more complex, transition graph that you're not expected to be able to read
Do I Need to Update the Configuration on all Cluster Nodes? No. Any changes are immediately synchronized to the other active members of the cluster. To reduce bandwidth, the cluster only broadcasts the incremental updates that result from your changes and uses MD5 sums to ensure that each copy is completely consistent.
diff --git a/doc/Pacemaker_Explained/en-US/Ch-Nodes.xml b/doc/Pacemaker_Explained/en-US/Ch-Nodes.xml index a867b73931..5476718d98 100644 --- a/doc/Pacemaker_Explained/en-US/Ch-Nodes.xml +++ b/doc/Pacemaker_Explained/en-US/Ch-Nodes.xml @@ -1,109 +1,132 @@ Cluster Nodes
Defining a Cluster Node Each node in the cluster will have an entry in the nodes section containing its UUID, uname and type. Example cluster node entry - ]]> + ]]> In normal circumstances, the admin should let the cluster populate this information automatically from the communications and membership data. However one can use the crm_uuid tool to read an existing UUID or define a value before the cluster starts.
Describing a Cluster Node Beyond the basic definition of a node, the administrator can also describe the node's attributes, such as how much RAM, disk, what OS or kernel version it has, perhaps even its physical location. This information can then be used by the cluster when deciding where to place resources. For more information on the use of node attributes, see the section on . Node attributes can be specified ahead of time or populated later, when the cluster is running, using crm_attribute. Below is what the node's definition would look like if the admin ran the command:
- The result of using crm_attribute to specify which kernel sles-1 is running + The result of using crm_attribute to specify which kernel pcmk-1 is running - crm_attribute --type nodes --node-uname sles-1 --attr-name kernel --attr-value `uname -r` + crm_attribute --type nodes --node-uname pcmk-1 --attr-name kernel --attr-value `uname -r` + ]]>
A simpler way to determine the current value of an attribute is to use crm_attribute command again: - crm_attribute --type nodes --node-uname sles-1 --attr-name kernel --get-value + crm_attribute --type nodes --node-uname pcmk-1 --attr-name kernel --get-value By specifying --type nodes the admin tells the cluster that this attribute is persistent. There are also transient attributes which are kept in the status section which are "forgotten" whenever the node rejoins the cluster. The cluster uses this area to store a record of how many times a resource has failed on that node but administrators can also read and write to this section by specifying --type status.
Adding a New Cluster Node
OpenAIS Adding a new node is as simple as installing OpenAIS and Pacemaker, and copying /etc/ais/openais.conf and /etc/ais/authkey (if it exists) from an existing node. You may need to modify the mcastaddr option to match the new node's IP address. If a log message containing "Invalid digest" appears from OpenAIS, the keys are not consistent between the machines.
Heartbeat Provided you specified autojoin any in ha.cf, adding a new node is as simple as installing heartbeat and copying ha.cf and authkeys from an existing node. If not, then after setting up ha.cf and authkeys, you must use the hb_addnode command before starting the new node.
Removing a Cluster Node
OpenAIS - TBA + + Because the messaging and membership layers are the authoritative source for cluster nodes, deleting them from the CIB is not a reliable solution. + First one must arrange for heartbeat to forget about the node (pcmk-1 in the example below). + + On the host to be removed: + + + Find and record the node's OpenAIS id: crm_node -i + + + Stop the cluster: /etc/init.d/openais stop + + + Next, from one of the remaining active cluster nodes: + + + Tell the cluster to forget about the removed host: crm_node -R OPENAIS_ID + + + Only now is it safe to delete the node from the CIB with: + cibadmin --delete --obj_type nodes --crm_xml '<node uname="pcmk-1"/>' + cibadmin --delete --obj_type status --crm_xml '<node_status uname="pcmk-1"/>' + +
Heartbeat Because the messaging and membership layers are the authoritative source for cluster nodes, deleting them from the CIB is not a reliable solution. - First one must arrange for heartbeat to forget about the node (sles-1 in the example below). + First one must arrange for heartbeat to forget about the node (pcmk-1 in the example below). To do this, shut down heartbeat on the node and then, from one of the remaining active cluster nodes, run: - hb_delnode sles-1 + hb_delnode pcmk-1 Only then is it safe to delete the node from the CIB with: - cibadmin --delete --obj_type nodes --crm_xml ‘<node uname="sles-1"/>' - cibadmin --delete --obj_type status --crm_xml ‘<node_status uname="sles-1"/>' + cibadmin --delete --obj_type nodes --crm_xml '<node uname="pcmk-1"/>' + cibadmin --delete --obj_type status --crm_xml '<node_status uname="pcmk-1"/>'
Replacing a Cluster Node
OpenAIS The five-step guide to replacing an existing cluster node: Make sure the old node is completely stopped Give the new machine the same hostname and IP address as the old one Install the cluster software :-) Copy /etc/ais/openais.conf and /etc/ais/authkey (if it exists) to the new node Start the new cluster node If a log message containing "Invalid digest" appears from OpenAIS, the keys are not consistent between the machines.
Heartbeat The seven-step guide to replacing an existing cluster node: Make sure the old node is completely stopped Give the new machine the same hostname as the old one Go to an active cluster node and look up the UUID for the old node in /var/lib/heartbeat/hostcache Install the cluster software Copy ha.cf and authkeys to the new node On the new node, populate its UUID using crm_uuid -w and the UUID from step 2 Start the new cluster node
diff --git a/doc/Pacemaker_Explained/en-US/Ch-Stonith.xml b/doc/Pacemaker_Explained/en-US/Ch-Stonith.xml index 23ea52f015..c5ef7570e5 100644 --- a/doc/Pacemaker_Explained/en-US/Ch-Stonith.xml +++ b/doc/Pacemaker_Explained/en-US/Ch-Stonith.xml @@ -1,84 +1,84 @@ Protecting Your Data - STONITH
Why You Need STONITH STONITH is an acronym for Shoot-The-Other-Node-In-The-Head and it protects your data from being corrupted by rogue nodes or concurrent access. Just because a node is unresponsive, this doesn't mean it isn't accessing your data. The only way to be 100% sure that your data is safe, is to use STONITH so we can be certain that the node is truly offline, before allowing the data to be accessed from another node. STONITH also has a role to play in the event that a clustered service cannot be stopped. In this case, the cluster uses STONITH to force the whole node offline, thereby making it safe to start the service elsewhere.
What STONITH Device Should You Use It is crucial that the STONITH device can allow the cluster to differentiate between a node failure and a network one. The biggest mistake people make in choosing a STONITH device is to use a remote power switch (such as many on-board IPMI controllers) that shares power with the node it controls. In such cases, the cluster cannot be sure if the node is really offline, or active and suffering from a network fault. Likewise, any device that relies on the machine being active (such as SSH-based "devices" used during testing) is inappropriate.
Configuring STONITH Find the correct driver: stonith -L Since every device is different, the parameters needed to configure it will vary. - To find out the parameters required by the device: stonith -t {type} -n + To find out the parameters required by the device: stonith -t type -n Hopefully the developers chose names that make sense, if not you can query for some additional information by finding an active cluster node and running: - lrmadmin -M stonith {type} pacemaker + lrmadmin -M stonith type pacemaker The output should be XML formatted text containing additional parameter descriptions - Create a file called stonith.xml containing a primitive resource with a class of stonith, a type of {type} and a parameter for each of the values returned in step 2 + Create a file called stonith.xml containing a primitive resource with a class of stonith, a type of type and a parameter for each of the values returned in step 2 Create a clone from the primitive resource if the device can shoot more than one node and supports multiple simultaneous connections. Upload it into the CIB using cibadmin: cibadmin -C -o resources --xml-file stonith.xml
Example Assuming we have an IBM BladeCenter consisting of four nodes and the management interface is active on 10.0.0.1, then we would chose the external/ibmrsa driver in step 2 and obtain the following list of parameters
Obtaining a list of STONITH Parameters stonith -t external/ibmrsa -n hostname ipaddr userid passwd type
from which we would create a STONITH resource fragment that might look like this Sample STONITH Resource ]]>
diff --git a/pengine/clone.c b/pengine/clone.c index 1aebac844f..e3254b15d0 100644 --- a/pengine/clone.c +++ b/pengine/clone.c @@ -1,1275 +1,1329 @@ /* * Copyright (C) 2004 Andrew Beekhof * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include #include #include #include #include #define VARIANT_CLONE 1 #include gint sort_clone_instance(gconstpointer a, gconstpointer b, gpointer data_set); void child_stopping_constraints( clone_variant_data_t *clone_data, resource_t *self, resource_t *child, resource_t *last, pe_working_set_t *data_set); void child_starting_constraints( clone_variant_data_t *clone_data, resource_t *self, resource_t *child, resource_t *last, pe_working_set_t *data_set); static node_t * parent_node_instance(const resource_t *rsc, node_t *node) { node_t *ret = NULL; if(node != NULL) { ret = pe_find_node_id( rsc->parent->allowed_nodes, node->details->id); } return ret; } static gboolean did_fail(const resource_t *rsc) { if(is_set(rsc->flags, pe_rsc_failed)) { return TRUE; } slist_iter( child_rsc, resource_t, rsc->children, lpc, if(did_fail(child_rsc)) { return TRUE; } ); return FALSE; } gint sort_clone_instance(gconstpointer a, gconstpointer b, gpointer data_set) { int level = LOG_DEBUG_3; node_t *node1 = NULL; node_t *node2 = NULL; gboolean can1 = TRUE; gboolean can2 = TRUE; gboolean with_scores = TRUE; 
const resource_t *resource1 = (const resource_t*)a; const resource_t *resource2 = (const resource_t*)b; CRM_ASSERT(resource1 != NULL); CRM_ASSERT(resource2 != NULL); /* allocation order: * - active instances * - instances running on nodes with the least copies * - active instances on nodes that cant support them or are to be fenced * - failed instances * - inactive instances */ do_crm_log_unlikely(level+1, "%s ? %s", resource1->id, resource2->id); if(resource1->running_on && resource2->running_on) { if(g_list_length(resource1->running_on) < g_list_length(resource2->running_on)) { do_crm_log_unlikely(level, "%s < %s: running_on", resource1->id, resource2->id); return -1; } else if(g_list_length(resource1->running_on) > g_list_length(resource2->running_on)) { do_crm_log_unlikely(level, "%s > %s: running_on", resource1->id, resource2->id); return 1; } } if(resource1->running_on) { node1 = resource1->running_on->data; } if(resource2->running_on) { node2 = resource2->running_on->data; } if(node1) { node_t *match = pe_find_node_id(resource1->allowed_nodes, node1->details->id); if(match == NULL || match->weight < 0) { do_crm_log_unlikely(level, "%s: current location is unavailable", resource1->id); node1 = NULL; can1 = FALSE; } } if(node2) { node_t *match = pe_find_node_id(resource2->allowed_nodes, node2->details->id); if(match == NULL || match->weight < 0) { do_crm_log_unlikely(level, "%s: current location is unavailable", resource2->id); node2 = NULL; can2 = FALSE; } } if(can1 != can2) { if(can1) { do_crm_log_unlikely(level, "%s < %s: availability of current location", resource1->id, resource2->id); return -1; } do_crm_log_unlikely(level, "%s > %s: availability of current location", resource1->id, resource2->id); return 1; } if(resource1->priority < resource2->priority) { do_crm_log_unlikely(level, "%s < %s: priority", resource1->id, resource2->id); return 1; } else if(resource1->priority > resource2->priority) { do_crm_log_unlikely(level, "%s > %s: priority", 
resource1->id, resource2->id); return -1; } if(node1 == NULL && node2 == NULL) { do_crm_log_unlikely(level, "%s == %s: not active", resource1->id, resource2->id); return 0; } if(node1 != node2) { if(node1 == NULL) { do_crm_log_unlikely(level, "%s > %s: active", resource1->id, resource2->id); return 1; } else if(node2 == NULL) { do_crm_log_unlikely(level, "%s < %s: active", resource1->id, resource2->id); return -1; } } can1 = can_run_resources(node1); can2 = can_run_resources(node2); if(can1 != can2) { if(can1) { do_crm_log_unlikely(level, "%s < %s: can", resource1->id, resource2->id); return -1; } do_crm_log_unlikely(level, "%s > %s: can", resource1->id, resource2->id); return 1; } node1 = parent_node_instance(resource1, node1); node2 = parent_node_instance(resource2, node2); if(node1 != NULL && node2 == NULL) { do_crm_log_unlikely(level, "%s < %s: not allowed", resource1->id, resource2->id); return -1; } else if(node1 == NULL && node2 != NULL) { do_crm_log_unlikely(level, "%s > %s: not allowed", resource1->id, resource2->id); return 1; } if(node1 == NULL) { do_crm_log_unlikely(level, "%s == %s: not allowed", resource1->id, resource2->id); return 0; } if(node1->count < node2->count) { do_crm_log_unlikely(level, "%s < %s: count", resource1->id, resource2->id); return -1; } else if(node1->count > node2->count) { do_crm_log_unlikely(level, "%s > %s: count", resource1->id, resource2->id); return 1; } if(with_scores) { int max = 0; int lpc = 0; GListPtr list1 = node_list_dup(resource1->allowed_nodes, FALSE, FALSE); GListPtr list2 = node_list_dup(resource2->allowed_nodes, FALSE, FALSE); list1 = g_list_sort_with_data(list1, sort_node_weight, data_set); list2 = g_list_sort_with_data(list2, sort_node_weight, data_set); max = g_list_length(list1); if(max < g_list_length(list2)) { max = g_list_length(list2); } for(;lpc < max; lpc++) { node1 = g_list_nth_data(list1, lpc); node2 = g_list_nth_data(list2, lpc); if(node1 == NULL) { do_crm_log_unlikely(level, "%s < %s: node score 
NULL", resource1->id, resource2->id); pe_free_shallow(list1); pe_free_shallow(list2); return 1; } else if(node2 == NULL) { do_crm_log_unlikely(level, "%s > %s: node score NULL", resource1->id, resource2->id); pe_free_shallow(list1); pe_free_shallow(list2); return -1; } if(node1->weight < node2->weight) { do_crm_log_unlikely(level, "%s < %s: node score", resource1->id, resource2->id); pe_free_shallow(list1); pe_free_shallow(list2); return 1; } else if(node1->weight > node2->weight) { do_crm_log_unlikely(level, "%s > %s: node score", resource1->id, resource2->id); pe_free_shallow(list1); pe_free_shallow(list2); return -1; } } pe_free_shallow(list1); pe_free_shallow(list2); } can1 = did_fail(resource1); can2 = did_fail(resource2); if(can1 != can2) { if(can1) { do_crm_log_unlikely(level, "%s > %s: failed", resource1->id, resource2->id); return 1; } do_crm_log_unlikely(level, "%s < %s: failed", resource1->id, resource2->id); return -1; } if(node1 && node2) { int max = 0; int lpc = 0; GListPtr list1 = g_list_append(NULL, node_copy(resource1->running_on->data)); GListPtr list2 = g_list_append(NULL, node_copy(resource2->running_on->data)); /* Possibly a replacement for the with_scores block above */ slist_iter( constraint, rsc_colocation_t, resource1->parent->rsc_cons_lhs, lpc, do_crm_log_unlikely(level+1, "Applying %s to %s", constraint->id, resource1->id); list1 = native_merge_weights( constraint->rsc_lh, resource1->id, list1, constraint->node_attribute, constraint->score/INFINITY, FALSE); ); slist_iter( constraint, rsc_colocation_t, resource2->parent->rsc_cons_lhs, lpc, do_crm_log_unlikely(level+1, "Applying %s to %s", constraint->id, resource2->id); list2 = native_merge_weights( constraint->rsc_lh, resource2->id, list2, constraint->node_attribute, constraint->score/INFINITY, FALSE); ); list1 = g_list_sort_with_data(list1, sort_node_weight, data_set); list2 = g_list_sort_with_data(list2, sort_node_weight, data_set); max = g_list_length(list1); if(max < 
g_list_length(list2)) { max = g_list_length(list2); } for(;lpc < max; lpc++) { node1 = g_list_nth_data(list1, lpc); node2 = g_list_nth_data(list2, lpc); if(node1 == NULL) { do_crm_log_unlikely(level, "%s < %s: colocated score NULL", resource1->id, resource2->id); pe_free_shallow(list1); pe_free_shallow(list2); return 1; } else if(node2 == NULL) { do_crm_log_unlikely(level, "%s > %s: colocated score NULL", resource1->id, resource2->id); pe_free_shallow(list1); pe_free_shallow(list2); return -1; } if(node1->weight < node2->weight) { do_crm_log_unlikely(level, "%s < %s: colocated score", resource1->id, resource2->id); pe_free_shallow(list1); pe_free_shallow(list2); return 1; } else if(node1->weight > node2->weight) { do_crm_log_unlikely(level, "%s > %s: colocated score", resource1->id, resource2->id); pe_free_shallow(list1); pe_free_shallow(list2); return -1; } } pe_free_shallow(list1); pe_free_shallow(list2); } do_crm_log_unlikely(level, "%s == %s: default %d", resource1->id, resource2->id, node2->weight); return 0; } static node_t * can_run_instance(resource_t *rsc, node_t *node) { node_t *local_node = NULL; clone_variant_data_t *clone_data = NULL; if(can_run_resources(node) == FALSE) { goto bail; } else if(is_set(rsc->flags, pe_rsc_orphan)) { goto bail; } local_node = parent_node_instance(rsc, node); get_clone_variant_data(clone_data, rsc->parent); if(local_node == NULL) { crm_warn("%s cannot run on %s: node not allowed", rsc->id, node->details->uname); goto bail; } else if(local_node->count < clone_data->clone_node_max) { return local_node; } else { crm_debug_2("%s cannot run on %s: node full", rsc->id, node->details->uname); } bail: if(node) { common_update_score(rsc, node->details->id, -INFINITY); } return NULL; } static node_t * color_instance(resource_t *rsc, pe_working_set_t *data_set) { node_t *chosen = NULL; node_t *local_node = NULL; crm_debug_2("Processing %s", rsc->id); if(is_not_set(rsc->flags, pe_rsc_provisional)) { return rsc->fns->location(rsc, NULL, 
FALSE); } else if(is_set(rsc->flags, pe_rsc_allocating)) { crm_debug("Dependancy loop detected involving %s", rsc->id); return NULL; } if(rsc->allowed_nodes) { slist_iter(try_node, node_t, rsc->allowed_nodes, lpc, can_run_instance(rsc, try_node); ); } chosen = rsc->cmds->color(rsc, data_set); if(chosen) { local_node = pe_find_node_id( rsc->parent->allowed_nodes, chosen->details->id); if(local_node) { local_node->count++; } else if(is_set(rsc->flags, pe_rsc_managed)) { /* what to do? we can't enforce per-node limits in this case */ crm_config_err("%s not found in %s (list=%d)", chosen->details->id, rsc->parent->id, g_list_length(rsc->parent->allowed_nodes)); } } return chosen; } static void append_parent_colocation(resource_t *rsc, resource_t *child, gboolean all) { slist_iter(cons, rsc_colocation_t, rsc->rsc_cons, lpc, if(all || cons->score < 0 || cons->score == INFINITY) { child->rsc_cons = g_list_append(child->rsc_cons, cons); } ); slist_iter(cons, rsc_colocation_t, rsc->rsc_cons_lhs, lpc, if(all || cons->score < 0) { child->rsc_cons_lhs = g_list_append(child->rsc_cons_lhs, cons); } ); } node_t * clone_color(resource_t *rsc, pe_working_set_t *data_set) { int allocated = 0; int available_nodes = 0; clone_variant_data_t *clone_data = NULL; get_clone_variant_data(clone_data, rsc); if(is_not_set(rsc->flags, pe_rsc_provisional)) { return NULL; } else if(is_set(rsc->flags, pe_rsc_allocating)) { crm_debug("Dependancy loop detected involving %s", rsc->id); return NULL; } set_bit(rsc->flags, pe_rsc_allocating); crm_debug_2("Processing %s", rsc->id); /* this information is used by sort_clone_instance() when deciding in which * order to allocate clone instances */ slist_iter( constraint, rsc_colocation_t, rsc->rsc_cons_lhs, lpc, rsc->allowed_nodes = constraint->rsc_lh->cmds->merge_weights( constraint->rsc_lh, rsc->id, rsc->allowed_nodes, constraint->node_attribute, constraint->score/INFINITY, TRUE); ); dump_node_scores(show_scores?0:scores_log_level, rsc, __FUNCTION__, 
rsc->allowed_nodes); /* count now tracks the number of clones currently allocated */ slist_iter(node, node_t, rsc->allowed_nodes, lpc, node->count = 0; ); slist_iter(child, resource_t, rsc->children, lpc, if(g_list_length(child->running_on) > 0) { node_t *child_node = child->running_on->data; node_t *local_node = parent_node_instance( child, child->running_on->data); if(local_node) { local_node->count++; } else { crm_err("%s is running on %s which isn't allowed", child->id, child_node->details->uname); } } ); rsc->children = g_list_sort_with_data(rsc->children, sort_clone_instance, data_set); /* count now tracks the number of clones we have allocated */ slist_iter(node, node_t, rsc->allowed_nodes, lpc, node->count = 0; ); rsc->allowed_nodes = g_list_sort_with_data( rsc->allowed_nodes, sort_node_weight, data_set); slist_iter(node, node_t, rsc->allowed_nodes, lpc, if(can_run_resources(node)) { available_nodes++; } ); slist_iter(child, resource_t, rsc->children, lpc, if(allocated >= clone_data->clone_max) { crm_debug("Child %s not allocated - limit reached", child->id); resource_location(child, NULL, -INFINITY, "clone_color:limit_reached", data_set); } else if (clone_data->clone_max < available_nodes) { /* Only include positive colocation preferences of dependant resources * if not every node will get a copy of the clone */ append_parent_colocation(rsc, child, TRUE); } else { append_parent_colocation(rsc, child, FALSE); } if(color_instance(child, data_set)) { allocated++; } ); crm_debug("Allocated %d %s instances of a possible %d", allocated, rsc->id, clone_data->clone_max); clear_bit(rsc->flags, pe_rsc_provisional); clear_bit(rsc->flags, pe_rsc_allocating); return NULL; } static void clone_update_pseudo_status( resource_t *rsc, gboolean *stopping, gboolean *starting, gboolean *active) { if(rsc->children) { slist_iter(child, resource_t, rsc->children, lpc, clone_update_pseudo_status(child, stopping, starting, active) ); return; } CRM_ASSERT(active != NULL); 
CRM_ASSERT(starting != NULL); CRM_ASSERT(stopping != NULL); if(rsc->running_on) { *active = TRUE; } slist_iter( action, action_t, rsc->actions, lpc, if(*starting && *stopping) { return; } else if(action->optional) { crm_debug_3("Skipping optional: %s", action->uuid); continue; } else if(action->pseudo == FALSE && action->runnable == FALSE){ crm_debug_3("Skipping unrunnable: %s", action->uuid); continue; } else if(safe_str_eq(RSC_STOP, action->task)) { crm_debug_2("Stopping due to: %s", action->uuid); *stopping = TRUE; } else if(safe_str_eq(RSC_START, action->task)) { if(action->runnable == FALSE) { crm_debug_3("Skipping pseudo-op: %s run=%d, pseudo=%d", action->uuid, action->runnable, action->pseudo); } else { crm_debug_2("Starting due to: %s", action->uuid); crm_debug_3("%s run=%d, pseudo=%d", action->uuid, action->runnable, action->pseudo); *starting = TRUE; } } ); } static action_t * find_rsc_action(resource_t *rsc, const char *key, gboolean active_only, GListPtr *list) { action_t *match = NULL; GListPtr possible = NULL; GListPtr active = NULL; possible = find_actions(rsc->actions, key, NULL); if(active_only) { slist_iter(op, action_t, possible, lpc, if(op->optional == FALSE) { active = g_list_append(active, op); } ); if(active && g_list_length(active) == 1) { match = g_list_nth_data(active, 0); } if(list) { *list = active; active = NULL; } } else if(possible && g_list_length(possible) == 1) { match = g_list_nth_data(possible, 0); } if(list) { *list = possible; possible = NULL; } if(possible) { g_list_free(possible); } if(active) { g_list_free(active); } return match; } static void child_ordering_constraints(resource_t *rsc, pe_working_set_t *data_set) { char *key = NULL; action_t *stop = NULL; action_t *start = NULL; action_t *last_stop = NULL; action_t *last_start = NULL; gboolean active_only = TRUE; /* change to false to get the old behavior */ clone_variant_data_t *clone_data = NULL; get_clone_variant_data(clone_data, rsc); if(clone_data->ordered == FALSE) { 
return; } slist_iter( child, resource_t, rsc->children, lpc, key = stop_key(child); stop = find_rsc_action(child, key, active_only, NULL); crm_free(key); key = start_key(child); start = find_rsc_action(child, key, active_only, NULL); crm_free(key); if(stop) { if(last_stop) { /* child/child relative stop */ order_actions(stop, last_stop, pe_order_implies_left); } last_stop = stop; } if(start) { if(last_start) { /* child/child relative start */ order_actions(last_start, start, pe_order_implies_left); } last_start = start; } ); } void clone_create_actions(resource_t *rsc, pe_working_set_t *data_set) { gboolean child_active = FALSE; gboolean child_starting = FALSE; gboolean child_stopping = FALSE; action_t *stop = NULL; action_t *stopped = NULL; action_t *start = NULL; action_t *started = NULL; resource_t *last_start_rsc = NULL; resource_t *last_stop_rsc = NULL; clone_variant_data_t *clone_data = NULL; get_clone_variant_data(clone_data, rsc); crm_debug_2("Creating actions for %s", rsc->id); slist_iter( child_rsc, resource_t, rsc->children, lpc, child_rsc->cmds->create_actions(child_rsc, data_set); clone_update_pseudo_status( child_rsc, &child_stopping, &child_starting, &child_active); if(is_set(child_rsc->flags, pe_rsc_starting)) { last_start_rsc = child_rsc; } if(is_set(child_rsc->flags, pe_rsc_stopping)) { last_stop_rsc = child_rsc; } ); /* start */ start = start_action(rsc, NULL, !child_starting); started = custom_action(rsc, started_key(rsc), RSC_STARTED, NULL, !child_starting, TRUE, data_set); start->pseudo = TRUE; start->runnable = TRUE; started->pseudo = TRUE; started->priority = INFINITY; if(child_active || child_starting) { started->runnable = TRUE; } child_ordering_constraints(rsc, data_set); child_starting_constraints(clone_data, rsc, NULL, last_start_rsc, data_set); clone_data->start_notify = create_notification_boundaries(rsc, RSC_START, start, started, data_set); /* stop */ stop = stop_action(rsc, NULL, !child_stopping); stopped = custom_action(rsc, 
stopped_key(rsc), RSC_STOPPED, NULL, !child_stopping, TRUE, data_set); stop->pseudo = TRUE; stop->runnable = TRUE; stopped->pseudo = TRUE; stopped->runnable = TRUE; stopped->priority = INFINITY; child_stopping_constraints(clone_data, rsc, NULL, last_stop_rsc, data_set); clone_data->stop_notify = create_notification_boundaries(rsc, RSC_STOP, stop, stopped, data_set); if(clone_data->stop_notify && clone_data->start_notify) { order_actions(clone_data->stop_notify->post_done, clone_data->start_notify->pre, pe_order_optional); } } void child_starting_constraints( clone_variant_data_t *clone_data, resource_t *rsc, resource_t *child, resource_t *last, pe_working_set_t *data_set) { if(child == NULL && last == NULL) { crm_debug("%s has no active children", rsc->id); return; } if(child != NULL) { order_start_start( rsc, child, pe_order_runnable_left|pe_order_implies_left_printed); new_rsc_order(child, RSC_START, rsc, RSC_STARTED, pe_order_implies_right_printed, data_set); } if(FALSE && clone_data->ordered) { if(child == NULL) { /* last child start before global started */ new_rsc_order(last, RSC_START, rsc, RSC_STARTED, pe_order_runnable_left, data_set); } else if(last == NULL) { /* global start before first child start */ order_start_start( rsc, child, pe_order_implies_left); } else { /* child/child relative start */ order_start_start(last, child, pe_order_implies_left); } } } void child_stopping_constraints( clone_variant_data_t *clone_data, resource_t *rsc, resource_t *child, resource_t *last, pe_working_set_t *data_set) { if(child == NULL && last == NULL) { crm_debug("%s has no active children", rsc->id); return; } if(child != NULL) { order_stop_stop(rsc, child, pe_order_shutdown|pe_order_implies_left_printed); new_rsc_order(child, RSC_STOP, rsc, RSC_STOPPED, pe_order_implies_right_printed, data_set); } if(FALSE && clone_data->ordered) { if(last == NULL) { /* first child stop before global stopped */ new_rsc_order(child, RSC_STOP, rsc, RSC_STOPPED, 
pe_order_runnable_left, data_set); } else if(child == NULL) { /* global stop before last child stop */ order_stop_stop( rsc, last, pe_order_implies_left); } else { /* child/child relative stop */ order_stop_stop(child, last, pe_order_implies_left); } } } void clone_internal_constraints(resource_t *rsc, pe_working_set_t *data_set) { resource_t *last_rsc = NULL; clone_variant_data_t *clone_data = NULL; get_clone_variant_data(clone_data, rsc); native_internal_constraints(rsc, data_set); /* global stop before stopped */ new_rsc_order(rsc, RSC_STOP, rsc, RSC_STOPPED, pe_order_runnable_left, data_set); /* global start before started */ new_rsc_order(rsc, RSC_START, rsc, RSC_STARTED, pe_order_runnable_left, data_set); /* global stopped before start */ new_rsc_order(rsc, RSC_STOPPED, rsc, RSC_START, pe_order_optional, data_set); slist_iter( child_rsc, resource_t, rsc->children, lpc, child_rsc->cmds->internal_constraints(child_rsc, data_set); child_starting_constraints( clone_data, rsc, child_rsc, last_rsc, data_set); child_stopping_constraints( clone_data, rsc, child_rsc, last_rsc, data_set); last_rsc = child_rsc; ); } resource_t* find_compatible_child( resource_t *local_child, resource_t *rsc, enum rsc_role_e filter, gboolean current) { node_t *local_node = NULL; node_t *node = NULL; clone_variant_data_t *clone_data = NULL; get_clone_variant_data(clone_data, rsc); local_node = local_child->fns->location(local_child, NULL, current); if(local_node == NULL) { crm_debug("Can't colocate unrunnable child %s with %s", local_child->id, rsc->id); return NULL; } slist_iter( child_rsc, resource_t, rsc->children, lpc, enum rsc_role_e next_role = child_rsc->fns->state(child_rsc, current); node = child_rsc->fns->location(child_rsc, NULL, current); if(filter != RSC_ROLE_UNKNOWN && next_role != filter) { crm_debug_2("Filtered %s", child_rsc->id); continue; } if(node && local_node && node->details == local_node->details) { crm_info("Colocating %s with %s on %s", local_child->id, 
child_rsc->id, node->details->uname); return child_rsc; } ); crm_debug("Can't colocate child %s with %s", local_child->id, rsc->id); return NULL; } void clone_rsc_colocation_lh( resource_t *rsc_lh, resource_t *rsc_rh, rsc_colocation_t *constraint) { gboolean do_interleave = FALSE; resource_t *rsc = constraint->rsc_lh; clone_variant_data_t *clone_data = NULL; clone_variant_data_t *clone_data_rh = NULL; if(rsc == NULL) { pe_err("rsc_lh was NULL for %s", constraint->id); return; } else if(constraint->rsc_rh == NULL) { pe_err("rsc_rh was NULL for %s", constraint->id); return; } else { crm_debug_4("Processing constraints from %s", rsc->id); } get_clone_variant_data(clone_data, rsc); if(constraint->rsc_rh->variant == pe_clone || constraint->rsc_rh->variant == pe_master) { get_clone_variant_data( clone_data_rh, constraint->rsc_rh); if(clone_data->clone_node_max != clone_data_rh->clone_node_max) { crm_config_err("Cannot interleave "XML_CIB_TAG_INCARNATION " %s and %s because" " they do not support the same number of" " resources per node", constraint->rsc_lh->id, constraint->rsc_rh->id); /* only the LHS side needs to be labeled as interleave */ } else if(clone_data->interleave) { do_interleave = TRUE; } else if(constraint->score >= INFINITY) { GListPtr lhs = NULL, rhs = NULL; lhs = rsc_lh->allowed_nodes; slist_iter( child_rsc, resource_t, rsc_rh->children, lpc, node_t *chosen = child_rsc->fns->location(child_rsc, NULL, FALSE); if(chosen != NULL) { rhs = g_list_append(rhs, chosen); } ); rsc_lh->allowed_nodes = node_list_exclude(lhs, rhs); pe_free_shallow_adv(rhs, FALSE); pe_free_shallow(lhs); return; } } else if(constraint->score >= INFINITY) { crm_config_err("Manditory co-location of clones (%s) with other" " non-clone (%s) resources is not supported", rsc_lh->id, rsc_rh->id); return; } if(do_interleave) { resource_t *rh_child = NULL; slist_iter(lh_child, resource_t, rsc->children, lpc, CRM_ASSERT(lh_child != NULL); rh_child = find_compatible_child( lh_child, rsc_rh, 
RSC_ROLE_UNKNOWN, FALSE); if(rh_child == NULL) { crm_debug_2("No match found for %s", lh_child->id); continue; } crm_debug("Interleaving %s with %s", lh_child->id, rh_child->id); lh_child->cmds->rsc_colocation_lh( lh_child, rh_child, constraint); ); return; } slist_iter( child_rsc, resource_t, rsc->children, lpc, child_rsc->cmds->rsc_colocation_lh(child_rsc, constraint->rsc_rh, constraint); ); } void clone_rsc_colocation_rh( resource_t *rsc_lh, resource_t *rsc_rh, rsc_colocation_t *constraint) { clone_variant_data_t *clone_data = NULL; CRM_CHECK(rsc_lh != NULL, return); CRM_CHECK(rsc_lh->variant == pe_native, return); get_clone_variant_data(clone_data, rsc_rh); crm_debug_3("Processing constraint %s: %d", constraint->id, constraint->score); if(rsc_rh == NULL) { pe_err("rsc_rh was NULL for %s", constraint->id); return; } else if(is_set(rsc_rh->flags, pe_rsc_provisional)) { crm_debug_3("%s is still provisional", rsc_rh->id); return; } else if(constraint->score >= INFINITY) { GListPtr lhs = NULL, rhs = NULL; lhs = rsc_lh->allowed_nodes; slist_iter( child_rsc, resource_t, rsc_rh->children, lpc, node_t *chosen = child_rsc->fns->location(child_rsc, NULL, FALSE); if(chosen != NULL) { rhs = g_list_append(rhs, chosen); } ); rsc_lh->allowed_nodes = node_list_exclude(lhs, rhs); pe_free_shallow_adv(rhs, FALSE); pe_free_shallow(lhs); return; } slist_iter( child_rsc, resource_t, rsc_rh->children, lpc, child_rsc->cmds->rsc_colocation_rh(rsc_lh, child_rsc, constraint); ); } void clone_rsc_order_lh(resource_t *rsc, order_constraint_t *order, pe_working_set_t *data_set) { resource_t *r1 = NULL; resource_t *r2 = NULL; gboolean do_interleave = FALSE; clone_variant_data_t *clone_data = NULL; get_clone_variant_data(clone_data, rsc); crm_debug_4("%s->%s", order->lh_action_task, order->rh_action_task); if(order->rh_rsc == NULL) { order->lh_action_task = convert_non_atomic_task(order->lh_action_task, FALSE, TRUE); native_rsc_order_lh(rsc, order, data_set); return; } r1 = uber_parent(rsc); 
r2 = uber_parent(order->rh_rsc); if(r1 == r2) { native_rsc_order_lh(rsc, order, data_set); return; } if(order->rh_rsc->variant == pe_clone || order->rh_rsc->variant == pe_master) { clone_variant_data_t *clone_data_rh = NULL; get_clone_variant_data(clone_data_rh, order->rh_rsc); if(clone_data->clone_node_max != clone_data_rh->clone_node_max) { crm_config_err("Cannot interleave "XML_CIB_TAG_INCARNATION " %s and %s because they do not support the same" " number of resources per node", rsc->id, order->rh_rsc->id); /* only the LHS side needs to be labeled as interleave */ } else if(clone_data->interleave) { do_interleave = TRUE; } } if(order->rh_rsc == NULL) { do_interleave = FALSE; } if(do_interleave) { resource_t *lh_child = NULL; resource_t *rh_saved = order->rh_rsc; gboolean current = FALSE; if(strstr(order->lh_action_task, "_stop_0") || strstr(order->lh_action_task, "_demote_0")) { current = TRUE; } slist_iter( rh_child, resource_t, rh_saved->children, lpc, CRM_ASSERT(rh_child != NULL); lh_child = find_compatible_child(rh_child, rsc, RSC_ROLE_UNKNOWN, current); if(lh_child == NULL) { crm_debug_2("No match found for %s", rh_child->id); continue; } crm_debug("Interleaving %s with %s", lh_child->id, rh_child->id); order->rh_rsc = rh_child; lh_child->cmds->rsc_order_lh(lh_child, order, data_set); order->rh_rsc = rh_saved; ); } else { #if 0 if(order->type != pe_order_optional) { crm_debug("Upgraded ordering constraint %d - 0x%.6x", order->id, order->type); native_rsc_order_lh(rsc, order, data_set); } #endif if(order->type & pe_order_implies_left) { if(rsc->variant == order->rh_rsc->variant) { crm_debug_2("Clone-to-clone ordering: %s -> %s 0x%.6x", order->lh_action_task, order->rh_action_task, order->type); /* stop instances on the same nodes as stopping RHS instances */ slist_iter( child_rsc, resource_t, rsc->children, lpc, native_rsc_order_lh(child_rsc, order, data_set); ); } else { /* stop everything */ crm_debug_2("Clone-to-* ordering: %s -> %s 0x%.6x", 
order->lh_action_task, order->rh_action_task, order->type); slist_iter( child_rsc, resource_t, rsc->children, lpc, native_rsc_order_lh(child_rsc, order, data_set); ); } } } if(do_interleave == FALSE || clone_data->ordered) { order->lh_action_task = convert_non_atomic_task(order->lh_action_task, FALSE, TRUE); native_rsc_order_lh(rsc, order, data_set); } if(is_set(rsc->flags, pe_rsc_notify)) { order->type = pe_order_optional; order->lh_action_task = convert_non_atomic_task(order->lh_action_task, TRUE, TRUE); native_rsc_order_lh(rsc, order, data_set); } } void clone_rsc_order_rh( action_t *lh_action, resource_t *rsc, order_constraint_t *order) { enum pe_ordering type = order->type; clone_variant_data_t *clone_data = NULL; + resource_t *lh_p = uber_parent(lh_action->rsc); + get_clone_variant_data(clone_data, rsc); + crm_debug_2("%s->%s", order->lh_action_task, order->rh_action_task); if(safe_str_eq(CRM_OP_PROBED, lh_action->uuid)) { slist_iter( child_rsc, resource_t, rsc->children, lpc, child_rsc->cmds->rsc_order_rh(lh_action, child_rsc, order); ); if(rsc->fns->state(rsc, TRUE) < RSC_ROLE_STARTED && rsc->fns->state(rsc, FALSE) > RSC_ROLE_STOPPED) { order->type |= pe_order_implies_right; } + + } else if(lh_p && lh_p != rsc && lh_p->variant < pe_clone) { + GListPtr hosts = NULL; + GListPtr lh_hosts = NULL; + GListPtr intersection = NULL; + const char *reason = "unknown"; + gboolean loc_type = TRUE; + + if(safe_str_eq(lh_action->task, RSC_STOP) + || safe_str_eq(lh_action->task, RSC_STOPPED) + || safe_str_eq(lh_action->task, RSC_DEMOTE) + || safe_str_eq(lh_action->task, RSC_DEMOTED)) { + reason = "down activiity"; + lh_p->fns->location(lh_p, &lh_hosts, TRUE); + + } else { + loc_type = FALSE; + reason = "up activiity"; + lh_p->fns->location(lh_p, &lh_hosts, FALSE); + } + + /* slist_iter(h, node_t, lh_hosts, llpc, crm_info("LHH: %s", h->details->uname)); */ + + slist_iter( + child_rsc, resource_t, rsc->children, lpc, + + child_rsc->fns->location(child_rsc, &hosts, loc_type); 
+ /* slist_iter(h, node_t, hosts, llpc, crm_info("H: %s %s", child_rsc->id, h->details->uname)); */ + + intersection = node_list_and(hosts, lh_hosts, FALSE); + if(intersection != NULL) { + crm_debug("Enforcing %s->%s for %s: found %s", + order->lh_action_task, order->rh_action_task, child_rsc->id, reason); + child_rsc->cmds->rsc_order_rh(lh_action, child_rsc, order); + order->type = pe_order_optional; + native_rsc_order_rh(lh_action, rsc, order); + order->type = type; + + + } else { + crm_debug("Ignoring %s->%s for %s: no relevant %s", + order->lh_action_task, order->rh_action_task, child_rsc->id, reason); + } + + g_list_free(intersection); + g_list_free(hosts); hosts = NULL; + ); + + g_list_free(lh_hosts); + return; } + native_rsc_order_rh(lh_action, rsc, order); order->type = type; } void clone_rsc_location(resource_t *rsc, rsc_to_node_t *constraint) { clone_variant_data_t *clone_data = NULL; get_clone_variant_data(clone_data, rsc); crm_debug_3("Processing location constraint %s for %s", constraint->id, rsc->id); native_rsc_location(rsc, constraint); slist_iter( child_rsc, resource_t, rsc->children, lpc, child_rsc->cmds->rsc_location(child_rsc, constraint); ); } void clone_expand(resource_t *rsc, pe_working_set_t *data_set) { clone_variant_data_t *clone_data = NULL; get_clone_variant_data(clone_data, rsc); crm_debug_2("Processing actions from %s", rsc->id); if(clone_data->start_notify) { collect_notification_data(rsc, TRUE, TRUE, clone_data->start_notify); expand_notification_data(clone_data->start_notify); create_notifications(rsc, clone_data->start_notify, data_set); } if(clone_data->stop_notify) { collect_notification_data(rsc, TRUE, TRUE, clone_data->stop_notify); expand_notification_data(clone_data->stop_notify); create_notifications(rsc, clone_data->stop_notify, data_set); } if(clone_data->promote_notify) { collect_notification_data(rsc, TRUE, TRUE, clone_data->promote_notify); expand_notification_data(clone_data->promote_notify); create_notifications(rsc, 
clone_data->promote_notify, data_set); } if(clone_data->demote_notify) { collect_notification_data(rsc, TRUE, TRUE, clone_data->demote_notify); expand_notification_data(clone_data->demote_notify); create_notifications(rsc, clone_data->demote_notify, data_set); } /* Now that the notifcations have been created we can expand the children */ slist_iter( child_rsc, resource_t, rsc->children, lpc, child_rsc->cmds->expand(child_rsc, data_set)); native_expand(rsc, data_set); /* The notifications are in the graph now, we can destroy the notify_data */ free_notification_data(clone_data->demote_notify); free_notification_data(clone_data->stop_notify); free_notification_data(clone_data->start_notify); free_notification_data(clone_data->promote_notify); } static gint sort_rsc_id(gconstpointer a, gconstpointer b) { const resource_t *resource1 = (const resource_t*)a; const resource_t *resource2 = (const resource_t*)b; CRM_ASSERT(resource1 != NULL); CRM_ASSERT(resource2 != NULL); return strcmp(resource1->id, resource2->id); } static resource_t *find_instance_on(resource_t *rsc, node_t *node) { slist_iter(child, resource_t, rsc->children, lpc, GListPtr known_list = NULL; rsc_known_on(child, &known_list); slist_iter(known, node_t, known_list, lpc2, if(node->details == known->details) { g_list_free(known_list); return child; } ); g_list_free(known_list); ); return NULL; } gboolean clone_create_probe(resource_t *rsc, node_t *node, action_t *complete, gboolean force, pe_working_set_t *data_set) { gboolean any_created = FALSE; clone_variant_data_t *clone_data = NULL; get_clone_variant_data(clone_data, rsc); rsc->children = g_list_sort(rsc->children, sort_rsc_id); if(rsc->children == NULL) { pe_warn("Clone %s has no children", rsc->id); return FALSE; } if(is_not_set(rsc->flags, pe_rsc_unique) && clone_data->clone_node_max == 1) { /* only look for one copy */ resource_t *child = NULL; /* Try whoever we probed last time */ child = find_instance_on(rsc, node); if(child) { return 
child->cmds->create_probe( child, node, complete, force, data_set); } /* Try whoever we plan on starting there */ slist_iter( child_rsc, resource_t, rsc->children, lpc, node_t *local_node = child_rsc->fns->location(child_rsc, NULL, FALSE); if(local_node == NULL) { continue; } if(local_node->details == node->details) { return child_rsc->cmds->create_probe( child_rsc, node, complete, force, data_set); } ); /* Fall back to the first clone instance */ child = rsc->children->data; return child->cmds->create_probe(child, node, complete, force, data_set); } slist_iter( child_rsc, resource_t, rsc->children, lpc, if(child_rsc->cmds->create_probe( child_rsc, node, complete, force, data_set)) { any_created = TRUE; } if(any_created && is_not_set(rsc->flags, pe_rsc_unique) && clone_data->clone_node_max == 1) { /* only look for one copy (clone :0) */ break; } ); return any_created; } diff --git a/pengine/native.c b/pengine/native.c index e8ea85cc2a..7fc9ffd346 100644 --- a/pengine/native.c +++ b/pengine/native.c @@ -1,2192 +1,2197 @@ /* * Copyright (C) 2004 Andrew Beekhof * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. 
* * You should have received a copy of the GNU General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include #include #include #include #include #include #include #define DELETE_THEN_REFRESH 1 /* The crmd will remove the resource from the CIB itself, making this redundant */ #define VARIANT_NATIVE 1 #include void native_rsc_colocation_rh_must(resource_t *rsc_lh, gboolean update_lh, resource_t *rsc_rh, gboolean update_rh); void native_rsc_colocation_rh_mustnot(resource_t *rsc_lh, gboolean update_lh, resource_t *rsc_rh, gboolean update_rh); void Recurring(resource_t *rsc, action_t *start, node_t *node, pe_working_set_t *data_set); void RecurringOp(resource_t *rsc, action_t *start, node_t *node, xmlNode *operation, pe_working_set_t *data_set); void pe_post_notify( resource_t *rsc, node_t *node, action_t *op, notify_data_t *n_data, pe_working_set_t *data_set); void NoRoleChange (resource_t *rsc, node_t *current, node_t *next, pe_working_set_t *data_set); gboolean DeleteRsc (resource_t *rsc, node_t *node, gboolean optional, pe_working_set_t *data_set); gboolean StopRsc (resource_t *rsc, node_t *next, gboolean optional, pe_working_set_t *data_set); gboolean StartRsc (resource_t *rsc, node_t *next, gboolean optional, pe_working_set_t *data_set); gboolean DemoteRsc (resource_t *rsc, node_t *next, gboolean optional, pe_working_set_t *data_set); gboolean PromoteRsc(resource_t *rsc, node_t *next, gboolean optional, pe_working_set_t *data_set); gboolean RoleError (resource_t *rsc, node_t *next, gboolean optional, pe_working_set_t *data_set); gboolean NullOp (resource_t *rsc, node_t *next, gboolean optional, pe_working_set_t *data_set); enum rsc_role_e rsc_state_matrix[RSC_ROLE_MAX][RSC_ROLE_MAX] = { /* Current State */ /* Next State: Unknown Stopped Started Slave Master */ /* Unknown */ { RSC_ROLE_UNKNOWN, RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, 
RSC_ROLE_STOPPED, }, /* Stopped */ { RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_STARTED, RSC_ROLE_SLAVE, RSC_ROLE_SLAVE, }, /* Started */ { RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_STARTED, RSC_ROLE_SLAVE, RSC_ROLE_MASTER, }, /* Slave */ { RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_UNKNOWN, RSC_ROLE_SLAVE, RSC_ROLE_MASTER, }, /* Master */ { RSC_ROLE_STOPPED, RSC_ROLE_SLAVE, RSC_ROLE_UNKNOWN, RSC_ROLE_SLAVE, RSC_ROLE_MASTER, }, }; gboolean (*rsc_action_matrix[RSC_ROLE_MAX][RSC_ROLE_MAX])(resource_t*,node_t*,gboolean,pe_working_set_t*) = { /* Current State */ /* Next State: Unknown Stopped Started Slave Master */ /* Unknown */ { RoleError, StopRsc, RoleError, RoleError, RoleError, }, /* Stopped */ { RoleError, NullOp, StartRsc, StartRsc, RoleError, }, /* Started */ { RoleError, StopRsc, NullOp, NullOp, PromoteRsc, }, /* Slave */ { RoleError, StopRsc, RoleError, NullOp, PromoteRsc, }, /* Master */ { RoleError, RoleError, RoleError, DemoteRsc, NullOp, }, }; static gboolean have_enough_capacity(node_t *node, resource_t *rsc) { GHashTableIter iter; const char *key = NULL; const char *value = NULL; int required = 0; int remaining = 0; int rc = TRUE; g_hash_table_iter_init(&iter, rsc->utilization); while (g_hash_table_iter_next(&iter, (gpointer)&key, (gpointer)&value)) { required = crm_parse_int(value, "0"); remaining = crm_parse_int(g_hash_table_lookup(node->details->utilization, key), "0"); if (required > remaining) { crm_debug("Node %s has no enough %s for resource %s: required=%d remaining=%d", node->details->uname, key, rsc->id, required, remaining); rc = FALSE; } } return rc; } static gboolean native_choose_node(resource_t *rsc, pe_working_set_t *data_set) { /* 1. Sort by weight 2. color.chosen_node = the node (of those with the highest wieght) with the fewest resources 3. 
remove color.chosen_node from all other colors */ int alloc_details = scores_log_level+1; GListPtr nodes = NULL; node_t *chosen = NULL; int lpc = 0; int multiple = 0; int length = 0; if (safe_str_neq(data_set->placement_strategy, "default")) { slist_iter( node, node_t, data_set->nodes, lpc, if (have_enough_capacity(node, rsc) == FALSE) { crm_debug("Resource %s cannot be allocated to node %s: none of enough capacity", rsc->id, node->details->uname); resource_location(rsc, node, -INFINITY, "__limit_utilization_", data_set); } ); dump_node_scores(alloc_details, rsc, "Post-utilization", rsc->allowed_nodes); } length = g_list_length(rsc->allowed_nodes); if(is_not_set(rsc->flags, pe_rsc_provisional)) { return rsc->allocated_to?TRUE:FALSE; } crm_debug_3("Choosing node for %s from %d candidates", rsc->id, length); if(rsc->allowed_nodes) { rsc->allowed_nodes = g_list_sort_with_data(rsc->allowed_nodes, sort_node_weight, data_set); nodes = rsc->allowed_nodes; chosen = g_list_nth_data(nodes, 0); if(chosen && chosen->weight > 0 && can_run_resources(chosen)) { node_t *running = g_list_nth_data(rsc->running_on, 0); if(can_run_resources(running) == FALSE) { running = NULL; } for(lpc = 1; lpc < length; lpc++) { node_t *tmp = g_list_nth_data(nodes, lpc); if(tmp->weight == chosen->weight) { multiple++; if(running && tmp->details == running->details) { /* prefer the existing node if scores are equal */ chosen = tmp; } } } } } if(multiple > 1) { int log_level = LOG_INFO; char *score = score2char(chosen->weight); if(chosen->weight >= INFINITY) { log_level = LOG_WARNING; } do_crm_log(log_level, "%d nodes with equal score (%s) for" " running %s resources. 
Chose %s.", multiple, score, rsc->id, chosen->details->uname); crm_free(score); } return native_assign_node(rsc, nodes, chosen, FALSE); } int node_list_attr_score(GListPtr list, const char *attr, const char *value) { int best_score = -INFINITY; const char *best_node = NULL; if(attr == NULL) { attr = "#"XML_ATTR_UNAME; } slist_iter(node, node_t, list, lpc, int weight = node->weight; if(can_run_resources(node) == FALSE) { weight = -INFINITY; } if(weight > best_score || best_node == NULL) { const char *tmp = g_hash_table_lookup(node->details->attrs, attr); if(safe_str_eq(value, tmp)) { best_score = weight; best_node = node->details->uname; } } ); if(safe_str_neq(attr, "#"XML_ATTR_UNAME)) { crm_info("Best score for %s=%s was %s with %d", attr, value, best_node?best_node:"", best_score); } return best_score; } static void node_list_update(GListPtr list1, GListPtr list2, const char *attr, int factor) { int score = 0; if(attr == NULL) { attr = "#"XML_ATTR_UNAME; } slist_iter( node, node_t, list1, lpc, CRM_CHECK(node != NULL, continue); score = node_list_attr_score(list2, attr, g_hash_table_lookup(node->details->attrs, attr)); if(factor < 0 && score < 0) { /* Negative preference for a node with a negative score * should not become a positive preference * * TODO: Decide if we want to filter only if weight == -INFINITY * */ continue; } crm_debug_2("%s: %d + %d*%d", node->details->uname, node->weight, factor, score); node->weight = merge_weights(factor*score, node->weight); ); } GListPtr native_merge_weights( resource_t *rsc, const char *rhs, GListPtr nodes, const char *attr, int factor, gboolean allow_rollback) { GListPtr archive = NULL; + int multiplier = 1; + if(factor < 0) { + multiplier = -1; + } if(is_set(rsc->flags, pe_rsc_merging)) { crm_info("%s: Breaking dependancy loop at %s", rhs, rsc->id); return nodes; } set_bit(rsc->flags, pe_rsc_merging); crm_debug_2("%s: Combining scores from %s", rhs, rsc->id); if(allow_rollback) { archive = node_list_dup(nodes, FALSE, 
FALSE); } node_list_update(nodes, rsc->allowed_nodes, attr, factor); if(can_run_any(nodes) == FALSE) { if(archive) { crm_info("%s: Rolling back scores from %s", rhs, rsc->id); pe_free_shallow_adv(nodes, TRUE); nodes = archive; } goto bail; } pe_free_shallow_adv(archive, TRUE); slist_iter( constraint, rsc_colocation_t, rsc->rsc_cons_lhs, lpc, + crm_info("%s: Rolling back scores from %s", rhs, rsc->id); nodes = constraint->rsc_lh->cmds->merge_weights( constraint->rsc_lh, rhs, nodes, constraint->node_attribute, - constraint->score/INFINITY, allow_rollback); + multiplier*constraint->score/INFINITY, allow_rollback); ); bail: clear_bit(rsc->flags, pe_rsc_merging); return nodes; } node_t * native_color(resource_t *rsc, pe_working_set_t *data_set) { int alloc_details = scores_log_level+1; if(rsc->parent && is_not_set(rsc->parent->flags, pe_rsc_allocating)) { /* never allocate children on their own */ crm_debug("Escalating allocation of %s to its parent: %s", rsc->id, rsc->parent->id); rsc->parent->cmds->color(rsc->parent, data_set); } if(is_not_set(rsc->flags, pe_rsc_provisional)) { return rsc->allocated_to; } if(is_set(rsc->flags, pe_rsc_allocating)) { crm_debug("Dependancy loop detected involving %s", rsc->id); return NULL; } set_bit(rsc->flags, pe_rsc_allocating); print_resource(alloc_details, "Allocating: ", rsc, FALSE); dump_node_scores(alloc_details, rsc, "Pre-allloc", rsc->allowed_nodes); slist_iter( constraint, rsc_colocation_t, rsc->rsc_cons, lpc, resource_t *rsc_rh = constraint->rsc_rh; crm_debug_2("%s: Pre-Processing %s (%s)", rsc->id, constraint->id, rsc_rh->id); rsc_rh->cmds->color(rsc_rh, data_set); rsc->cmds->rsc_colocation_lh(rsc, rsc_rh, constraint); ); dump_node_scores(alloc_details, rsc, "Post-coloc", rsc->allowed_nodes); slist_iter( constraint, rsc_colocation_t, rsc->rsc_cons_lhs, lpc, rsc->allowed_nodes = constraint->rsc_lh->cmds->merge_weights( constraint->rsc_lh, rsc->id, rsc->allowed_nodes, constraint->node_attribute, constraint->score/INFINITY, 
TRUE); ); print_resource(LOG_DEBUG_2, "Allocating: ", rsc, FALSE); if(rsc->next_role == RSC_ROLE_STOPPED) { crm_debug_2("Making sure %s doesn't get allocated", rsc->id); /* make sure it doesnt come up again */ resource_location( rsc, NULL, -INFINITY, XML_RSC_ATTR_TARGET_ROLE, data_set); } dump_node_scores(show_scores?0:scores_log_level, rsc, __PRETTY_FUNCTION__, rsc->allowed_nodes); if(is_set(data_set->flags, pe_flag_stonith_enabled) && is_set(data_set->flags, pe_flag_have_stonith_resource) == FALSE) { clear_bit(rsc->flags, pe_rsc_managed); } if(is_not_set(rsc->flags, pe_rsc_managed)) { const char *reason = NULL; node_t *assign_to = NULL; if(rsc->running_on == NULL) { reason = "inactive"; } else if(rsc->role == RSC_ROLE_MASTER) { assign_to = rsc->running_on->data; reason = "master"; } else if(is_set(rsc->flags, pe_rsc_failed)) { reason = "failed"; } else { assign_to = rsc->running_on->data; reason = "active"; } crm_info("Unmanaged resource %s allocated to %s: %s", rsc->id, assign_to?assign_to->details->uname:"'nowhere'", reason); native_assign_node(rsc, NULL, assign_to, TRUE); } else if(is_set(data_set->flags, pe_flag_stop_everything)) { crm_debug("Forcing %s to stop", rsc->id); native_assign_node(rsc, NULL, NULL, TRUE); } else if(is_set(rsc->flags, pe_rsc_provisional) && native_choose_node(rsc, data_set) ) { crm_debug_3("Allocated resource %s to %s", rsc->id, rsc->allocated_to->details->uname); } else if(rsc->allocated_to == NULL) { if(is_not_set(rsc->flags, pe_rsc_orphan)) { pe_warn("Resource %s cannot run anywhere", rsc->id); } else if(rsc->running_on != NULL) { crm_info("Stopping orphan resource %s", rsc->id); } } else { crm_debug("Pre-Allocated resource %s to %s", rsc->id, rsc->allocated_to->details->uname); } clear_bit(rsc->flags, pe_rsc_allocating); print_resource(LOG_DEBUG_3, "Allocated ", rsc, TRUE); return rsc->allocated_to; } static gboolean is_op_dup( resource_t *rsc, const char *name, const char *interval) { gboolean dup = FALSE; const char *id = NULL; 
const char *value = NULL; xml_child_iter_filter( rsc->ops_xml, operation, "op", value = crm_element_value(operation, "name"); if(safe_str_neq(value, name)) { continue; } value = crm_element_value(operation, XML_LRM_ATTR_INTERVAL); if(value == NULL) { value = "0"; } if(safe_str_neq(value, interval)) { continue; } if(id == NULL) { id = ID(operation); } else { crm_config_err("Operation %s is a duplicate of %s", ID(operation), id); crm_config_err("Do not use the same (name, interval) combination more than once per resource"); dup = TRUE; } ); return dup; } void RecurringOp(resource_t *rsc, action_t *start, node_t *node, xmlNode *operation, pe_working_set_t *data_set) { char *key = NULL; const char *name = NULL; const char *value = NULL; const char *interval = NULL; const char *node_uname = NULL; unsigned long long interval_ms = 0; action_t *mon = NULL; gboolean is_optional = TRUE; GListPtr possible_matches = NULL; crm_debug_2("Creating recurring action %s for %s in role %s", ID(operation), rsc->id, role2text(rsc->next_role)); if(node != NULL) { node_uname = node->details->uname; } interval = crm_element_value(operation, XML_LRM_ATTR_INTERVAL); interval_ms = crm_get_interval(interval); if(interval_ms == 0) { return; } name = crm_element_value(operation, "name"); if(is_op_dup(rsc, name, interval)) { return; } key = generate_op_key(rsc->id, name, interval_ms); if(find_rsc_op_entry(rsc, key) == NULL) { /* disabled */ return; } if(start != NULL) { crm_debug_3("Marking %s %s due to %s", key, start->optional?"optional":"manditory", start->uuid); is_optional = start->optional; } else { crm_debug_2("Marking %s optional", key); is_optional = TRUE; } /* start a monitor for an already active resource */ possible_matches = find_actions_exact(rsc->actions, key, node); if(possible_matches == NULL) { is_optional = FALSE; crm_debug_3("Marking %s manditory: not active", key); } else { g_list_free(possible_matches); } value = crm_element_value(operation, "role"); if((rsc->next_role == 
RSC_ROLE_MASTER && value == NULL) || (value != NULL && text2role(value) != rsc->next_role)) { int log_level = LOG_DEBUG_2; const char *result = "Ignoring"; if(is_optional) { char *local_key = crm_strdup(key); log_level = LOG_INFO; result = "Cancelling"; /* its running : cancel it */ mon = custom_action( rsc, local_key, RSC_CANCEL, node, FALSE, TRUE, data_set); crm_free(mon->task); mon->task = crm_strdup(RSC_CANCEL); add_hash_param(mon->meta, XML_LRM_ATTR_INTERVAL, interval); add_hash_param(mon->meta, XML_LRM_ATTR_TASK, name); local_key = NULL; switch(rsc->role) { case RSC_ROLE_SLAVE: case RSC_ROLE_STARTED: if(rsc->next_role == RSC_ROLE_MASTER) { local_key = promote_key(rsc); } else if(rsc->next_role == RSC_ROLE_STOPPED) { local_key = stop_key(rsc); } break; case RSC_ROLE_MASTER: local_key = demote_key(rsc); break; default: break; } if(local_key) { custom_action_order(rsc, NULL, mon, rsc, local_key, NULL, pe_order_runnable_left, data_set); } mon = NULL; } do_crm_log(log_level, "%s action %s (%s vs. 
%s)", result , key, value?value:role2text(RSC_ROLE_SLAVE), role2text(rsc->next_role)); crm_free(key); key = NULL; return; } mon = custom_action(rsc, key, name, node, is_optional, TRUE, data_set); key = mon->uuid; if(is_optional) { crm_debug_2("%s\t %s (optional)", crm_str(node_uname), mon->uuid); } if(start == NULL || start->runnable == FALSE) { crm_debug("%s\t %s (cancelled : start un-runnable)", crm_str(node_uname), mon->uuid); mon->runnable = FALSE; } else if(node == NULL || node->details->online == FALSE || node->details->unclean) { crm_debug("%s\t %s (cancelled : no node available)", crm_str(node_uname), mon->uuid); mon->runnable = FALSE; } else if(mon->optional == FALSE) { crm_notice(" Start recurring %s (%llus) for %s on %s", mon->task, interval_ms/1000, rsc->id, crm_str(node_uname)); } if(rsc->next_role == RSC_ROLE_MASTER) { char *running_master = crm_itoa(EXECRA_RUNNING_MASTER); add_hash_param(mon->meta, XML_ATTR_TE_TARGET_RC, running_master); crm_free(running_master); } if(node == NULL || is_set(rsc->flags, pe_rsc_managed)) { custom_action_order(rsc, start_key(rsc), NULL, NULL, crm_strdup(key), mon, pe_order_implies_right|pe_order_runnable_left, data_set); if(rsc->next_role == RSC_ROLE_MASTER) { custom_action_order( rsc, promote_key(rsc), NULL, rsc, NULL, mon, pe_order_optional|pe_order_runnable_left, data_set); } else if(rsc->role == RSC_ROLE_MASTER) { custom_action_order( rsc, demote_key(rsc), NULL, rsc, NULL, mon, pe_order_optional|pe_order_runnable_left, data_set); } } } void Recurring(resource_t *rsc, action_t *start, node_t *node, pe_working_set_t *data_set) { if(is_not_set(data_set->flags, pe_flag_maintenance_mode)) { xml_child_iter_filter( rsc->ops_xml, operation, "op", RecurringOp(rsc, start, node, operation, data_set); ); } } void native_create_actions(resource_t *rsc, pe_working_set_t *data_set) { action_t *start = NULL; node_t *chosen = NULL; enum rsc_role_e role = RSC_ROLE_UNKNOWN; enum rsc_role_e next_role = RSC_ROLE_UNKNOWN; 
crm_debug_2("Createing actions for %s: %s->%s", rsc->id, role2text(rsc->role), role2text(rsc->next_role)); chosen = rsc->allocated_to; if(chosen != NULL && rsc->next_role == RSC_ROLE_UNKNOWN) { rsc->next_role = RSC_ROLE_STARTED; } else if(rsc->next_role == RSC_ROLE_UNKNOWN) { rsc->next_role = RSC_ROLE_STOPPED; } get_rsc_attributes(rsc->parameters, rsc, chosen, data_set); if(g_list_length(rsc->running_on) > 1) { if(rsc->recovery_type == recovery_stop_start) { pe_proc_warn("Attempting recovery of resource %s", rsc->id); if(rsc->role == RSC_ROLE_MASTER) { DemoteRsc(rsc, NULL, FALSE, data_set); } StopRsc(rsc, NULL, FALSE, data_set); rsc->role = RSC_ROLE_STOPPED; } } else if(rsc->running_on != NULL) { node_t *current = rsc->running_on->data; NoRoleChange(rsc, current, chosen, data_set); } else if(rsc->role == RSC_ROLE_STOPPED && rsc->next_role == RSC_ROLE_STOPPED) { char *key = start_key(rsc); GListPtr possible_matches = find_actions(rsc->actions, key, NULL); slist_iter( action, action_t, possible_matches, lpc, action->optional = TRUE; /* action->pseudo = TRUE; */ ); g_list_free(possible_matches); crm_debug_2("Stopping a stopped resource"); crm_free(key); goto do_recurring; } else if(rsc->role != RSC_ROLE_STOPPED) { /* A cheap trick to account for the fact that Master/Slave groups may not be * completely running when we set their role to Slave */ crm_debug_2("Resetting %s.role = %s (was %s)", rsc->id, role2text(RSC_ROLE_STOPPED), role2text(rsc->role)); rsc->role = RSC_ROLE_STOPPED; } role = rsc->role; while(role != rsc->next_role) { next_role = rsc_state_matrix[role][rsc->next_role]; crm_debug_2("Executing: %s->%s (%s)", role2text(role), role2text(next_role), rsc->id); if(rsc_action_matrix[role][next_role]( rsc, chosen, FALSE, data_set) == FALSE) { break; } role = next_role; } do_recurring: if(rsc->next_role != RSC_ROLE_STOPPED || is_set(rsc->flags, pe_rsc_managed) == FALSE) { start = start_action(rsc, chosen, TRUE); Recurring(rsc, start, chosen, data_set); } } void 
native_internal_constraints(resource_t *rsc, pe_working_set_t *data_set) { int type = pe_order_optional; const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS); action_t *all_stopped = get_pseudo_op(ALL_STOPPED, data_set); if(rsc->variant == pe_native) { type |= pe_order_implies_right; } if(rsc->parent == NULL || rsc->parent->variant == pe_group) { type |= pe_order_restart; } new_rsc_order(rsc, RSC_STOP, rsc, RSC_START, type, data_set); new_rsc_order(rsc, RSC_DEMOTE, rsc, RSC_STOP, pe_order_demote_stop, data_set); new_rsc_order(rsc, RSC_START, rsc, RSC_PROMOTE, pe_order_runnable_left, data_set); new_rsc_order(rsc, RSC_DELETE, rsc, RSC_START, pe_order_optional, data_set); if(is_not_set(rsc->flags, pe_rsc_managed)) { crm_debug_3("Skipping fencing constraints for unmanaged resource: %s", rsc->id); return; } if(rsc->variant == pe_native && safe_str_neq(class, "stonith")) { custom_action_order( rsc, stop_key(rsc), NULL, NULL, crm_strdup(all_stopped->task), all_stopped, pe_order_implies_right|pe_order_runnable_left, data_set); } } void native_rsc_colocation_lh( resource_t *rsc_lh, resource_t *rsc_rh, rsc_colocation_t *constraint) { if(rsc_lh == NULL) { pe_err("rsc_lh was NULL for %s", constraint->id); return; } else if(constraint->rsc_rh == NULL) { pe_err("rsc_rh was NULL for %s", constraint->id); return; } crm_debug_2("Processing colocation constraint between %s and %s", rsc_lh->id, rsc_rh->id); rsc_rh->cmds->rsc_colocation_rh(rsc_lh, rsc_rh, constraint); } static gboolean filter_colocation_constraint( resource_t *rsc_lh, resource_t *rsc_rh, rsc_colocation_t *constraint) { int level = LOG_DEBUG_4; if(constraint->score == 0){ return FALSE; } if(constraint->score > 0 && constraint->role_lh != RSC_ROLE_UNKNOWN && constraint->role_lh != rsc_lh->next_role) { do_crm_log_unlikely(level, "LH: Skipping constraint: \"%s\" state filter", role2text(constraint->role_rh)); return FALSE; } if(constraint->score > 0 && constraint->role_rh != RSC_ROLE_UNKNOWN && 
constraint->role_rh != rsc_rh->next_role) { do_crm_log_unlikely(level, "RH: Skipping constraint: \"%s\" state filter", role2text(constraint->role_rh)); return FALSE; } if(constraint->score < 0 && constraint->role_lh != RSC_ROLE_UNKNOWN && constraint->role_lh == rsc_lh->next_role) { do_crm_log_unlikely(level, "LH: Skipping -ve constraint: \"%s\" state filter", role2text(constraint->role_rh)); return FALSE; } if(constraint->score < 0 && constraint->role_rh != RSC_ROLE_UNKNOWN && constraint->role_rh == rsc_rh->next_role) { do_crm_log_unlikely(level, "RH: Skipping -ve constraint: \"%s\" state filter", role2text(constraint->role_rh)); return FALSE; } return TRUE; } static void colocation_match( resource_t *rsc_lh, resource_t *rsc_rh, rsc_colocation_t *constraint) { const char *tmp = NULL; const char *value = NULL; gboolean do_check = FALSE; const char *attribute = "#id"; if(constraint->node_attribute != NULL) { attribute = constraint->node_attribute; } if(rsc_rh->allocated_to) { value = g_hash_table_lookup( rsc_rh->allocated_to->details->attrs, attribute); do_check = TRUE; } else if(constraint->score < 0) { /* nothing to do: * anti-colocation with something thats not running */ return; } slist_iter( node, node_t, rsc_lh->allowed_nodes, lpc, tmp = g_hash_table_lookup(node->details->attrs, attribute); if(do_check && safe_str_eq(tmp, value)) { if(constraint->score < INFINITY) { crm_debug_2("%s: %s.%s += %d", constraint->id, rsc_lh->id, node->details->uname, constraint->score); node->weight = merge_weights( constraint->score, node->weight); } } else if(do_check == FALSE || constraint->score >= INFINITY) { crm_debug_2("%s: %s.%s -= %d (%s)", constraint->id, rsc_lh->id, node->details->uname, constraint->score, do_check?"failed":"unallocated"); node->weight = merge_weights(-constraint->score, node->weight); } ); } void native_rsc_colocation_rh( resource_t *rsc_lh, resource_t *rsc_rh, rsc_colocation_t *constraint) { crm_debug_2("%sColocating %s with %s (%s, weight=%d)", 
constraint->score >= 0?"":"Anti-", rsc_lh->id, rsc_rh->id, constraint->id, constraint->score); if(filter_colocation_constraint(rsc_lh, rsc_rh, constraint) == FALSE) { return; } if(is_set(rsc_rh->flags, pe_rsc_provisional)) { return; } else if(is_not_set(rsc_lh->flags, pe_rsc_provisional)) { /* error check */ struct node_shared_s *details_lh; struct node_shared_s *details_rh; if((constraint->score > -INFINITY) && (constraint->score < INFINITY)) { return; } details_rh = rsc_rh->allocated_to?rsc_rh->allocated_to->details:NULL; details_lh = rsc_lh->allocated_to?rsc_lh->allocated_to->details:NULL; if(constraint->score == INFINITY && details_lh != details_rh) { crm_err("%s and %s are both allocated" " but to different nodes: %s vs. %s", rsc_lh->id, rsc_rh->id, details_lh?details_lh->uname:"n/a", details_rh?details_rh->uname:"n/a"); } else if(constraint->score == -INFINITY && details_lh == details_rh) { crm_err("%s and %s are both allocated" " but to the SAME node: %s", rsc_lh->id, rsc_rh->id, details_rh?details_rh->uname:"n/a"); } return; } else { colocation_match(rsc_lh, rsc_rh, constraint); } } static GListPtr find_actions_by_task(GListPtr actions, resource_t *rsc, const char *original_key) { GListPtr list = NULL; list = find_actions(actions, original_key, NULL); if(list == NULL) { /* we're potentially searching a child of the original resource */ char *key = NULL; char *tmp = NULL; char *task = NULL; int interval = 0; if(parse_op_key(original_key, &tmp, &task, &interval)) { key = generate_op_key(rsc->id, task, interval); /* crm_err("looking up %s instead of %s", key, original_key); */ /* slist_iter(action, action_t, actions, lpc, */ /* crm_err(" - %s", action->uuid)); */ list = find_actions(actions, key, NULL); } else { crm_err("search key: %s", original_key); } crm_free(key); crm_free(tmp); crm_free(task); } return list; } void native_rsc_order_lh(resource_t *lh_rsc, order_constraint_t *order, pe_working_set_t *data_set) { GListPtr lh_actions = NULL; action_t 
*lh_action = order->lh_action; resource_t *rh_rsc = order->rh_rsc; crm_debug_3("Processing LH of ordering constraint %d", order->id); CRM_ASSERT(lh_rsc != NULL); if(lh_action != NULL) { lh_actions = g_list_append(NULL, lh_action); } else if(lh_action == NULL) { lh_actions = find_actions_by_task( lh_rsc->actions, lh_rsc, order->lh_action_task); } if(lh_actions == NULL && lh_rsc != rh_rsc) { char *key = NULL; char *rsc_id = NULL; char *op_type = NULL; int interval = 0; crm_debug_4("No LH-Side (%s/%s) found for constraint %d with %s - creating", lh_rsc->id, order->lh_action_task, order->id, order->rh_action_task); parse_op_key( order->lh_action_task, &rsc_id, &op_type, &interval); key = generate_op_key(lh_rsc->id, op_type, interval); lh_action = custom_action(lh_rsc, key, op_type, NULL, TRUE, TRUE, data_set); if(lh_rsc->fns->state(lh_rsc, TRUE) == RSC_ROLE_STOPPED && safe_str_eq(op_type, RSC_STOP)) { lh_action->pseudo = TRUE; lh_action->runnable = TRUE; } lh_actions = g_list_append(NULL, lh_action); crm_free(op_type); crm_free(rsc_id); } slist_iter( lh_action_iter, action_t, lh_actions, lpc, if(rh_rsc == NULL && order->rh_action) { rh_rsc = order->rh_action->rsc; } if(rh_rsc) { rh_rsc->cmds->rsc_order_rh( lh_action_iter, rh_rsc, order); } else if(order->rh_action) { order_actions( lh_action_iter, order->rh_action, order->type); } ); pe_free_shallow_adv(lh_actions, FALSE); } void native_rsc_order_rh( action_t *lh_action, resource_t *rsc, order_constraint_t *order) { GListPtr rh_actions = NULL; action_t *rh_action = NULL; CRM_CHECK(rsc != NULL, return); CRM_CHECK(order != NULL, return); rh_action = order->rh_action; crm_debug_3("Processing RH of ordering constraint %d", order->id); if(rh_action != NULL) { rh_actions = g_list_append(NULL, rh_action); } else if(rsc != NULL) { rh_actions = find_actions_by_task( rsc->actions, rsc, order->rh_action_task); } if(rh_actions == NULL) { crm_debug_4("No RH-Side (%s/%s) found for constraint..." 
" ignoring", rsc->id,order->rh_action_task); if(lh_action) { crm_debug_4("LH-Side was: %s", lh_action->uuid); } return; } slist_iter( rh_action_iter, action_t, rh_actions, lpc, if(lh_action) { order_actions(lh_action, rh_action_iter, order->type); } else if(order->type & pe_order_implies_right) { rh_action_iter->runnable = FALSE; crm_warn("Unrunnable %s 0x%.6x", rh_action_iter->uuid, order->type); } else { crm_warn("neither %s 0x%.6x", rh_action_iter->uuid, order->type); } ); pe_free_shallow_adv(rh_actions, FALSE); } void native_rsc_location(resource_t *rsc, rsc_to_node_t *constraint) { GListPtr or_list; crm_debug_2("Applying %s (%s) to %s", constraint->id, role2text(constraint->role_filter), rsc->id); /* take "lifetime" into account */ if(constraint == NULL) { pe_err("Constraint is NULL"); return; } else if(rsc == NULL) { pe_err("LHS of rsc_to_node (%s) is NULL", constraint->id); return; } else if(constraint->role_filter > 0 && constraint->role_filter != rsc->next_role) { crm_debug("Constraint (%s) is not active (role : %s)", constraint->id, role2text(constraint->role_filter)); return; } else if(is_active(constraint) == FALSE) { crm_debug_2("Constraint (%s) is not active", constraint->id); return; } if(constraint->node_list_rh == NULL) { crm_debug_2("RHS of constraint %s is NULL", constraint->id); return; } or_list = node_list_or( rsc->allowed_nodes, constraint->node_list_rh, FALSE); pe_free_shallow(rsc->allowed_nodes); rsc->allowed_nodes = or_list; slist_iter(node, node_t, or_list, lpc, crm_debug_3("%s + %s : %d", rsc->id, node->details->uname, node->weight); ); } void native_expand(resource_t *rsc, pe_working_set_t *data_set) { crm_debug_3("Processing actions from %s", rsc->id); slist_iter( action, action_t, rsc->actions, lpc, crm_debug_4("processing action %d for rsc=%s", action->id, rsc->id); graph_element_from_action(action, data_set); ); slist_iter( child_rsc, resource_t, rsc->children, lpc, child_rsc->cmds->expand(child_rsc, data_set); ); } void 
LogActions(resource_t *rsc, pe_working_set_t *data_set) { node_t *next = NULL; node_t *current = NULL; gboolean moving = FALSE; if(rsc->children) { slist_iter( child_rsc, resource_t, rsc->children, lpc, LogActions(child_rsc, data_set); ); return; } next = rsc->allocated_to; if(rsc->running_on) { current = rsc->running_on->data; if(rsc->role == RSC_ROLE_STOPPED) { /* * This can occur when resources are being recovered * We fiddle with the current role in native_create_actions() */ rsc->role = RSC_ROLE_STARTED; } } if(current == NULL && is_set(rsc->flags, pe_rsc_orphan)) { /* Don't log stopped orphans */ return; } if(is_not_set(rsc->flags, pe_rsc_managed) || (current == NULL && next == NULL)) { crm_notice("Leave resource %s\t(%s%s)", rsc->id, role2text(rsc->role), is_not_set(rsc->flags, pe_rsc_managed)?" unmanaged":""); return; } if(current != NULL && next != NULL && safe_str_neq(current->details->id, next->details->id)) { moving = TRUE; } if(rsc->role == rsc->next_role) { action_t *start = NULL; char *key = start_key(rsc); GListPtr possible_matches = find_actions(rsc->actions, key, next); crm_free(key); if(possible_matches) { start = possible_matches->data; g_list_free(possible_matches); } key = generate_op_key(rsc->id, CRMD_ACTION_MIGRATED, 0); possible_matches = find_actions(rsc->actions, key, next); crm_free(key); CRM_CHECK(next != NULL,); if(next == NULL) { } else if(possible_matches) { crm_notice("Migrate resource %s\t(%s %s -> %s)", rsc->id, role2text(rsc->role), current->details->uname, next->details->uname); g_list_free(possible_matches); } else if(start == NULL || start->optional) { crm_notice("Leave resource %s\t(%s %s)", rsc->id, role2text(rsc->role), next->details->uname); } else if(moving && current) { crm_notice("Move resource %s\t(%s %s -> %s)", rsc->id, role2text(rsc->role), current->details->uname, next->details->uname); } else if(is_set(rsc->flags, pe_rsc_failed)) { crm_notice("Recover resource %s\t(%s %s)", rsc->id, role2text(rsc->role), 
next->details->uname); } else if(start && start->runnable == FALSE) { crm_notice("Stop resource %s\t(%s %s)", rsc->id, role2text(rsc->role), next->details->uname); } else { crm_notice("Restart resource %s\t(%s %s)", rsc->id, role2text(rsc->role), next->details->uname); } return; } if(rsc->role > RSC_ROLE_SLAVE && rsc->role > rsc->next_role) { CRM_CHECK(current != NULL,); if(current != NULL) { crm_notice("Demote %s\t(%s -> %s %s)", rsc->id, role2text(rsc->role), role2text(rsc->next_role), current->details->uname); } } if(rsc->next_role == RSC_ROLE_STOPPED || moving) { CRM_CHECK(current != NULL,); slist_iter(node, node_t, rsc->running_on, lpc, crm_notice("Stop resource %s\t(%s)", rsc->id, node->details->uname)); } if(rsc->role == RSC_ROLE_STOPPED || moving) { CRM_CHECK(next != NULL,); if(next != NULL) { crm_notice("Start %s\t(%s)", rsc->id, next->details->uname); } } if(rsc->next_role > RSC_ROLE_SLAVE && rsc->role < rsc->next_role) { CRM_CHECK(next != NULL,); crm_notice("Promote %s\t(%s -> %s %s)", rsc->id, role2text(rsc->role), role2text(rsc->next_role), next->details->uname); } } void NoRoleChange(resource_t *rsc, node_t *current, node_t *next, pe_working_set_t *data_set) { action_t *stop = NULL; action_t *start = NULL; GListPtr possible_matches = NULL; crm_debug_2("Executing: %s (role=%s)", rsc->id, role2text(rsc->next_role)); if(current == NULL || next == NULL) { return; } if(is_set(rsc->flags, pe_rsc_failed) || safe_str_neq(current->details->id, next->details->id)) { if(rsc->next_role > RSC_ROLE_STARTED) { gboolean optional = TRUE; if(rsc->role == RSC_ROLE_MASTER) { optional = FALSE; } DemoteRsc(rsc, current, optional, data_set); } if(rsc->role == RSC_ROLE_MASTER) { DemoteRsc(rsc, current, FALSE, data_set); } StopRsc(rsc, current, FALSE, data_set); StartRsc(rsc, next, FALSE, data_set); if(rsc->next_role == RSC_ROLE_MASTER) { PromoteRsc(rsc, next, FALSE, data_set); } possible_matches = find_recurring_actions(rsc->actions, next); slist_iter(match, action_t, 
possible_matches, lpc, if(match->optional == FALSE) { crm_debug("Fixing recurring action: %s", match->uuid); match->optional = TRUE; } ); g_list_free(possible_matches); } else if(is_set(rsc->flags, pe_rsc_start_pending)) { start = start_action(rsc, next, TRUE); if(start->runnable) { /* wait for StartRsc() to be called */ rsc->role = RSC_ROLE_STOPPED; } else { /* wait for StopRsc() to be called */ rsc->next_role = RSC_ROLE_STOPPED; } } else { stop = stop_action(rsc, current, TRUE); start = start_action(rsc, next, TRUE); stop->optional = start->optional; if(rsc->next_role > RSC_ROLE_STARTED) { DemoteRsc(rsc, current, start->optional, data_set); } StopRsc(rsc, current, start->optional, data_set); StartRsc(rsc, current, start->optional, data_set); if(rsc->next_role == RSC_ROLE_MASTER) { PromoteRsc(rsc, next, start->optional, data_set); } if(start->runnable == FALSE) { rsc->next_role = RSC_ROLE_STOPPED; } } } gboolean StopRsc(resource_t *rsc, node_t *next, gboolean optional, pe_working_set_t *data_set) { action_t *stop = NULL; const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS); crm_debug_2("Executing: %s", rsc->id); if(rsc->next_role == RSC_ROLE_STOPPED && rsc->variant == pe_native && safe_str_eq(class, "stonith")) { action_t *all_stopped = get_pseudo_op(ALL_STOPPED, data_set); custom_action_order( NULL, crm_strdup(all_stopped->task), all_stopped, rsc, stop_key(rsc), NULL, pe_order_implies_left|pe_order_stonith_stop, data_set); } slist_iter( current, node_t, rsc->running_on, lpc, stop = stop_action(rsc, current, optional); if(is_set(data_set->flags, pe_flag_remove_after_stop)) { DeleteRsc(rsc, current, optional, data_set); } ); return TRUE; } gboolean StartRsc(resource_t *rsc, node_t *next, gboolean optional, pe_working_set_t *data_set) { action_t *start = NULL; crm_debug_2("Executing: %s", rsc->id); start = start_action(rsc, next, TRUE); if(start->runnable && optional == FALSE) { start->optional = FALSE; } return TRUE; } gboolean 
PromoteRsc(resource_t *rsc, node_t *next, gboolean optional, pe_working_set_t *data_set) { char *key = NULL; gboolean runnable = TRUE; GListPtr action_list = NULL; crm_debug_2("Executing: %s", rsc->id); CRM_CHECK(rsc->next_role == RSC_ROLE_MASTER, crm_err("Next role: %s", role2text(rsc->next_role)); return FALSE); CRM_CHECK(next != NULL, return FALSE); key = start_key(rsc); action_list = find_actions_exact(rsc->actions, key, next); crm_free(key); slist_iter(start, action_t, action_list, lpc, if(start->runnable == FALSE) { runnable = FALSE; } ); g_list_free(action_list); if(runnable) { promote_action(rsc, next, optional); return TRUE; } crm_debug("%s\tPromote %s (canceled)", next->details->uname, rsc->id); key = promote_key(rsc); action_list = find_actions_exact(rsc->actions, key, next); crm_free(key); slist_iter(promote, action_t, action_list, lpc, promote->runnable = FALSE; ); g_list_free(action_list); return TRUE; } gboolean DemoteRsc(resource_t *rsc, node_t *next, gboolean optional, pe_working_set_t *data_set) { crm_debug_2("Executing: %s", rsc->id); /* CRM_CHECK(rsc->next_role == RSC_ROLE_SLAVE, return FALSE); */ slist_iter( current, node_t, rsc->running_on, lpc, demote_action(rsc, current, optional); ); return TRUE; } gboolean RoleError(resource_t *rsc, node_t *next, gboolean optional, pe_working_set_t *data_set) { crm_debug("Executing: %s", rsc->id); CRM_CHECK(FALSE, return FALSE); return FALSE; } gboolean NullOp(resource_t *rsc, node_t *next, gboolean optional, pe_working_set_t *data_set) { crm_debug_2("Executing: %s", rsc->id); return FALSE; } gboolean DeleteRsc(resource_t *rsc, node_t *node, gboolean optional, pe_working_set_t *data_set) { action_t *delete = NULL; #if DELETE_THEN_REFRESH action_t *refresh = NULL; #endif if(is_set(rsc->flags, pe_rsc_failed)) { crm_debug_2("Resource %s not deleted from %s: failed", rsc->id, node->details->uname); return FALSE; } else if(node == NULL) { crm_debug_2("Resource %s not deleted: NULL node", rsc->id); return FALSE; 
} else if(node->details->unclean || node->details->online == FALSE) { crm_debug_2("Resource %s not deleted from %s: unrunnable", rsc->id, node->details->uname); return FALSE; } crm_notice("Removing %s from %s", rsc->id, node->details->uname); delete = delete_action(rsc, node, optional); new_rsc_order(rsc, RSC_STOP, rsc, RSC_DELETE, optional?pe_order_implies_right:pe_order_implies_left, data_set); #if DELETE_THEN_REFRESH refresh = custom_action( NULL, crm_strdup(CRM_OP_LRM_REFRESH), CRM_OP_LRM_REFRESH, node, FALSE, TRUE, data_set); add_hash_param(refresh->meta, XML_ATTR_TE_NOWAIT, XML_BOOLEAN_TRUE); order_actions(delete, refresh, pe_order_optional); #endif return TRUE; } #include <../lib/pengine/unpack.h> gboolean native_create_probe(resource_t *rsc, node_t *node, action_t *complete, gboolean force, pe_working_set_t *data_set) { char *key = NULL; char *target_rc = NULL; action_t *probe = NULL; node_t *running = NULL; resource_t *top = uber_parent(rsc); CRM_CHECK(node != NULL, return FALSE); if(rsc->children) { gboolean any_created = FALSE; slist_iter( child_rsc, resource_t, rsc->children, lpc, any_created = child_rsc->cmds->create_probe( child_rsc, node, complete, force, data_set) || any_created; ); return any_created; } if(is_set(rsc->flags, pe_rsc_orphan)) { crm_debug_2("Skipping orphan: %s", rsc->id); return FALSE; } running = pe_find_node_id(rsc->known_on, node->details->id); if(force == FALSE && running != NULL) { /* we already know the status of the resource on this node */ crm_debug_3("Skipping active: %s", rsc->id); return FALSE; } if(running == NULL && is_set(top->flags, pe_rsc_unique) == FALSE) { /* Annoyingly we also need to check any other clone instances * Clumsy, but it will work. 
* * An alternative would be to update known_on for every peer * during process_rsc_state() */ char *clone_id = clone_zero(rsc->id); resource_t *peer = pe_find_resource(top->children, clone_id); while(peer && running == NULL) { running = pe_find_node_id(peer->known_on, node->details->id); if(force == FALSE && running != NULL) { /* we already know the status of the resource on this node */ crm_debug_3("Skipping active clone: %s", rsc->id); crm_free(clone_id); return FALSE; } clone_id = increment_clone(clone_id); peer = pe_find_resource(data_set->resources, clone_id); } crm_free(clone_id); } key = generate_op_key(rsc->id, RSC_STATUS, 0); probe = custom_action(rsc, key, RSC_STATUS, node, FALSE, TRUE, data_set); probe->optional = FALSE; running = pe_find_node_id(rsc->running_on, node->details->id); if(running == NULL) { target_rc = crm_itoa(EXECRA_NOT_RUNNING); } else if(rsc->role == RSC_ROLE_MASTER) { target_rc = crm_itoa(EXECRA_RUNNING_MASTER); } if(target_rc != NULL) { add_hash_param(probe->meta, XML_ATTR_TE_TARGET_RC, target_rc); crm_free(target_rc); } crm_debug("Probing %s on %s (%s)", rsc->id, node->details->uname, role2text(rsc->role)); order_actions(probe, complete, pe_order_implies_right); return TRUE; } static void native_start_constraints( resource_t *rsc, action_t *stonith_op, gboolean is_stonith, pe_working_set_t *data_set) { node_t *target = stonith_op?stonith_op->node:NULL; if(is_stonith) { char *key = start_key(rsc); action_t *ready = get_pseudo_op(STONITH_UP, data_set); crm_debug_2("Ordering %s action before stonith events", key); custom_action_order( rsc, key, NULL, NULL, crm_strdup(ready->task), ready, pe_order_optional, data_set); } else { action_t *all_stopped = get_pseudo_op(ALL_STOPPED, data_set); action_t *stonith_done = get_pseudo_op(STONITH_DONE, data_set); slist_iter(action, action_t, rsc->actions, lpc2, if(action->needs == rsc_req_stonith) { order_actions(stonith_done, action, pe_order_implies_left); } else if(target != NULL && 
safe_str_eq(action->task, RSC_START) && NULL == pe_find_node_id( rsc->known_on, target->details->id)) { /* if known == NULL, then we don't know if * the resource is active on the node * we're about to shoot * * in this case, regardless of action->needs, * the only safe option is to wait until * the node is shot before doing anything * with the resource * * it's analogous to waiting for all the probes * for rscX to complete before starting rscX * * the most likely explanation is that the * DC died and took its status with it */ crm_info("Ordering %s after %s recovery", action->uuid, target->details->uname); order_actions(all_stopped, action, pe_order_implies_left|pe_order_runnable_left); } ); } } static void native_stop_constraints( resource_t *rsc, action_t *stonith_op, gboolean is_stonith, pe_working_set_t *data_set) { char *key = NULL; GListPtr action_list = NULL; resource_t *top = uber_parent(rsc); key = stop_key(rsc); action_list = find_actions(rsc->actions, key, stonith_op->node); crm_free(key); /* add the stonith OP as a stop pre-req and mark the stop * as a pseudo op - since it's now redundant */ slist_iter( action, action_t, action_list, lpc2, resource_t *parent = NULL; if(action->node->details->online && action->node->details->unclean == FALSE && is_set(rsc->flags, pe_rsc_failed)) { continue; } if(is_set(rsc->flags, pe_rsc_failed)) { crm_warn("Stop of failed resource %s is" " implicit after %s is fenced", rsc->id, action->node->details->uname); } else { crm_info("%s is implicit after %s is fenced", action->uuid, action->node->details->uname); } /* the stop would never complete and is * now implied by the stonith operation */ action->pseudo = TRUE; action->runnable = TRUE; action->implied_by_stonith = TRUE; if(is_stonith == FALSE) { action_t *parent_stop = find_first_action(top->actions, NULL, RSC_STOP, NULL); order_actions(stonith_op, action, pe_order_optional); order_actions(stonith_op, parent_stop, pe_order_optional); } if(is_set(rsc->flags,
pe_rsc_notify)) { /* Create a second notification that will be delivered * immediately after the node is fenced * * Basic problem: * - C is a clone active on the node to be shot and stopping on another * - R is a resource that depends on C * * + C.stop depends on R.stop * + C.stopped depends on STONITH * + C.notify depends on C.stopped * + C.healthy depends on C.notify * + R.stop depends on C.healthy * * The extra notification here changes * + C.healthy depends on C.notify * into: * + C.healthy depends on C.notify' * + C.notify' depends on STONITH' * thus breaking the loop */ notify_data_t *n_data = create_notification_boundaries(rsc, RSC_STOP, NULL, stonith_op, data_set); crm_info("Creating secondary notification for %s", action->uuid); collect_notification_data(rsc, TRUE, FALSE, n_data); g_hash_table_insert(n_data->keys, crm_strdup("notify_stop_resource"), crm_strdup(rsc->id)); g_hash_table_insert(n_data->keys, crm_strdup("notify_stop_uname"), crm_strdup(action->node->details->uname)); create_notifications(uber_parent(rsc), n_data, data_set); free_notification_data(n_data); } /* find the top-most resource */ parent = rsc->parent; while(parent != NULL && parent->parent != NULL) { parent = parent->parent; } if(parent) { crm_debug_2("Re-creating actions for %s", parent->id); parent->cmds->create_actions(parent, data_set); /* make sure we dont mess anything up in create_actions */ CRM_CHECK(action->pseudo, action->pseudo = TRUE); CRM_CHECK(action->runnable, action->runnable = TRUE); } /* From Bug #1601, successful fencing must be an input to a failed resources stop action. However given group(rA, rB) running on nodeX and B.stop has failed, A := stop healthy resource (rA.stop) B := stop failed resource (pseudo operation B.stop) C := stonith nodeX A requires B, B requires C, C requires A This loop would prevent the cluster from making progress. This block creates the "C requires A" dependancy and therefore must (at least for now) be disabled. 
Instead, run the block above and treat all resources on nodeX as B would be (marked as a pseudo op depending on the STONITH). TODO: Break the "A requires B" dependancy in update_action() and re-enable this block } else if(is_stonith == FALSE) { crm_info("Moving healthy resource %s" " off %s before fencing", rsc->id, node->details->uname); * stop healthy resources before the * stonith op * custom_action_order( rsc, stop_key(rsc), NULL, NULL,crm_strdup(CRM_OP_FENCE),stonith_op, pe_order_optional, data_set); */ ); g_list_free(action_list); key = demote_key(rsc); action_list = find_actions(rsc->actions, key, stonith_op->node); crm_free(key); slist_iter( action, action_t, action_list, lpc2, if(action->node->details->online == FALSE || is_set(rsc->flags, pe_rsc_failed)) { crm_info("Demote of failed resource %s is" " implict after %s is fenced", rsc->id, action->node->details->uname); /* the stop would never complete and is * now implied by the stonith operation */ action->pseudo = TRUE; action->runnable = TRUE; if(is_stonith == FALSE) { order_actions(stonith_op, action, pe_order_optional); } } ); g_list_free(action_list); } void complex_stonith_ordering( resource_t *rsc, action_t *stonith_op, pe_working_set_t *data_set) { gboolean is_stonith = FALSE; const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS); if(rsc->children) { slist_iter( child_rsc, resource_t, rsc->children, lpc, child_rsc->cmds->stonith_ordering( child_rsc, stonith_op, data_set); ); return; } if(is_not_set(rsc->flags, pe_rsc_managed)) { crm_debug_3("Skipping fencing constraints for unmanaged resource: %s", rsc->id); return; } if(stonith_op != NULL && safe_str_eq(class, "stonith")) { is_stonith = TRUE; } /* Start constraints */ native_start_constraints(rsc, stonith_op, is_stonith, data_set); /* Stop constraints */ native_stop_constraints(rsc, stonith_op, is_stonith, data_set); } #define ALLOW_WEAK_MIGRATION 0 enum stack_activity { stack_stable = 0, stack_starting = 1, stack_stopping = 2, 
stack_middle = 4, }; static enum stack_activity find_clone_activity_on(resource_t *rsc, resource_t *target, node_t *node, const char *type) { int mode = stack_stable; action_t *active = NULL; if(target->children) { slist_iter( child, resource_t, target->children, lpc, mode |= find_clone_activity_on(rsc, child, node, type); ); return mode; } active = find_first_action(target->actions, NULL, CRMD_ACTION_START, NULL); if(active && active->optional == FALSE && active->pseudo == FALSE) { crm_debug("%s: found scheduled %s action (%s)", rsc->id, active->uuid, type); mode |= stack_starting; } active = find_first_action(target->actions, NULL, CRMD_ACTION_STOP, node); if(active && active->optional == FALSE && active->pseudo == FALSE) { crm_debug("%s: found scheduled %s action (%s)", rsc->id, active->uuid, type); mode |= stack_stopping; } return mode; } static enum stack_activity check_stack_element(resource_t *rsc, resource_t *other_rsc, const char *type) { if(other_rsc == NULL || other_rsc == rsc) { return stack_stable; } else if(other_rsc->variant == pe_native) { crm_notice("Cannot migrate %s due to dependancy on %s (%s)", rsc->id, other_rsc->id, type); return stack_middle; } else if(other_rsc == rsc->parent) { int mode = 0; slist_iter(constraint, rsc_colocation_t, other_rsc->rsc_cons, lpc, if(constraint->score > 0) { mode |= check_stack_element(rsc, constraint->rsc_rh, type); } ); return mode; } else if(other_rsc->variant == pe_group) { crm_notice("Cannot migrate %s due to dependancy on group %s (%s)", rsc->id, other_rsc->id, type); return stack_middle; } /* else: >= clone */ /* ## Assumption A depends on clone(B) ## Resource Activity During Move N1 N2 N3 --- --- --- t0 A.stop t1 B.stop B.stop t2 B.start B.start t3 A.start ## Resource Activity During Migration N1 N2 N3 --- --- --- t0 B.start B.start t1 A.stop (1) t2 A.start (2) t3 B.stop B.stop Node 1: Rewritten to be a migrate-to operation Node 2: Rewritten to be a migrate-from operation # Constraints The following 
constraints already exist in the system. The 'ok' and 'fail' columns refer to whether they still hold for migration. a) A.stop -> A.start - ok b) B.stop -> B.start - fail c) A.stop -> B.stop - ok d) B.start -> A.start - ok e) B.stop -> A.start - fail f) A.stop -> B.start - fail ## Scenarios B unchanged - ok B stopping only - fail - possible after fixing 'e' B starting only - fail - possible after fixing 'f' B stopping and starting - fail - constraint 'b' is unfixable B restarting only on N2 - fail - as-per previous only rarer */ /* Only allow migration when the clone is either stable, only starting or only stopping */ return find_clone_activity_on(rsc, other_rsc, NULL, type); } static gboolean at_stack_bottom(resource_t *rsc) { char *key = NULL; action_t *start = NULL; action_t *other = NULL; int mode = stack_stable; GListPtr action_list = NULL; key = start_key(rsc); action_list = find_actions(rsc->actions, key, NULL); crm_free(key); crm_debug_3("%s: processing", rsc->id); CRM_CHECK(action_list != NULL, return FALSE); start = action_list->data; g_list_free(action_list); slist_iter( constraint, rsc_colocation_t, rsc->rsc_cons, lpc, resource_t *target = constraint->rsc_rh; crm_debug_4("Checking %s: %s == %s (%d)", constraint->id, rsc->id, target->id, constraint->score); if(constraint->score > 0) { mode |= check_stack_element(rsc, target, "coloc"); if(mode & stack_middle) { return FALSE; } else if((mode & stack_stopping) && (mode & stack_starting)) { crm_notice("Cannot migrate %s due to colocation activity (last was %s)", rsc->id, target->id); return FALSE; } } ); slist_iter( other_w, action_wrapper_t, start->actions_before, lpc, other = other_w->action; #if ALLOW_WEAK_MIGRATION if((other_w->type & pe_order_implies_right) == 0) { crm_debug_3("%s: depends on %s (optional ordering)", rsc->id, other->uuid); continue; } #endif crm_debug_2("%s: Checking %s ordering", rsc->id, other->uuid); if(other->optional == FALSE) { mode |= check_stack_element(rsc, other->rsc, "order");
if(mode & stack_middle) { return FALSE; } else if((mode & stack_stopping) && (mode & stack_starting)) { crm_notice("Cannot migrate %s due to ordering activity (last was %s)", rsc->id, other->rsc->id); return FALSE; } } ); return TRUE; } void complex_migrate_reload(resource_t *rsc, pe_working_set_t *data_set) { char *key = NULL; int level = LOG_DEBUG; GListPtr action_list = NULL; action_t *stop = NULL; action_t *start = NULL; action_t *other = NULL; action_t *action = NULL; const char *value = NULL; if(rsc->children) { slist_iter( child_rsc, resource_t, rsc->children, lpc, child_rsc->cmds->migrate_reload(child_rsc, data_set); ); other = NULL; return; } else if(rsc->variant > pe_native) { return; } do_crm_log_unlikely(level+1, "Processing %s", rsc->id); if(is_not_set(rsc->flags, pe_rsc_managed) || is_set(rsc->flags, pe_rsc_failed) || is_set(rsc->flags, pe_rsc_start_pending) || rsc->next_role < RSC_ROLE_STARTED || g_list_length(rsc->running_on) != 1) { do_crm_log_unlikely( level+1, "%s: general resource state: flags=0x%.16llx", rsc->id, rsc->flags); return; } value = g_hash_table_lookup(rsc->meta, XML_OP_ATTR_ALLOW_MIGRATE); if(crm_is_true(value)) { set_bit(rsc->flags, pe_rsc_can_migrate); } if(rsc->next_role > RSC_ROLE_SLAVE) { clear_bit(rsc->flags, pe_rsc_can_migrate); do_crm_log_unlikely( level+1, "%s: resource role: role=%s", rsc->id, role2text(rsc->next_role)); } key = start_key(rsc); action_list = find_actions(rsc->actions, key, NULL); crm_free(key); if(action_list == NULL) { do_crm_log_unlikely(level, "%s: no start action", rsc->id); return; } start = action_list->data; g_list_free(action_list); if(is_not_set(rsc->flags, pe_rsc_can_migrate) && start->allow_reload_conversion == FALSE) { do_crm_log_unlikely(level+1, "%s: no need to continue", rsc->id); return; } key = stop_key(rsc); action_list = find_actions(rsc->actions, key, NULL); crm_free(key); if(action_list == NULL) { do_crm_log_unlikely(level, "%s: no stop action", rsc->id); return; } stop = 
action_list->data; g_list_free(action_list); action = start; if(action->pseudo || action->optional || action->node == NULL || action->runnable == FALSE) { do_crm_log_unlikely(level, "%s: %s", rsc->id, action->task); return; } action = stop; if(action->pseudo || action->optional || action->node == NULL || action->runnable == FALSE) { do_crm_log_unlikely(level, "%s: %s", rsc->id, action->task); return; } if(is_set(rsc->flags, pe_rsc_can_migrate)) { if(start->node == NULL || stop->node == NULL || stop->node->details == start->node->details) { clear_bit(rsc->flags, pe_rsc_can_migrate); } else if(at_stack_bottom(rsc) == FALSE) { clear_bit(rsc->flags, pe_rsc_can_migrate); } } if(is_set(rsc->flags, pe_rsc_can_migrate)) { crm_info("Migrating %s from %s to %s", rsc->id, stop->node->details->uname, start->node->details->uname); crm_free(stop->uuid); crm_free(stop->task); stop->task = crm_strdup(RSC_MIGRATE); stop->uuid = generate_op_key(rsc->id, stop->task, 0); add_hash_param(stop->meta, "migrate_source", stop->node->details->uname); add_hash_param(stop->meta, "migrate_target", start->node->details->uname); /* Create the correct ordering ajustments based on find_clone_activity_on(); */ slist_iter( constraint, rsc_colocation_t, rsc->rsc_cons, lpc, resource_t *target = constraint->rsc_rh; crm_info("Repairing %s: %s == %s (%d)", constraint->id, rsc->id, target->id, constraint->score); if(constraint->score > 0) { int mode = check_stack_element(rsc, target, "coloc"); action_t *clone_stop = find_first_action(target->actions, NULL, RSC_STOP, NULL); action_t *clone_start = find_first_action(target->actions, NULL, RSC_STARTED, NULL); CRM_ASSERT(clone_stop != NULL); CRM_ASSERT(clone_start != NULL); CRM_ASSERT((mode & stack_middle) == 0); CRM_ASSERT(((mode & stack_stopping) && (mode & stack_starting)) == 0); if(mode & stack_stopping) { action_t *clone_stop = find_first_action(target->actions, NULL, RSC_STOP, NULL); action_t *clone_start = find_first_action(target->actions, NULL, 
RSC_STARTED, NULL); crm_debug("Creating %s.start -> %s.stop ordering", rsc->id, target->id); order_actions(start, clone_stop, pe_order_optional); slist_iter( other_w, action_wrapper_t, start->actions_before, lpc2, /* Needed if the clone's started pseudo-action ever gets printed in the graph */ if(other_w->action == clone_start) { crm_debug("Breaking %s -> %s ordering", other_w->action->uuid, start->uuid); other_w->type = pe_order_none; } ); } else if(mode & stack_starting) { crm_debug("Creating %s.started -> %s.stop ordering", target->id, rsc->id); order_actions(clone_start, stop, pe_order_optional); slist_iter( other_w, action_wrapper_t, clone_stop->actions_before, lpc2, /* Needed if the clone's stop pseudo-action ever gets printed in the graph */ if(other_w->action == stop) { crm_debug("Breaking %s -> %s ordering", other_w->action->uuid, clone_stop->uuid); other_w->type = pe_order_none; } ); } } ); crm_free(start->uuid); crm_free(start->task); start->task = crm_strdup(RSC_MIGRATED); start->uuid = generate_op_key(rsc->id, start->task, 0); add_hash_param(start->meta, "migrate_source_uuid", stop->node->details->id); add_hash_param(start->meta, "migrate_source", stop->node->details->uname); add_hash_param(start->meta, "migrate_target", start->node->details->uname); /* Anything that needed stop to complete, now also needs start to have completed */ slist_iter( other_w, action_wrapper_t, stop->actions_after, lpc, other = other_w->action; if(other->optional || other->rsc != NULL) { continue; } crm_debug("Ordering %s before %s (stop)", start->uuid, other_w->action->uuid); order_actions(start, other, other_w->type); ); /* Stop also needs anything that the start needed to have completed too */ slist_iter( other_w, action_wrapper_t, start->actions_before, lpc, other = other_w->action; if(other->rsc == NULL) { /* nothing */ } else if(other->optional || other->rsc == rsc || other->rsc == rsc->parent) { continue; } crm_debug("Ordering %s before %s (start)", 
other_w->action->uuid, stop->uuid); order_actions(other, stop, other_w->type); ); } else if(start && stop && start->allow_reload_conversion && stop->node->details == start->node->details) { action_t *rewrite = NULL; start->pseudo = TRUE; /* easier than trying to delete it from the graph */ action = NULL; key = promote_key(rsc); action_list = find_actions(rsc->actions, key, NULL); if(action_list) { action = action_list->data; } if(action && action->optional == FALSE) { action->pseudo = TRUE; } g_list_free(action_list); crm_free(key); action = NULL; key = demote_key(rsc); action_list = find_actions(rsc->actions, key, NULL); if(action_list) { action = action_list->data; } g_list_free(action_list); crm_free(key); if(action && action->optional == FALSE) { rewrite = action; stop->pseudo = TRUE; } else { rewrite = stop; } crm_info("Rewriting %s of %s on %s as a reload", rewrite->task, rsc->id, stop->node->details->uname); crm_free(rewrite->uuid); crm_free(rewrite->task); rewrite->task = crm_strdup("reload"); rewrite->uuid = generate_op_key(rsc->id, rewrite->task, 0); } else { do_crm_log_unlikely(level+1, "%s nothing to do", rsc->id); } } diff --git a/pengine/regression.sh b/pengine/regression.sh index 1424c5627f..94c29fe656 100755 --- a/pengine/regression.sh +++ b/pengine/regression.sh @@ -1,338 +1,340 @@ #!/bin/bash # Copyright (C) 2004 Andrew Beekhof # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This software is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. 
# # You should have received a copy of the GNU General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA # if [ -x /usr/bin/valgrind ]; then export G_SLICE=always-malloc VALGRIND_CMD="valgrind -q --show-reachable=no --leak-check=full --trace-children=no --time-stamp=yes --num-callers=20 --suppressions=./ptest.supp" fi . regression.core.sh create_mode="true" echo Generating test outputs for these tests... # do_test file description echo Done. echo "" echo Performing the following tests... create_mode="false" echo "" do_test simple1 "Offline " do_test simple2 "Start " do_test simple3 "Start 2 " do_test simple4 "Start Failed" do_test simple6 "Stop Start " do_test simple7 "Shutdown " #do_test simple8 "Stonith " #do_test simple9 "Lower version" #do_test simple10 "Higher version" do_test simple11 "Priority (ne)" do_test simple12 "Priority (eq)" do_test simple8 "Stickiness" echo "" do_test params-0 "Params: No change" do_test params-1 "Params: Changed" do_test params-2 "Params: Resource definition" do_test params-4 "Params: Reload" do_test novell-251689 "Resource definition change + target_role=stopped" do_test bug-lf-2106 "Restart all anonymous clone instances after config change" echo "" do_test orphan-0 "Orphan ignore" do_test orphan-1 "Orphan stop" echo "" do_test target-0 "Target Role : baseline" do_test target-1 "Target Role : test" echo "" do_test date-1 "Dates" -d "2005-020" do_test date-2 "Date Spec - Pass" -d "2005-020T12:30" do_test date-3 "Date Spec - Fail" -d "2005-020T11:30" do_test probe-0 "Probe (anon clone)" do_test probe-1 "Pending Probe" do_test probe-2 "Correctly re-probe cloned groups" do_test standby "Standby" do_test comments "Comments" echo "" do_test rsc_dep1 "Must not " do_test rsc_dep3 "Must " do_test rsc_dep5 "Must not 3 " do_test rsc_dep7 "Must 3 " do_test rsc_dep10 "Must (but cant)" do_test rsc_dep2 "Must (running) " do_test rsc_dep8 "Must 
(running : alt) " do_test rsc_dep4 "Must (running + move)" do_test asymmetric "Asymmetric - require explicit location constraints" echo "" do_test order1 "Order start 1 " do_test order2 "Order start 2 " do_test order3 "Order stop " do_test order4 "Order (multiple) " do_test order5 "Order (move) " do_test order6 "Order (move w/ restart) " do_test order7 "Order (manditory) " do_test order-optional "Order (score=0) " do_test order-required "Order (score=INFINITY) " do_test bug-lf-2171 "Prevent group start when clone is stopped" do_test order-clone "Clone ordering should be able to prevent startup of dependant clones" do_test order-sets "Ordering for resource sets" echo "" do_test coloc-loop "Colocation - loop" do_test coloc-many-one "Colocation - many-to-one" do_test coloc-list "Colocation - many-to-one with list" do_test coloc-group "Colocation - groups" do_test coloc-slave-anti "Anti-colocation with slave shouldn't prevent master colocation" do_test coloc-attr "Colocation based on node attributes" +do_test coloc-negative-group "Negative colocation with a group" #echo "" #do_test agent1 "version: lt (empty)" #do_test agent2 "version: eq " #do_test agent3 "version: gt " echo "" do_test attrs1 "string: eq (and) " do_test attrs2 "string: lt / gt (and)" do_test attrs3 "string: ne (or) " do_test attrs4 "string: exists " do_test attrs5 "string: not_exists " do_test attrs6 "is_dc: true " do_test attrs7 "is_dc: false " do_test attrs8 "score_attribute " echo "" do_test mon-rsc-1 "Schedule Monitor - start" do_test mon-rsc-2 "Schedule Monitor - move " do_test mon-rsc-3 "Schedule Monitor - pending start " do_test mon-rsc-4 "Schedule Monitor - move/pending start" echo "" do_test rec-rsc-0 "Resource Recover - no start " do_test rec-rsc-1 "Resource Recover - start " do_test rec-rsc-2 "Resource Recover - monitor " do_test rec-rsc-3 "Resource Recover - stop - ignore" do_test rec-rsc-4 "Resource Recover - stop - block " do_test rec-rsc-5 "Resource Recover - stop - fence " do_test 
rec-rsc-6 "Resource Recover - multiple - restart" do_test rec-rsc-7 "Resource Recover - multiple - stop " do_test rec-rsc-8 "Resource Recover - multiple - block " do_test rec-rsc-9 "Resource Recover - group/group" echo "" do_test quorum-1 "No quorum - ignore" do_test quorum-2 "No quorum - freeze" do_test quorum-3 "No quorum - stop " do_test quorum-4 "No quorum - start anyway" do_test quorum-5 "No quorum - start anyway (group)" do_test quorum-6 "No quorum - start anyway (clone)" echo "" do_test rec-node-1 "Node Recover - Startup - no fence" do_test rec-node-2 "Node Recover - Startup - fence " do_test rec-node-3 "Node Recover - HA down - no fence" do_test rec-node-4 "Node Recover - HA down - fence " do_test rec-node-5 "Node Recover - CRM down - no fence" do_test rec-node-6 "Node Recover - CRM down - fence " do_test rec-node-7 "Node Recover - no quorum - ignore " do_test rec-node-8 "Node Recover - no quorum - freeze " do_test rec-node-9 "Node Recover - no quorum - stop " do_test rec-node-10 "Node Recover - no quorum - stop w/fence" do_test rec-node-11 "Node Recover - CRM down w/ group - fence " do_test rec-node-12 "Node Recover - nothing active - fence " do_test rec-node-13 "Node Recover - failed resource + shutdown - fence " do_test rec-node-15 "Node Recover - unknown lrm section" do_test rec-node-14 "Serialize all stonith's" echo "" do_test multi1 "Multiple Active (stop/start)" echo "" do_test migrate-stop "Migration in a stopping stack" do_test migrate-start "Migration in a starting stack" do_test migrate-stop_start "Migration in a restarting stack" do_test migrate-stop-complex "Migration in a complex stopping stack" do_test migrate-start-complex "Migration in a complex starting stack" do_test migrate-1 "Migrate (migrate)" do_test migrate-2 "Migrate (stable)" do_test migrate-3 "Migrate (failed migrate_to)" do_test migrate-4 "Migrate (failed migrate_from)" do_test novell-252693 "Migration in a stopping stack" do_test novell-252693-2 "Migration in a starting stack" 
do_test novell-252693-3 "Non-Migration in a starting and stopping stack" do_test bug-1820 "Migration in a group" do_test bug-1820-1 "Non-migration in a group" do_test migrate-5 "Primitive migration with a clone" #echo "" #do_test complex1 "Complex " echo "" do_test group1 "Group " do_test group2 "Group + Native " do_test group3 "Group + Group " do_test group4 "Group + Native (nothing)" do_test group5 "Group + Native (move) " do_test group6 "Group + Group (move) " do_test group7 "Group colocation" do_test group13 "Group colocation (cant run)" do_test group8 "Group anti-colocation" do_test group9 "Group recovery" do_test group10 "Group partial recovery" do_test group11 "Group target_role" do_test group14 "Group stop (graph terminated)" do_test group15 "-ve group colocation" do_test bug-1573 "Partial stop of a group with two children" do_test bug-1718 "Mandatory group ordering - Stop group_FUN" echo "" do_test clone-anon-probe-1 "Probe the correct (anonymous) clone instance for each node" do_test clone-anon-probe-2 "Avoid needless re-probing of anonymous clones" do_test inc0 "Incarnation start" do_test inc1 "Incarnation start order" do_test inc2 "Incarnation silent restart, stop, move" do_test inc3 "Inter-incarnation ordering, silent restart, stop, move" do_test inc4 "Inter-incarnation ordering, silent restart, stop, move (ordered)" do_test inc5 "Inter-incarnation ordering, silent restart, stop, move (restart 1)" do_test inc6 "Inter-incarnation ordering, silent restart, stop, move (restart 2)" do_test inc7 "Clone colocation" do_test inc8 "Clone anti-colocation" do_test inc9 "Non-unique clone" do_test inc10 "Non-unique clone (stop)" do_test inc11 "Primitive colocation with clones" do_test inc12 "Clone shutdown" do_test cloned-group "Make sure only the correct number of cloned groups are started" do_test clone-no-shuffle "Dont prioritize allocation of instances that must be moved" do_test clone-max-zero "Orphan processing with clone-max=0" do_test clone-anon-dup "Bug 
LF#2087 - Correctly parse the state of anonymous clones that are active more than once per node" do_test bug-lf-2160 "Dont shuffle clones due to colocation" do_test bug-lf-2213 "clone-node-max enforcement for cloned groups" +do_test bug-lf-2153 "Clone ordering constraints" echo "" do_test master-0 "Stopped -> Slave" do_test master-1 "Stopped -> Promote" do_test master-2 "Stopped -> Promote : notify" do_test master-3 "Stopped -> Promote : master location" do_test master-4 "Started -> Promote : master location" do_test master-5 "Promoted -> Promoted" do_test master-6 "Promoted -> Promoted (2)" do_test master-7 "Promoted -> Fenced" do_test master-8 "Promoted -> Fenced -> Moved" do_test master-9 "Stopped + Promotable + No quorum" do_test master-10 "Stopped -> Promotable : notify with monitor" do_test master-11 "Stopped -> Promote : colocation" do_test novell-239082 "Demote/Promote ordering" do_test novell-239087 "Stable master placement" do_test master-12 "Promotion based solely on rsc_location constraints" do_test master-13 "Include preferences of colocated resources when placing master" do_test master-demote "Ordering when actions depends on demoting a slave resource" do_test master-ordering "Prevent resources from starting that need a master" do_test bug-1765 "Master-Master Colocation (dont stop the slaves)" do_test master-group "Promotion of cloned groups" do_test bug-lf-1852 "Don't shuffle master/slave instances unnecessarily" do_test master-failed-demote "Dont retry failed demote actions" do_test master-failed-demote-2 "Dont retry failed demote actions (notify=false)" do_test master-depend "Ensure resources that depend on the master don't get allocated until the master does" do_test master-reattach "Re-attach to a running master" do_test master-allow-start "Don't include master score if it would prevent allocation" do_test master-colocation "Allow master instances placemaker to be influenced by colocation constraints" do_test master-pseudo "Make sure 
promote/demote pseudo actions are created correctly" do_test master-role "Prevent target-role from promoting more than master-max instances" echo "" do_test managed-0 "Managed (reference)" do_test managed-1 "Not managed - down " do_test managed-2 "Not managed - up " echo "" do_test interleave-0 "Interleave (reference)" do_test interleave-1 "coloc - not interleaved" do_test interleave-2 "coloc - interleaved " do_test interleave-3 "coloc - interleaved (2)" do_test interleave-pseudo-stop "Interleaved clone during stonith" do_test interleave-stop "Interleaved clone during stop" do_test interleave-restart "Interleaved clone during dependancy restart" echo "" do_test notify-0 "Notify reference" do_test notify-1 "Notify simple" do_test notify-2 "Notify simple, confirm" do_test notify-3 "Notify move, confirm" do_test novell-239079 "Notification priority" #do_test notify-2 "Notify - 764" echo "" do_test 594 "OSDL #594" do_test 662 "OSDL #662" do_test 696 "OSDL #696" do_test 726 "OSDL #726" do_test 735 "OSDL #735" do_test 764 "OSDL #764" do_test 797 "OSDL #797" do_test 829 "OSDL #829" do_test 994 "OSDL #994" do_test 994-2 "OSDL #994 - with a dependant resource" do_test 1360 "OSDL #1360 - Clone stickiness" do_test 1484 "OSDL #1484 - on_fail=stop" do_test 1494 "OSDL #1494 - Clone stability" do_test unrunnable-1 "Unrunnable" do_test stonith-0 "Stonith loop - 1" do_test stonith-1 "Stonith loop - 2" do_test stonith-2 "Stonith loop - 3" do_test stonith-3 "Stonith startup" do_test bug-1572-1 "Recovery of groups depending on master/slave" do_test bug-1572-2 "Recovery of groups depending on master/slave when the master is never re-promoted" do_test bug-1685 "Depends-on-master ordering" do_test bug-1822 "Dont promote partially active groups" do_test bug-pm-11 "New resource added to a m/s group" do_test bug-pm-12 "Recover only the failed portion of a cloned group" do_test bug-n-387749 "Don't shuffle clone instances" do_test bug-n-385265 "Don't ignore the failure stickiness of group 
children - resource_idvscommon should stay stopped" do_test bug-n-385265-2 "Ensure groups are migrated instead of remaining partially active on the current node" do_test bug-lf-1920 "Correctly handle probes that find active resources" do_test bnc-515172 "Location constraint with multiple expressions" echo "" do_test systemhealth1 "System Health () #1" do_test systemhealth2 "System Health () #2" do_test systemhealth3 "System Health () #3" do_test systemhealthn1 "System Health (None) #1" do_test systemhealthn2 "System Health (None) #2" do_test systemhealthn3 "System Health (None) #3" do_test systemhealthm1 "System Health (Migrate On Red) #1" do_test systemhealthm2 "System Health (Migrate On Red) #2" do_test systemhealthm3 "System Health (Migrate On Red) #3" do_test systemhealtho1 "System Health (Only Green) #1" do_test systemhealtho2 "System Health (Only Green) #2" do_test systemhealtho3 "System Health (Only Green) #3" do_test systemhealthp1 "System Health (Progessive) #1" do_test systemhealthp2 "System Health (Progessive) #2" do_test systemhealthp3 "System Health (Progessive) #3" echo "" do_test utilization "Placement Strategy - utilization" do_test minimal "Placement Strategy - minimal" do_test balanced "Placement Strategy - balanced" echo "" test_results diff --git a/pengine/test10/bug-1572-1.dot b/pengine/test10/bug-1572-1.dot index 668e4d1033..7190cf1ad5 100644 --- a/pengine/test10/bug-1572-1.dot +++ b/pengine/test10/bug-1572-1.dot @@ -1,159 +1,160 @@ digraph "g" { "IPaddr_147_81_84_133_monitor_25000 arc-tkincaidlx.wsicorp.com" [ style=bold color="green" fontcolor="black" ] "IPaddr_147_81_84_133_start_0 arc-tkincaidlx.wsicorp.com" -> "IPaddr_147_81_84_133_monitor_25000 arc-tkincaidlx.wsicorp.com" [ style = bold] "IPaddr_147_81_84_133_start_0 arc-tkincaidlx.wsicorp.com" -> "grp_pgsql_mirror_running_0" [ style = bold] "IPaddr_147_81_84_133_start_0 arc-tkincaidlx.wsicorp.com" [ style=bold color="green" fontcolor="black" ] "IPaddr_147_81_84_133_stop_0 
arc-tkincaidlx.wsicorp.com" -> "IPaddr_147_81_84_133_start_0 arc-tkincaidlx.wsicorp.com" [ style = bold] "IPaddr_147_81_84_133_stop_0 arc-tkincaidlx.wsicorp.com" -> "all_stopped" [ style = bold] "IPaddr_147_81_84_133_stop_0 arc-tkincaidlx.wsicorp.com" -> "grp_pgsql_mirror_stopped_0" [ style = bold] "IPaddr_147_81_84_133_stop_0 arc-tkincaidlx.wsicorp.com" -> "pgsql_5555_stop_0 arc-tkincaidlx.wsicorp.com" [ style = bold] "IPaddr_147_81_84_133_stop_0 arc-tkincaidlx.wsicorp.com" [ style=bold color="green" fontcolor="black" ] "all_stopped" [ style=bold color="green" fontcolor="orange" ] "do_shutdown arc-dknightlx" [ style=bold color="green" fontcolor="black" ] "fs_mirror_start_0 arc-tkincaidlx.wsicorp.com" -> "grp_pgsql_mirror_running_0" [ style = bold] "fs_mirror_start_0 arc-tkincaidlx.wsicorp.com" -> "pgsql_5555_start_0 arc-tkincaidlx.wsicorp.com" [ style = bold] "fs_mirror_start_0 arc-tkincaidlx.wsicorp.com" [ style=bold color="green" fontcolor="black" ] "fs_mirror_stop_0 arc-tkincaidlx.wsicorp.com" -> "all_stopped" [ style = bold] "fs_mirror_stop_0 arc-tkincaidlx.wsicorp.com" -> "fs_mirror_start_0 arc-tkincaidlx.wsicorp.com" [ style = bold] "fs_mirror_stop_0 arc-tkincaidlx.wsicorp.com" -> "grp_pgsql_mirror_stopped_0" [ style = bold] "fs_mirror_stop_0 arc-tkincaidlx.wsicorp.com" [ style=bold color="green" fontcolor="black" ] "grp_pgsql_mirror_running_0" [ style=bold color="green" fontcolor="orange" ] "grp_pgsql_mirror_start_0" -> "IPaddr_147_81_84_133_start_0 arc-tkincaidlx.wsicorp.com" [ style = bold] "grp_pgsql_mirror_start_0" -> "fs_mirror_start_0 arc-tkincaidlx.wsicorp.com" [ style = bold] "grp_pgsql_mirror_start_0" -> "grp_pgsql_mirror_running_0" [ style = bold] "grp_pgsql_mirror_start_0" -> "pgsql_5555_start_0 arc-tkincaidlx.wsicorp.com" [ style = bold] "grp_pgsql_mirror_start_0" [ style=bold color="green" fontcolor="orange" ] "grp_pgsql_mirror_stop_0" -> "IPaddr_147_81_84_133_stop_0 arc-tkincaidlx.wsicorp.com" [ style = bold] "grp_pgsql_mirror_stop_0" -> 
"fs_mirror_stop_0 arc-tkincaidlx.wsicorp.com" [ style = bold] "grp_pgsql_mirror_stop_0" -> "grp_pgsql_mirror_start_0" [ style = bold] "grp_pgsql_mirror_stop_0" -> "grp_pgsql_mirror_stopped_0" [ style = bold] "grp_pgsql_mirror_stop_0" -> "pgsql_5555_stop_0 arc-tkincaidlx.wsicorp.com" [ style = bold] "grp_pgsql_mirror_stop_0" [ style=bold color="green" fontcolor="orange" ] "grp_pgsql_mirror_stopped_0" -> "grp_pgsql_mirror_start_0" [ style = bold] "grp_pgsql_mirror_stopped_0" -> "ms_drbd_7788_demote_0" [ style = bold] +"grp_pgsql_mirror_stopped_0" -> "rsc_drbd_7788:1_demote_0 arc-tkincaidlx.wsicorp.com" [ style = bold] "grp_pgsql_mirror_stopped_0" [ style=bold color="green" fontcolor="orange" ] "ms_drbd_7788_confirmed-post_notify_demoted_0" -> "ms_drbd_7788_pre_notify_promote_0" [ style = bold] "ms_drbd_7788_confirmed-post_notify_demoted_0" -> "ms_drbd_7788_pre_notify_start_0" [ style = bold] "ms_drbd_7788_confirmed-post_notify_demoted_0" -> "ms_drbd_7788_pre_notify_stop_0" [ style = bold] "ms_drbd_7788_confirmed-post_notify_demoted_0" [ style=bold color="green" fontcolor="orange" ] "ms_drbd_7788_confirmed-post_notify_promoted_0" -> "grp_pgsql_mirror_start_0" [ style = bold] "ms_drbd_7788_confirmed-post_notify_promoted_0" [ style=bold color="green" fontcolor="orange" ] "ms_drbd_7788_confirmed-post_notify_running_0" -> "ms_drbd_7788_pre_notify_promote_0" [ style = bold] "ms_drbd_7788_confirmed-post_notify_running_0" [ style=bold color="green" fontcolor="orange" ] "ms_drbd_7788_confirmed-post_notify_stopped_0" -> "all_stopped" [ style = bold] "ms_drbd_7788_confirmed-post_notify_stopped_0" -> "ms_drbd_7788_pre_notify_promote_0" [ style = bold] "ms_drbd_7788_confirmed-post_notify_stopped_0" -> "ms_drbd_7788_pre_notify_start_0" [ style = bold] "ms_drbd_7788_confirmed-post_notify_stopped_0" [ style=bold color="green" fontcolor="orange" ] "ms_drbd_7788_confirmed-pre_notify_demote_0" -> "ms_drbd_7788_demote_0" [ style = bold] "ms_drbd_7788_confirmed-pre_notify_demote_0" -> 
"ms_drbd_7788_post_notify_demoted_0" [ style = bold] "ms_drbd_7788_confirmed-pre_notify_demote_0" [ style=bold color="green" fontcolor="orange" ] "ms_drbd_7788_confirmed-pre_notify_promote_0" -> "ms_drbd_7788_post_notify_promoted_0" [ style = bold] "ms_drbd_7788_confirmed-pre_notify_promote_0" -> "ms_drbd_7788_promote_0" [ style = bold] "ms_drbd_7788_confirmed-pre_notify_promote_0" [ style=bold color="green" fontcolor="orange" ] "ms_drbd_7788_confirmed-pre_notify_start_0" -> "ms_drbd_7788_post_notify_running_0" [ style = bold] "ms_drbd_7788_confirmed-pre_notify_start_0" -> "ms_drbd_7788_start_0" [ style = bold] "ms_drbd_7788_confirmed-pre_notify_start_0" [ style=bold color="green" fontcolor="orange" ] "ms_drbd_7788_confirmed-pre_notify_stop_0" -> "ms_drbd_7788_post_notify_stopped_0" [ style = bold] "ms_drbd_7788_confirmed-pre_notify_stop_0" -> "ms_drbd_7788_stop_0" [ style = bold] "ms_drbd_7788_confirmed-pre_notify_stop_0" [ style=bold color="green" fontcolor="orange" ] "ms_drbd_7788_demote_0" -> "ms_drbd_7788_demoted_0" [ style = bold] "ms_drbd_7788_demote_0" -> "ms_drbd_7788_stop_0" [ style = bold] "ms_drbd_7788_demote_0" -> "rsc_drbd_7788:1_demote_0 arc-tkincaidlx.wsicorp.com" [ style = bold] "ms_drbd_7788_demote_0" [ style=bold color="green" fontcolor="orange" ] "ms_drbd_7788_demoted_0" -> "ms_drbd_7788_post_notify_demoted_0" [ style = bold] "ms_drbd_7788_demoted_0" -> "ms_drbd_7788_promote_0" [ style = bold] "ms_drbd_7788_demoted_0" -> "ms_drbd_7788_start_0" [ style = bold] "ms_drbd_7788_demoted_0" -> "ms_drbd_7788_stop_0" [ style = bold] "ms_drbd_7788_demoted_0" [ style=bold color="green" fontcolor="orange" ] "ms_drbd_7788_post_notify_demoted_0" -> "ms_drbd_7788_confirmed-post_notify_demoted_0" [ style = bold] "ms_drbd_7788_post_notify_demoted_0" -> "rsc_drbd_7788:0_post_notify_demote_0 arc-dknightlx" [ style = bold] "ms_drbd_7788_post_notify_demoted_0" -> "rsc_drbd_7788:1_post_notify_demote_0 arc-tkincaidlx.wsicorp.com" [ style = bold] 
"ms_drbd_7788_post_notify_demoted_0" [ style=bold color="green" fontcolor="orange" ] "ms_drbd_7788_post_notify_promoted_0" -> "ms_drbd_7788_confirmed-post_notify_promoted_0" [ style = bold] "ms_drbd_7788_post_notify_promoted_0" -> "rsc_drbd_7788:1_post_notify_promote_0 arc-tkincaidlx.wsicorp.com" [ style = bold] "ms_drbd_7788_post_notify_promoted_0" [ style=bold color="green" fontcolor="orange" ] "ms_drbd_7788_post_notify_running_0" -> "ms_drbd_7788_confirmed-post_notify_running_0" [ style = bold] "ms_drbd_7788_post_notify_running_0" -> "rsc_drbd_7788:1_post_notify_start_0 arc-tkincaidlx.wsicorp.com" [ style = bold] "ms_drbd_7788_post_notify_running_0" [ style=bold color="green" fontcolor="orange" ] "ms_drbd_7788_post_notify_stopped_0" -> "ms_drbd_7788_confirmed-post_notify_stopped_0" [ style = bold] "ms_drbd_7788_post_notify_stopped_0" [ style=bold color="green" fontcolor="orange" ] "ms_drbd_7788_pre_notify_demote_0" -> "ms_drbd_7788_confirmed-pre_notify_demote_0" [ style = bold] "ms_drbd_7788_pre_notify_demote_0" -> "rsc_drbd_7788:0_pre_notify_demote_0 arc-dknightlx" [ style = bold] "ms_drbd_7788_pre_notify_demote_0" -> "rsc_drbd_7788:1_pre_notify_demote_0 arc-tkincaidlx.wsicorp.com" [ style = bold] "ms_drbd_7788_pre_notify_demote_0" [ style=bold color="green" fontcolor="orange" ] "ms_drbd_7788_pre_notify_promote_0" -> "ms_drbd_7788_confirmed-pre_notify_promote_0" [ style = bold] "ms_drbd_7788_pre_notify_promote_0" -> "rsc_drbd_7788:1_pre_notify_promote_0 arc-tkincaidlx.wsicorp.com" [ style = bold] "ms_drbd_7788_pre_notify_promote_0" [ style=bold color="green" fontcolor="orange" ] "ms_drbd_7788_pre_notify_start_0" -> "ms_drbd_7788_confirmed-pre_notify_start_0" [ style = bold] "ms_drbd_7788_pre_notify_start_0" [ style=bold color="green" fontcolor="orange" ] "ms_drbd_7788_pre_notify_stop_0" -> "ms_drbd_7788_confirmed-pre_notify_stop_0" [ style = bold] "ms_drbd_7788_pre_notify_stop_0" -> "rsc_drbd_7788:0_pre_notify_stop_0 arc-dknightlx" [ style = bold] 
"ms_drbd_7788_pre_notify_stop_0" -> "rsc_drbd_7788:1_pre_notify_stop_0 arc-tkincaidlx.wsicorp.com" [ style = bold] "ms_drbd_7788_pre_notify_stop_0" [ style=bold color="green" fontcolor="orange" ] "ms_drbd_7788_promote_0" -> "rsc_drbd_7788:1_promote_0 arc-tkincaidlx.wsicorp.com" [ style = bold] "ms_drbd_7788_promote_0" [ style=bold color="green" fontcolor="orange" ] "ms_drbd_7788_promoted_0" -> "grp_pgsql_mirror_start_0" [ style = bold] "ms_drbd_7788_promoted_0" -> "ms_drbd_7788_post_notify_promoted_0" [ style = bold] "ms_drbd_7788_promoted_0" [ style=bold color="green" fontcolor="orange" ] "ms_drbd_7788_running_0" -> "ms_drbd_7788_post_notify_running_0" [ style = bold] "ms_drbd_7788_running_0" -> "ms_drbd_7788_promote_0" [ style = bold] "ms_drbd_7788_running_0" [ style=bold color="green" fontcolor="orange" ] "ms_drbd_7788_start_0" -> "ms_drbd_7788_promote_0" [ style = bold] "ms_drbd_7788_start_0" -> "ms_drbd_7788_running_0" [ style = bold] "ms_drbd_7788_start_0" -> "rsc_drbd_7788:1_start_0 arc-tkincaidlx.wsicorp.com" [ style = bold] "ms_drbd_7788_start_0" [ style=bold color="green" fontcolor="orange" ] "ms_drbd_7788_stop_0" -> "ms_drbd_7788_start_0" [ style = bold] "ms_drbd_7788_stop_0" -> "ms_drbd_7788_stopped_0" [ style = bold] "ms_drbd_7788_stop_0" -> "rsc_drbd_7788:0_stop_0 arc-dknightlx" [ style = bold] "ms_drbd_7788_stop_0" -> "rsc_drbd_7788:1_stop_0 arc-tkincaidlx.wsicorp.com" [ style = bold] "ms_drbd_7788_stop_0" [ style=bold color="green" fontcolor="orange" ] "ms_drbd_7788_stopped_0" -> "ms_drbd_7788_post_notify_stopped_0" [ style = bold] "ms_drbd_7788_stopped_0" -> "ms_drbd_7788_promote_0" [ style = bold] "ms_drbd_7788_stopped_0" -> "ms_drbd_7788_start_0" [ style = bold] "ms_drbd_7788_stopped_0" [ style=bold color="green" fontcolor="orange" ] "pgsql_5555_monitor_30000 arc-tkincaidlx.wsicorp.com" [ style=bold color="green" fontcolor="black" ] "pgsql_5555_start_0 arc-tkincaidlx.wsicorp.com" -> "IPaddr_147_81_84_133_start_0 arc-tkincaidlx.wsicorp.com" [ 
style = bold] "pgsql_5555_start_0 arc-tkincaidlx.wsicorp.com" -> "grp_pgsql_mirror_running_0" [ style = bold] "pgsql_5555_start_0 arc-tkincaidlx.wsicorp.com" -> "pgsql_5555_monitor_30000 arc-tkincaidlx.wsicorp.com" [ style = bold] "pgsql_5555_start_0 arc-tkincaidlx.wsicorp.com" [ style=bold color="green" fontcolor="black" ] "pgsql_5555_stop_0 arc-tkincaidlx.wsicorp.com" -> "all_stopped" [ style = bold] "pgsql_5555_stop_0 arc-tkincaidlx.wsicorp.com" -> "fs_mirror_stop_0 arc-tkincaidlx.wsicorp.com" [ style = bold] "pgsql_5555_stop_0 arc-tkincaidlx.wsicorp.com" -> "grp_pgsql_mirror_stopped_0" [ style = bold] "pgsql_5555_stop_0 arc-tkincaidlx.wsicorp.com" -> "pgsql_5555_start_0 arc-tkincaidlx.wsicorp.com" [ style = bold] "pgsql_5555_stop_0 arc-tkincaidlx.wsicorp.com" [ style=bold color="green" fontcolor="black" ] "rsc_drbd_7788:0_post_notify_demote_0 arc-dknightlx" -> "ms_drbd_7788_confirmed-post_notify_demoted_0" [ style = bold] "rsc_drbd_7788:0_post_notify_demote_0 arc-dknightlx" [ style=bold color="green" fontcolor="black" ] "rsc_drbd_7788:0_pre_notify_demote_0 arc-dknightlx" -> "ms_drbd_7788_confirmed-pre_notify_demote_0" [ style = bold] "rsc_drbd_7788:0_pre_notify_demote_0 arc-dknightlx" [ style=bold color="green" fontcolor="black" ] "rsc_drbd_7788:0_pre_notify_stop_0 arc-dknightlx" -> "ms_drbd_7788_confirmed-pre_notify_stop_0" [ style = bold] "rsc_drbd_7788:0_pre_notify_stop_0 arc-dknightlx" [ style=bold color="green" fontcolor="black" ] "rsc_drbd_7788:0_stop_0 arc-dknightlx" -> "all_stopped" [ style = bold] "rsc_drbd_7788:0_stop_0 arc-dknightlx" -> "do_shutdown arc-dknightlx" [ style = bold] "rsc_drbd_7788:0_stop_0 arc-dknightlx" -> "ms_drbd_7788_stopped_0" [ style = bold] "rsc_drbd_7788:0_stop_0 arc-dknightlx" [ style=bold color="green" fontcolor="black" ] "rsc_drbd_7788:1_demote_0 arc-tkincaidlx.wsicorp.com" -> "ms_drbd_7788_demoted_0" [ style = bold] "rsc_drbd_7788:1_demote_0 arc-tkincaidlx.wsicorp.com" -> "rsc_drbd_7788:1_promote_0 
arc-tkincaidlx.wsicorp.com" [ style = bold] "rsc_drbd_7788:1_demote_0 arc-tkincaidlx.wsicorp.com" -> "rsc_drbd_7788:1_stop_0 arc-tkincaidlx.wsicorp.com" [ style = bold] "rsc_drbd_7788:1_demote_0 arc-tkincaidlx.wsicorp.com" [ style=bold color="green" fontcolor="black" ] "rsc_drbd_7788:1_post_notify_demote_0 arc-tkincaidlx.wsicorp.com" -> "ms_drbd_7788_confirmed-post_notify_demoted_0" [ style = bold] "rsc_drbd_7788:1_post_notify_demote_0 arc-tkincaidlx.wsicorp.com" [ style=bold color="green" fontcolor="black" ] "rsc_drbd_7788:1_post_notify_promote_0 arc-tkincaidlx.wsicorp.com" -> "ms_drbd_7788_confirmed-post_notify_promoted_0" [ style = bold] "rsc_drbd_7788:1_post_notify_promote_0 arc-tkincaidlx.wsicorp.com" [ style=bold color="green" fontcolor="black" ] "rsc_drbd_7788:1_post_notify_start_0 arc-tkincaidlx.wsicorp.com" -> "ms_drbd_7788_confirmed-post_notify_running_0" [ style = bold] "rsc_drbd_7788:1_post_notify_start_0 arc-tkincaidlx.wsicorp.com" [ style=bold color="green" fontcolor="black" ] "rsc_drbd_7788:1_pre_notify_demote_0 arc-tkincaidlx.wsicorp.com" -> "ms_drbd_7788_confirmed-pre_notify_demote_0" [ style = bold] "rsc_drbd_7788:1_pre_notify_demote_0 arc-tkincaidlx.wsicorp.com" [ style=bold color="green" fontcolor="black" ] "rsc_drbd_7788:1_pre_notify_promote_0 arc-tkincaidlx.wsicorp.com" -> "ms_drbd_7788_confirmed-pre_notify_promote_0" [ style = bold] "rsc_drbd_7788:1_pre_notify_promote_0 arc-tkincaidlx.wsicorp.com" [ style=bold color="green" fontcolor="black" ] "rsc_drbd_7788:1_pre_notify_stop_0 arc-tkincaidlx.wsicorp.com" -> "ms_drbd_7788_confirmed-pre_notify_stop_0" [ style = bold] "rsc_drbd_7788:1_pre_notify_stop_0 arc-tkincaidlx.wsicorp.com" [ style=bold color="green" fontcolor="black" ] "rsc_drbd_7788:1_promote_0 arc-tkincaidlx.wsicorp.com" -> "ms_drbd_7788_promoted_0" [ style = bold] "rsc_drbd_7788:1_promote_0 arc-tkincaidlx.wsicorp.com" [ style=bold color="green" fontcolor="black" ] "rsc_drbd_7788:1_start_0 arc-tkincaidlx.wsicorp.com" -> 
"ms_drbd_7788_running_0" [ style = bold] "rsc_drbd_7788:1_start_0 arc-tkincaidlx.wsicorp.com" -> "rsc_drbd_7788:1_promote_0 arc-tkincaidlx.wsicorp.com" [ style = bold] "rsc_drbd_7788:1_start_0 arc-tkincaidlx.wsicorp.com" [ style=bold color="green" fontcolor="black" ] "rsc_drbd_7788:1_stop_0 arc-tkincaidlx.wsicorp.com" -> "all_stopped" [ style = bold] "rsc_drbd_7788:1_stop_0 arc-tkincaidlx.wsicorp.com" -> "ms_drbd_7788_stopped_0" [ style = bold] "rsc_drbd_7788:1_stop_0 arc-tkincaidlx.wsicorp.com" -> "rsc_drbd_7788:1_start_0 arc-tkincaidlx.wsicorp.com" [ style = bold] "rsc_drbd_7788:1_stop_0 arc-tkincaidlx.wsicorp.com" [ style=bold color="green" fontcolor="black" ] } diff --git a/pengine/test10/bug-1572-1.exp b/pengine/test10/bug-1572-1.exp index 25460895b2..274b2d61bd 100644 --- a/pengine/test10/bug-1572-1.exp +++ b/pengine/test10/bug-1572-1.exp @@ -1,806 +1,809 @@ + + + diff --git a/pengine/test10/bug-1572-2.dot b/pengine/test10/bug-1572-2.dot index c8b3dd9154..d7979b365a 100644 --- a/pengine/test10/bug-1572-2.dot +++ b/pengine/test10/bug-1572-2.dot @@ -1,79 +1,80 @@ digraph "g" { "IPaddr_147_81_84_133_stop_0 arc-tkincaidlx.wsicorp.com" -> "all_stopped" [ style = bold] "IPaddr_147_81_84_133_stop_0 arc-tkincaidlx.wsicorp.com" -> "grp_pgsql_mirror_stopped_0" [ style = bold] "IPaddr_147_81_84_133_stop_0 arc-tkincaidlx.wsicorp.com" -> "pgsql_5555_stop_0 arc-tkincaidlx.wsicorp.com" [ style = bold] "IPaddr_147_81_84_133_stop_0 arc-tkincaidlx.wsicorp.com" [ style=bold color="green" fontcolor="black" ] "all_stopped" [ style=bold color="green" fontcolor="orange" ] "do_shutdown arc-dknightlx" [ style=bold color="green" fontcolor="black" ] "fs_mirror_stop_0 arc-tkincaidlx.wsicorp.com" -> "all_stopped" [ style = bold] "fs_mirror_stop_0 arc-tkincaidlx.wsicorp.com" -> "grp_pgsql_mirror_stopped_0" [ style = bold] "fs_mirror_stop_0 arc-tkincaidlx.wsicorp.com" [ style=bold color="green" fontcolor="black" ] "grp_pgsql_mirror_stop_0" -> "IPaddr_147_81_84_133_stop_0 
arc-tkincaidlx.wsicorp.com" [ style = bold] "grp_pgsql_mirror_stop_0" -> "fs_mirror_stop_0 arc-tkincaidlx.wsicorp.com" [ style = bold] "grp_pgsql_mirror_stop_0" -> "grp_pgsql_mirror_stopped_0" [ style = bold] "grp_pgsql_mirror_stop_0" -> "pgsql_5555_stop_0 arc-tkincaidlx.wsicorp.com" [ style = bold] "grp_pgsql_mirror_stop_0" [ style=bold color="green" fontcolor="orange" ] "grp_pgsql_mirror_stopped_0" -> "ms_drbd_7788_demote_0" [ style = bold] +"grp_pgsql_mirror_stopped_0" -> "rsc_drbd_7788:1_demote_0 arc-tkincaidlx.wsicorp.com" [ style = bold] "grp_pgsql_mirror_stopped_0" [ style=bold color="green" fontcolor="orange" ] "ms_drbd_7788_confirmed-post_notify_demoted_0" -> "ms_drbd_7788_pre_notify_stop_0" [ style = bold] "ms_drbd_7788_confirmed-post_notify_demoted_0" [ style=bold color="green" fontcolor="orange" ] "ms_drbd_7788_confirmed-post_notify_stopped_0" -> "all_stopped" [ style = bold] "ms_drbd_7788_confirmed-post_notify_stopped_0" [ style=bold color="green" fontcolor="orange" ] "ms_drbd_7788_confirmed-pre_notify_demote_0" -> "ms_drbd_7788_demote_0" [ style = bold] "ms_drbd_7788_confirmed-pre_notify_demote_0" -> "ms_drbd_7788_post_notify_demoted_0" [ style = bold] "ms_drbd_7788_confirmed-pre_notify_demote_0" [ style=bold color="green" fontcolor="orange" ] "ms_drbd_7788_confirmed-pre_notify_stop_0" -> "ms_drbd_7788_post_notify_stopped_0" [ style = bold] "ms_drbd_7788_confirmed-pre_notify_stop_0" -> "ms_drbd_7788_stop_0" [ style = bold] "ms_drbd_7788_confirmed-pre_notify_stop_0" [ style=bold color="green" fontcolor="orange" ] "ms_drbd_7788_demote_0" -> "ms_drbd_7788_demoted_0" [ style = bold] "ms_drbd_7788_demote_0" -> "ms_drbd_7788_stop_0" [ style = bold] "ms_drbd_7788_demote_0" -> "rsc_drbd_7788:1_demote_0 arc-tkincaidlx.wsicorp.com" [ style = bold] "ms_drbd_7788_demote_0" [ style=bold color="green" fontcolor="orange" ] "ms_drbd_7788_demoted_0" -> "ms_drbd_7788_post_notify_demoted_0" [ style = bold] "ms_drbd_7788_demoted_0" -> "ms_drbd_7788_stop_0" [ style = 
bold] "ms_drbd_7788_demoted_0" [ style=bold color="green" fontcolor="orange" ] "ms_drbd_7788_post_notify_demoted_0" -> "ms_drbd_7788_confirmed-post_notify_demoted_0" [ style = bold] "ms_drbd_7788_post_notify_demoted_0" -> "rsc_drbd_7788:0_post_notify_demote_0 arc-dknightlx" [ style = bold] "ms_drbd_7788_post_notify_demoted_0" -> "rsc_drbd_7788:1_post_notify_demote_0 arc-tkincaidlx.wsicorp.com" [ style = bold] "ms_drbd_7788_post_notify_demoted_0" [ style=bold color="green" fontcolor="orange" ] "ms_drbd_7788_post_notify_stopped_0" -> "ms_drbd_7788_confirmed-post_notify_stopped_0" [ style = bold] "ms_drbd_7788_post_notify_stopped_0" -> "rsc_drbd_7788:1_post_notify_stop_0 arc-tkincaidlx.wsicorp.com" [ style = bold] "ms_drbd_7788_post_notify_stopped_0" [ style=bold color="green" fontcolor="orange" ] "ms_drbd_7788_pre_notify_demote_0" -> "ms_drbd_7788_confirmed-pre_notify_demote_0" [ style = bold] "ms_drbd_7788_pre_notify_demote_0" -> "rsc_drbd_7788:0_pre_notify_demote_0 arc-dknightlx" [ style = bold] "ms_drbd_7788_pre_notify_demote_0" -> "rsc_drbd_7788:1_pre_notify_demote_0 arc-tkincaidlx.wsicorp.com" [ style = bold] "ms_drbd_7788_pre_notify_demote_0" [ style=bold color="green" fontcolor="orange" ] "ms_drbd_7788_pre_notify_stop_0" -> "ms_drbd_7788_confirmed-pre_notify_stop_0" [ style = bold] "ms_drbd_7788_pre_notify_stop_0" -> "rsc_drbd_7788:0_pre_notify_stop_0 arc-dknightlx" [ style = bold] "ms_drbd_7788_pre_notify_stop_0" -> "rsc_drbd_7788:1_pre_notify_stop_0 arc-tkincaidlx.wsicorp.com" [ style = bold] "ms_drbd_7788_pre_notify_stop_0" [ style=bold color="green" fontcolor="orange" ] "ms_drbd_7788_stop_0" -> "ms_drbd_7788_stopped_0" [ style = bold] "ms_drbd_7788_stop_0" -> "rsc_drbd_7788:0_stop_0 arc-dknightlx" [ style = bold] "ms_drbd_7788_stop_0" [ style=bold color="green" fontcolor="orange" ] "ms_drbd_7788_stopped_0" -> "ms_drbd_7788_post_notify_stopped_0" [ style = bold] "ms_drbd_7788_stopped_0" [ style=bold color="green" fontcolor="orange" ] "pgsql_5555_stop_0 
arc-tkincaidlx.wsicorp.com" -> "all_stopped" [ style = bold] "pgsql_5555_stop_0 arc-tkincaidlx.wsicorp.com" -> "fs_mirror_stop_0 arc-tkincaidlx.wsicorp.com" [ style = bold] "pgsql_5555_stop_0 arc-tkincaidlx.wsicorp.com" -> "grp_pgsql_mirror_stopped_0" [ style = bold] "pgsql_5555_stop_0 arc-tkincaidlx.wsicorp.com" [ style=bold color="green" fontcolor="black" ] "rsc_drbd_7788:0_post_notify_demote_0 arc-dknightlx" -> "ms_drbd_7788_confirmed-post_notify_demoted_0" [ style = bold] "rsc_drbd_7788:0_post_notify_demote_0 arc-dknightlx" [ style=bold color="green" fontcolor="black" ] "rsc_drbd_7788:0_pre_notify_demote_0 arc-dknightlx" -> "ms_drbd_7788_confirmed-pre_notify_demote_0" [ style = bold] "rsc_drbd_7788:0_pre_notify_demote_0 arc-dknightlx" [ style=bold color="green" fontcolor="black" ] "rsc_drbd_7788:0_pre_notify_stop_0 arc-dknightlx" -> "ms_drbd_7788_confirmed-pre_notify_stop_0" [ style = bold] "rsc_drbd_7788:0_pre_notify_stop_0 arc-dknightlx" [ style=bold color="green" fontcolor="black" ] "rsc_drbd_7788:0_stop_0 arc-dknightlx" -> "all_stopped" [ style = bold] "rsc_drbd_7788:0_stop_0 arc-dknightlx" -> "do_shutdown arc-dknightlx" [ style = bold] "rsc_drbd_7788:0_stop_0 arc-dknightlx" -> "ms_drbd_7788_stopped_0" [ style = bold] "rsc_drbd_7788:0_stop_0 arc-dknightlx" [ style=bold color="green" fontcolor="black" ] "rsc_drbd_7788:1_demote_0 arc-tkincaidlx.wsicorp.com" -> "ms_drbd_7788_demoted_0" [ style = bold] "rsc_drbd_7788:1_demote_0 arc-tkincaidlx.wsicorp.com" [ style=bold color="green" fontcolor="black" ] "rsc_drbd_7788:1_post_notify_demote_0 arc-tkincaidlx.wsicorp.com" -> "ms_drbd_7788_confirmed-post_notify_demoted_0" [ style = bold] "rsc_drbd_7788:1_post_notify_demote_0 arc-tkincaidlx.wsicorp.com" [ style=bold color="green" fontcolor="black" ] "rsc_drbd_7788:1_post_notify_stop_0 arc-tkincaidlx.wsicorp.com" -> "ms_drbd_7788_confirmed-post_notify_stopped_0" [ style = bold] "rsc_drbd_7788:1_post_notify_stop_0 arc-tkincaidlx.wsicorp.com" [ style=bold color="green" 
fontcolor="black" ] "rsc_drbd_7788:1_pre_notify_demote_0 arc-tkincaidlx.wsicorp.com" -> "ms_drbd_7788_confirmed-pre_notify_demote_0" [ style = bold] "rsc_drbd_7788:1_pre_notify_demote_0 arc-tkincaidlx.wsicorp.com" [ style=bold color="green" fontcolor="black" ] "rsc_drbd_7788:1_pre_notify_stop_0 arc-tkincaidlx.wsicorp.com" -> "ms_drbd_7788_confirmed-pre_notify_stop_0" [ style = bold] "rsc_drbd_7788:1_pre_notify_stop_0 arc-tkincaidlx.wsicorp.com" [ style=bold color="green" fontcolor="black" ] } diff --git a/pengine/test10/bug-1572-2.exp b/pengine/test10/bug-1572-2.exp index 3c10435940..0234ed3544 100644 --- a/pengine/test10/bug-1572-2.exp +++ b/pengine/test10/bug-1572-2.exp @@ -1,412 +1,415 @@ + + + diff --git a/pengine/test10/bug-lf-2153.dot b/pengine/test10/bug-lf-2153.dot new file mode 100644 index 0000000000..d58b78c91a --- /dev/null +++ b/pengine/test10/bug-lf-2153.dot @@ -0,0 +1,36 @@ +digraph "g" { +"all_stopped" [ style=bold color="green" fontcolor="orange" ] +"cl_tgtd_stop_0" -> "cl_tgtd_stopped_0" [ style = bold] +"cl_tgtd_stop_0" -> "res_tgtd:0_stop_0 bob" [ style = bold] +"cl_tgtd_stop_0" [ style=bold color="green" fontcolor="orange" ] +"cl_tgtd_stopped_0" [ style=bold color="green" fontcolor="orange" ] +"ms_drbd_iscsivg01_confirmed-post_notify_stopped_0" -> "all_stopped" [ style = bold] +"ms_drbd_iscsivg01_confirmed-post_notify_stopped_0" [ style=bold color="green" fontcolor="orange" ] +"ms_drbd_iscsivg01_confirmed-pre_notify_stop_0" -> "ms_drbd_iscsivg01_post_notify_stopped_0" [ style = bold] +"ms_drbd_iscsivg01_confirmed-pre_notify_stop_0" -> "ms_drbd_iscsivg01_stop_0" [ style = bold] +"ms_drbd_iscsivg01_confirmed-pre_notify_stop_0" [ style=bold color="green" fontcolor="orange" ] +"ms_drbd_iscsivg01_post_notify_stopped_0" -> "ms_drbd_iscsivg01_confirmed-post_notify_stopped_0" [ style = bold] +"ms_drbd_iscsivg01_post_notify_stopped_0" -> "res_drbd_iscsivg01:1_post_notify_stop_0 alice" [ style = bold] +"ms_drbd_iscsivg01_post_notify_stopped_0" [ 
style=bold color="green" fontcolor="orange" ] +"ms_drbd_iscsivg01_pre_notify_stop_0" -> "ms_drbd_iscsivg01_confirmed-pre_notify_stop_0" [ style = bold] +"ms_drbd_iscsivg01_pre_notify_stop_0" -> "res_drbd_iscsivg01:0_pre_notify_stop_0 bob" [ style = bold] +"ms_drbd_iscsivg01_pre_notify_stop_0" -> "res_drbd_iscsivg01:1_pre_notify_stop_0 alice" [ style = bold] +"ms_drbd_iscsivg01_pre_notify_stop_0" [ style=bold color="green" fontcolor="orange" ] +"ms_drbd_iscsivg01_stop_0" -> "ms_drbd_iscsivg01_stopped_0" [ style = bold] +"ms_drbd_iscsivg01_stop_0" -> "res_drbd_iscsivg01:0_stop_0 bob" [ style = bold] +"ms_drbd_iscsivg01_stop_0" [ style=bold color="green" fontcolor="orange" ] +"ms_drbd_iscsivg01_stopped_0" -> "ms_drbd_iscsivg01_post_notify_stopped_0" [ style = bold] +"ms_drbd_iscsivg01_stopped_0" [ style=bold color="green" fontcolor="orange" ] +"res_drbd_iscsivg01:0_pre_notify_stop_0 bob" -> "ms_drbd_iscsivg01_confirmed-pre_notify_stop_0" [ style = bold] +"res_drbd_iscsivg01:0_pre_notify_stop_0 bob" [ style=bold color="green" fontcolor="black" ] +"res_drbd_iscsivg01:0_stop_0 bob" -> "all_stopped" [ style = bold] +"res_drbd_iscsivg01:0_stop_0 bob" -> "ms_drbd_iscsivg01_stopped_0" [ style = bold] +"res_drbd_iscsivg01:0_stop_0 bob" [ style=bold color="green" fontcolor="black" ] +"res_drbd_iscsivg01:1_post_notify_stop_0 alice" -> "ms_drbd_iscsivg01_confirmed-post_notify_stopped_0" [ style = bold] +"res_drbd_iscsivg01:1_post_notify_stop_0 alice" [ style=bold color="green" fontcolor="black" ] +"res_drbd_iscsivg01:1_pre_notify_stop_0 alice" -> "ms_drbd_iscsivg01_confirmed-pre_notify_stop_0" [ style = bold] +"res_drbd_iscsivg01:1_pre_notify_stop_0 alice" [ style=bold color="green" fontcolor="black" ] +"res_tgtd:0_stop_0 bob" -> "all_stopped" [ style = bold] +"res_tgtd:0_stop_0 bob" -> "cl_tgtd_stopped_0" [ style = bold] +"res_tgtd:0_stop_0 bob" [ style=bold color="green" fontcolor="black" ] +} diff --git a/pengine/test10/bug-lf-2153.exp b/pengine/test10/bug-lf-2153.exp new 
file mode 100644 index 0000000000..51fa7cb29c --- /dev/null +++ b/pengine/test10/bug-lf-2153.exp @@ -0,0 +1,192 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pengine/test10/bug-lf-2153.scores b/pengine/test10/bug-lf-2153.scores new file mode 100644 index 0000000000..3979aff148 --- /dev/null +++ b/pengine/test10/bug-lf-2153.scores @@ -0,0 +1,55 @@ +Allocation scores: +clone_color: ms_drbd_iscsivg01 allocation score on alice: 1500 +clone_color: ms_drbd_iscsivg01 allocation score on bob: -1000000 +clone_color: res_drbd_iscsivg01:0 allocation score on alice: 0 +clone_color: res_drbd_iscsivg01:0 allocation score on bob: 1 +clone_color: res_drbd_iscsivg01:1 allocation score on alice: 101 +clone_color: res_drbd_iscsivg01:1 allocation score on bob: 0 +native_color: res_drbd_iscsivg01:1 allocation score on alice: 101 +native_color: res_drbd_iscsivg01:1 allocation score on bob: -1000000 +native_color: res_drbd_iscsivg01:0 allocation score on alice: -1000000 +native_color: res_drbd_iscsivg01:0 allocation score on bob: -1000000 +res_drbd_iscsivg01:1 promotion score on alice: 3100 +res_drbd_iscsivg01:0 promotion score on none: 0 +clone_color: cl_tgtd allocation score on alice: 1500 +clone_color: cl_tgtd allocation score on bob: -1000000 +clone_color: res_tgtd:0 allocation score on alice: 0 +clone_color: res_tgtd:0 allocation score on bob: 1 +clone_color: res_tgtd:1 allocation score on alice: 1 +clone_color: res_tgtd:1 allocation score on bob: 0 +native_color: res_tgtd:1 allocation score on alice: 1 +native_color: res_tgtd:1 allocation score on bob: -1000000 +native_color: res_tgtd:0 allocation score on alice: -1000000 
+native_color: res_tgtd:0 allocation score on bob: -1000000 +group_color: rg_iscsivg01 allocation score on alice: 100 +group_color: rg_iscsivg01 allocation score on bob: 0 +group_color: res_portblock_iscsivg01_block allocation score on alice: 300 +group_color: res_portblock_iscsivg01_block allocation score on bob: 0 +group_color: res_lvm_iscsivg01 allocation score on alice: 200 +group_color: res_lvm_iscsivg01 allocation score on bob: 0 +group_color: res_target_iscsivg01 allocation score on alice: 200 +group_color: res_target_iscsivg01 allocation score on bob: 0 +group_color: res_lu_iscsivg01_lun1 allocation score on alice: 200 +group_color: res_lu_iscsivg01_lun1 allocation score on bob: 0 +group_color: res_lu_iscsivg01_lun2 allocation score on alice: 200 +group_color: res_lu_iscsivg01_lun2 allocation score on bob: 0 +group_color: res_ip_alicebob01 allocation score on alice: 200 +group_color: res_ip_alicebob01 allocation score on bob: 0 +group_color: res_portblock_iscsivg01_unblock allocation score on alice: 200 +group_color: res_portblock_iscsivg01_unblock allocation score on bob: 0 +res_drbd_iscsivg01:1 promotion score on alice: 1000000 +res_drbd_iscsivg01:0 promotion score on none: 0 +native_color: res_portblock_iscsivg01_block allocation score on alice: 1602 +native_color: res_portblock_iscsivg01_block allocation score on bob: -1000000 +native_color: res_lvm_iscsivg01 allocation score on alice: 1200 +native_color: res_lvm_iscsivg01 allocation score on bob: -1000000 +native_color: res_target_iscsivg01 allocation score on alice: 1000 +native_color: res_target_iscsivg01 allocation score on bob: -1000000 +native_color: res_lu_iscsivg01_lun1 allocation score on alice: 800 +native_color: res_lu_iscsivg01_lun1 allocation score on bob: -1000000 +native_color: res_lu_iscsivg01_lun2 allocation score on alice: 600 +native_color: res_lu_iscsivg01_lun2 allocation score on bob: -1000000 +native_color: res_ip_alicebob01 allocation score on alice: 400 +native_color: 
res_ip_alicebob01 allocation score on bob: -1000000 +native_color: res_portblock_iscsivg01_unblock allocation score on alice: 200 +native_color: res_portblock_iscsivg01_unblock allocation score on bob: -1000000 diff --git a/pengine/test10/bug-lf-2153.xml b/pengine/test10/bug-lf-2153.xml new file mode 100644 index 0000000000..9940d8a730 --- /dev/null +++ b/pengine/test10/bug-lf-2153.xml @@ -0,0 +1,293 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pengine/test10/bug-lf-2171.dot b/pengine/test10/bug-lf-2171.dot index b915c1b493..6d978c87cf 100644 --- a/pengine/test10/bug-lf-2171.dot +++ b/pengine/test10/bug-lf-2171.dot @@ -1,37 +1,38 @@ digraph "g" { "all_stopped" [ style=bold color="green" fontcolor="orange" ] "cl_res_Dummy1_stop_0" -> "cl_res_Dummy1_stopped_0" [ style = bold] "cl_res_Dummy1_stop_0" -> "res_Dummy1:0_stop_0 xenserver2" [ style = bold] "cl_res_Dummy1_stop_0" -> "res_Dummy1:1_stop_0 xenserver1" [ style = bold] "cl_res_Dummy1_stop_0" [ style=bold color="green" fontcolor="orange" ] "cl_res_Dummy1_stopped_0" [ style=bold color="green" fontcolor="orange" ] "gr_Dummy_running_0" [ style=dashed color="red" fontcolor="orange" ] "gr_Dummy_stop_0" -> "gr_Dummy_stopped_0" [ style = bold] "gr_Dummy_stop_0" -> "res_Dummy2_stop_0 xenserver1" [ style = bold] "gr_Dummy_stop_0" -> "res_Dummy3_stop_0 xenserver1" [ style = bold] "gr_Dummy_stop_0" [ style=bold color="green" fontcolor="orange" ] "gr_Dummy_stopped_0" -> 
"cl_res_Dummy1_stop_0" [ style = bold] +"gr_Dummy_stopped_0" -> "res_Dummy1:1_stop_0 xenserver1" [ style = bold] "gr_Dummy_stopped_0" [ style=bold color="green" fontcolor="orange" ] "res_Dummy1:0_stop_0 xenserver2" -> "all_stopped" [ style = bold] "res_Dummy1:0_stop_0 xenserver2" -> "cl_res_Dummy1_stopped_0" [ style = bold] "res_Dummy1:0_stop_0 xenserver2" [ style=bold color="green" fontcolor="black" ] "res_Dummy1:1_stop_0 xenserver1" -> "all_stopped" [ style = bold] "res_Dummy1:1_stop_0 xenserver1" -> "cl_res_Dummy1_stopped_0" [ style = bold] "res_Dummy1:1_stop_0 xenserver1" [ style=bold color="green" fontcolor="black" ] "res_Dummy2_monitor_10000 xenserver1" [ style=dashed color="red" fontcolor="black" ] "res_Dummy2_start_0 xenserver1" -> "gr_Dummy_running_0" [ style = dashed] "res_Dummy2_start_0 xenserver1" -> "res_Dummy2_monitor_10000 xenserver1" [ style = dashed] "res_Dummy2_start_0 xenserver1" [ style=dashed color="red" fontcolor="black" ] "res_Dummy2_stop_0 xenserver1" -> "all_stopped" [ style = bold] "res_Dummy2_stop_0 xenserver1" -> "gr_Dummy_stopped_0" [ style = bold] "res_Dummy2_stop_0 xenserver1" -> "res_Dummy2_start_0 xenserver1" [ style = dashed] "res_Dummy2_stop_0 xenserver1" [ style=bold color="green" fontcolor="black" ] "res_Dummy3_monitor_10000 xenserver1" [ style=dashed color="red" fontcolor="black" ] "res_Dummy3_start_0 xenserver1" -> "gr_Dummy_running_0" [ style = dashed] "res_Dummy3_start_0 xenserver1" -> "res_Dummy3_monitor_10000 xenserver1" [ style = dashed] "res_Dummy3_start_0 xenserver1" [ style=dashed color="red" fontcolor="black" ] "res_Dummy3_stop_0 xenserver1" -> "all_stopped" [ style = bold] "res_Dummy3_stop_0 xenserver1" -> "gr_Dummy_stopped_0" [ style = bold] "res_Dummy3_stop_0 xenserver1" -> "res_Dummy3_start_0 xenserver1" [ style = dashed] "res_Dummy3_stop_0 xenserver1" [ style=bold color="green" fontcolor="black" ] } diff --git a/pengine/test10/bug-lf-2171.exp b/pengine/test10/bug-lf-2171.exp index 39277ac3aa..72c0d453da 100644 
--- a/pengine/test10/bug-lf-2171.exp +++ b/pengine/test10/bug-lf-2171.exp @@ -1,132 +1,135 @@ + + + diff --git a/pengine/test10/clone-no-shuffle.dot b/pengine/test10/clone-no-shuffle.dot index 51751b0e84..04f2fbfd79 100644 --- a/pengine/test10/clone-no-shuffle.dot +++ b/pengine/test10/clone-no-shuffle.dot @@ -1,97 +1,98 @@ digraph "g" { "all_stopped" [ style=bold color="green" fontcolor="orange" ] "drbd1:0_demote_0 dktest2sles10" -> "drbd1:0_stop_0 dktest2sles10" [ style = bold] "drbd1:0_demote_0 dktest2sles10" -> "ms-drbd1_demoted_0" [ style = bold] "drbd1:0_demote_0 dktest2sles10" [ style=bold color="green" fontcolor="black" ] "drbd1:0_post_notify_demote_0 dktest2sles10" -> "ms-drbd1_confirmed-post_notify_demoted_0" [ style = bold] "drbd1:0_post_notify_demote_0 dktest2sles10" [ style=bold color="green" fontcolor="black" ] "drbd1:0_pre_notify_demote_0 dktest2sles10" -> "ms-drbd1_confirmed-pre_notify_demote_0" [ style = bold] "drbd1:0_pre_notify_demote_0 dktest2sles10" [ style=bold color="green" fontcolor="black" ] "drbd1:0_pre_notify_stop_0 dktest2sles10" -> "ms-drbd1_confirmed-pre_notify_stop_0" [ style = bold] "drbd1:0_pre_notify_stop_0 dktest2sles10" [ style=bold color="green" fontcolor="black" ] "drbd1:0_stop_0 dktest2sles10" -> "all_stopped" [ style = bold] "drbd1:0_stop_0 dktest2sles10" -> "ms-drbd1_stopped_0" [ style = bold] "drbd1:0_stop_0 dktest2sles10" [ style=bold color="green" fontcolor="black" ] "drbd1:1_monitor_0 dktest1sles10" -> "probe_complete dktest1sles10" [ style = bold] "drbd1:1_monitor_0 dktest1sles10" [ style=bold color="green" fontcolor="black" ] "drbd1:1_monitor_11000 dktest1sles10" [ style=bold color="green" fontcolor="black" ] "drbd1:1_post_notify_start_0 dktest1sles10" -> "ms-drbd1_confirmed-post_notify_running_0" [ style = bold] "drbd1:1_post_notify_start_0 dktest1sles10" [ style=bold color="green" fontcolor="black" ] "drbd1:1_start_0 dktest1sles10" -> "drbd1:1_monitor_11000 dktest1sles10" [ style = bold] "drbd1:1_start_0 
dktest1sles10" -> "ms-drbd1_running_0" [ style = bold] "drbd1:1_start_0 dktest1sles10" [ style=bold color="green" fontcolor="black" ] "ms-drbd1_confirmed-post_notify_demoted_0" -> "ms-drbd1_pre_notify_start_0" [ style = bold] "ms-drbd1_confirmed-post_notify_demoted_0" -> "ms-drbd1_pre_notify_stop_0" [ style = bold] "ms-drbd1_confirmed-post_notify_demoted_0" [ style=bold color="green" fontcolor="orange" ] "ms-drbd1_confirmed-post_notify_running_0" -> "drbd1:1_monitor_11000 dktest1sles10" [ style = bold] "ms-drbd1_confirmed-post_notify_running_0" [ style=bold color="green" fontcolor="orange" ] "ms-drbd1_confirmed-post_notify_stopped_0" -> "all_stopped" [ style = bold] "ms-drbd1_confirmed-post_notify_stopped_0" -> "ms-drbd1_pre_notify_start_0" [ style = bold] "ms-drbd1_confirmed-post_notify_stopped_0" [ style=bold color="green" fontcolor="orange" ] "ms-drbd1_confirmed-pre_notify_demote_0" -> "ms-drbd1_demote_0" [ style = bold] "ms-drbd1_confirmed-pre_notify_demote_0" -> "ms-drbd1_post_notify_demoted_0" [ style = bold] "ms-drbd1_confirmed-pre_notify_demote_0" [ style=bold color="green" fontcolor="orange" ] "ms-drbd1_confirmed-pre_notify_start_0" -> "ms-drbd1_post_notify_running_0" [ style = bold] "ms-drbd1_confirmed-pre_notify_start_0" -> "ms-drbd1_start_0" [ style = bold] "ms-drbd1_confirmed-pre_notify_start_0" [ style=bold color="green" fontcolor="orange" ] "ms-drbd1_confirmed-pre_notify_stop_0" -> "ms-drbd1_post_notify_stopped_0" [ style = bold] "ms-drbd1_confirmed-pre_notify_stop_0" -> "ms-drbd1_stop_0" [ style = bold] "ms-drbd1_confirmed-pre_notify_stop_0" [ style=bold color="green" fontcolor="orange" ] "ms-drbd1_demote_0" -> "drbd1:0_demote_0 dktest2sles10" [ style = bold] "ms-drbd1_demote_0" -> "ms-drbd1_demoted_0" [ style = bold] "ms-drbd1_demote_0" -> "ms-drbd1_stop_0" [ style = bold] "ms-drbd1_demote_0" [ style=bold color="green" fontcolor="orange" ] "ms-drbd1_demoted_0" -> "ms-drbd1_post_notify_demoted_0" [ style = bold] "ms-drbd1_demoted_0" -> 
"ms-drbd1_start_0" [ style = bold] "ms-drbd1_demoted_0" -> "ms-drbd1_stop_0" [ style = bold] "ms-drbd1_demoted_0" [ style=bold color="green" fontcolor="orange" ] "ms-drbd1_post_notify_demoted_0" -> "drbd1:0_post_notify_demote_0 dktest2sles10" [ style = bold] "ms-drbd1_post_notify_demoted_0" -> "ms-drbd1_confirmed-post_notify_demoted_0" [ style = bold] "ms-drbd1_post_notify_demoted_0" [ style=bold color="green" fontcolor="orange" ] "ms-drbd1_post_notify_running_0" -> "drbd1:1_post_notify_start_0 dktest1sles10" [ style = bold] "ms-drbd1_post_notify_running_0" -> "ms-drbd1_confirmed-post_notify_running_0" [ style = bold] "ms-drbd1_post_notify_running_0" [ style=bold color="green" fontcolor="orange" ] "ms-drbd1_post_notify_stopped_0" -> "ms-drbd1_confirmed-post_notify_stopped_0" [ style = bold] "ms-drbd1_post_notify_stopped_0" [ style=bold color="green" fontcolor="orange" ] "ms-drbd1_pre_notify_demote_0" -> "drbd1:0_pre_notify_demote_0 dktest2sles10" [ style = bold] "ms-drbd1_pre_notify_demote_0" -> "ms-drbd1_confirmed-pre_notify_demote_0" [ style = bold] "ms-drbd1_pre_notify_demote_0" [ style=bold color="green" fontcolor="orange" ] "ms-drbd1_pre_notify_start_0" -> "ms-drbd1_confirmed-pre_notify_start_0" [ style = bold] "ms-drbd1_pre_notify_start_0" [ style=bold color="green" fontcolor="orange" ] "ms-drbd1_pre_notify_stop_0" -> "drbd1:0_pre_notify_stop_0 dktest2sles10" [ style = bold] "ms-drbd1_pre_notify_stop_0" -> "ms-drbd1_confirmed-pre_notify_stop_0" [ style = bold] "ms-drbd1_pre_notify_stop_0" [ style=bold color="green" fontcolor="orange" ] "ms-drbd1_running_0" -> "ms-drbd1_post_notify_running_0" [ style = bold] "ms-drbd1_running_0" [ style=bold color="green" fontcolor="orange" ] "ms-drbd1_start_0" -> "drbd1:1_start_0 dktest1sles10" [ style = bold] "ms-drbd1_start_0" -> "ms-drbd1_running_0" [ style = bold] "ms-drbd1_start_0" [ style=bold color="green" fontcolor="orange" ] "ms-drbd1_stop_0" -> "drbd1:0_stop_0 dktest2sles10" [ style = bold] "ms-drbd1_stop_0" -> 
"ms-drbd1_start_0" [ style = bold] "ms-drbd1_stop_0" -> "ms-drbd1_stopped_0" [ style = bold] "ms-drbd1_stop_0" [ style=bold color="green" fontcolor="orange" ] "ms-drbd1_stopped_0" -> "ms-drbd1_post_notify_stopped_0" [ style = bold] "ms-drbd1_stopped_0" -> "ms-drbd1_start_0" [ style = bold] "ms-drbd1_stopped_0" [ style=bold color="green" fontcolor="orange" ] "probe_complete dktest1sles10" -> "probe_complete" [ style = bold] "probe_complete dktest1sles10" [ style=bold color="green" fontcolor="black" ] "probe_complete dktest2sles10" -> "probe_complete" [ style = bold] "probe_complete dktest2sles10" [ style=bold color="green" fontcolor="black" ] "probe_complete" -> "drbd1:0_stop_0 dktest2sles10" [ style = bold] "probe_complete" -> "drbd1:1_start_0 dktest1sles10" [ style = bold] "probe_complete" -> "ms-drbd1_start_0" [ style = bold] "probe_complete" -> "ms-drbd1_stop_0" [ style = bold] "probe_complete" -> "stonith-1_start_0 dktest2sles10" [ style = bold] "probe_complete" -> "testip_stop_0 dktest2sles10" [ style = bold] "probe_complete" [ style=bold color="green" fontcolor="orange" ] "stonith-1_monitor_0 dktest1sles10" -> "probe_complete dktest1sles10" [ style = bold] "stonith-1_monitor_0 dktest1sles10" [ style=bold color="green" fontcolor="black" ] "stonith-1_monitor_0 dktest2sles10" -> "probe_complete dktest2sles10" [ style = bold] "stonith-1_monitor_0 dktest2sles10" [ style=bold color="green" fontcolor="black" ] "stonith-1_start_0 dktest2sles10" [ style=bold color="green" fontcolor="black" ] "testip_monitor_0 dktest1sles10" -> "probe_complete dktest1sles10" [ style = bold] "testip_monitor_0 dktest1sles10" [ style=bold color="green" fontcolor="black" ] "testip_stop_0 dktest2sles10" -> "all_stopped" [ style = bold] +"testip_stop_0 dktest2sles10" -> "drbd1:0_demote_0 dktest2sles10" [ style = bold] "testip_stop_0 dktest2sles10" -> "ms-drbd1_demote_0" [ style = bold] "testip_stop_0 dktest2sles10" [ style=bold color="green" fontcolor="black" ] } diff --git 
a/pengine/test10/clone-no-shuffle.exp b/pengine/test10/clone-no-shuffle.exp index 724f86c664..acae77751a 100644 --- a/pengine/test10/clone-no-shuffle.exp +++ b/pengine/test10/clone-no-shuffle.exp @@ -1,513 +1,516 @@ + + + diff --git a/pengine/test10/coloc-negative-group.dot b/pengine/test10/coloc-negative-group.dot new file mode 100644 index 0000000000..d4d03eb797 --- /dev/null +++ b/pengine/test10/coloc-negative-group.dot @@ -0,0 +1,4 @@ +digraph "g" { +"Cancel res_Dummy_1_monitor_10000 lenny-b" [ style=bold color="green" fontcolor="black" ] +"Cancel res_Dummy_3_monitor_10000 lenny-a" [ style=bold color="green" fontcolor="black" ] +} diff --git a/pengine/test10/coloc-negative-group.exp b/pengine/test10/coloc-negative-group.exp new file mode 100644 index 0000000000..4dc9b595b9 --- /dev/null +++ b/pengine/test10/coloc-negative-group.exp @@ -0,0 +1,21 @@ + + + + + + + + + + + + + + + + + + + + + diff --git a/pengine/test10/coloc-negative-group.scores b/pengine/test10/coloc-negative-group.scores new file mode 100644 index 0000000000..c0443ce9fc --- /dev/null +++ b/pengine/test10/coloc-negative-group.scores @@ -0,0 +1,13 @@ +Allocation scores: +group_color: grp_1 allocation score on lenny-a: 0 +group_color: grp_1 allocation score on lenny-b: 0 +group_color: res_Dummy_1 allocation score on lenny-a: 0 +group_color: res_Dummy_1 allocation score on lenny-b: 100 +group_color: res_Dummy_2 allocation score on lenny-a: 0 +group_color: res_Dummy_2 allocation score on lenny-b: 1000000 +native_color: res_Dummy_3 allocation score on lenny-a: 100 +native_color: res_Dummy_3 allocation score on lenny-b: -1000000 +native_color: res_Dummy_1 allocation score on lenny-a: -1000000 +native_color: res_Dummy_1 allocation score on lenny-b: 1000000 +native_color: res_Dummy_2 allocation score on lenny-a: -1000000 +native_color: res_Dummy_2 allocation score on lenny-b: 1000000 diff --git a/pengine/test10/coloc-negative-group.xml b/pengine/test10/coloc-negative-group.xml new file mode 100644 
index 0000000000..a276f1c4d6 --- /dev/null +++ b/pengine/test10/coloc-negative-group.xml @@ -0,0 +1,87 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/pengine/test10/inc1.dot b/pengine/test10/inc1.dot index d42c173fa5..f93798de7f 100644 --- a/pengine/test10/inc1.dot +++ b/pengine/test10/inc1.dot @@ -1,64 +1,66 @@ digraph "g" { "child_rsc2:0_monitor_0 node1" -> "probe_complete node1" [ style = bold] "child_rsc2:0_monitor_0 node1" [ style=bold color="green" fontcolor="black" ] "child_rsc2:0_monitor_0 node2" -> "probe_complete node2" [ style = bold] "child_rsc2:0_monitor_0 node2" [ style=bold color="green" fontcolor="black" ] "child_rsc2:0_start_0 node2" -> "rsc2_running_0" [ style = bold] "child_rsc2:0_start_0 node2" [ style=bold color="green" fontcolor="black" ] "child_rsc2:1_monitor_0 node1" -> "probe_complete node1" [ style = bold] "child_rsc2:1_monitor_0 node1" [ style=bold color="green" fontcolor="black" ] "child_rsc2:1_monitor_0 node2" -> "probe_complete node2" [ style = bold] "child_rsc2:1_monitor_0 node2" [ style=bold color="green" fontcolor="black" ] "child_rsc2:1_start_0 node1" -> "rsc2_running_0" [ style = bold] "child_rsc2:1_start_0 node1" [ style=bold color="green" fontcolor="black" ] "child_rsc2:2_monitor_0 node1" -> "probe_complete node1" [ style = bold] "child_rsc2:2_monitor_0 node1" [ style=bold color="green" fontcolor="black" ] "child_rsc2:2_monitor_0 node2" -> "probe_complete node2" [ style = bold] "child_rsc2:2_monitor_0 node2" [ style=bold color="green" fontcolor="black" ] "child_rsc2:2_start_0 node2" -> "rsc2_running_0" [ style = bold] "child_rsc2:2_start_0 node2" [ style=bold color="green" fontcolor="black" ] "child_rsc2:3_monitor_0 node1" -> "probe_complete node1" [ style = bold] "child_rsc2:3_monitor_0 node1" [ style=bold color="green" fontcolor="black" ] 
"child_rsc2:3_monitor_0 node2" -> "probe_complete node2" [ style = bold] "child_rsc2:3_monitor_0 node2" [ style=bold color="green" fontcolor="black" ] "child_rsc2:3_start_0 node1" -> "rsc2_running_0" [ style = bold] "child_rsc2:3_start_0 node1" [ style=bold color="green" fontcolor="black" ] "child_rsc2:4_monitor_0 node1" -> "probe_complete node1" [ style = bold] "child_rsc2:4_monitor_0 node1" [ style=bold color="green" fontcolor="black" ] "child_rsc2:4_monitor_0 node2" -> "probe_complete node2" [ style = bold] "child_rsc2:4_monitor_0 node2" [ style=bold color="green" fontcolor="black" ] "probe_complete node1" -> "probe_complete" [ style = bold] "probe_complete node1" [ style=bold color="green" fontcolor="black" ] "probe_complete node2" -> "probe_complete" [ style = bold] "probe_complete node2" [ style=bold color="green" fontcolor="black" ] "probe_complete" -> "child_rsc2:0_start_0 node2" [ style = bold] "probe_complete" -> "child_rsc2:1_start_0 node1" [ style = bold] "probe_complete" -> "child_rsc2:2_start_0 node2" [ style = bold] "probe_complete" -> "child_rsc2:3_start_0 node1" [ style = bold] "probe_complete" -> "rsc1_start_0 node1" [ style = bold] "probe_complete" -> "rsc2_start_0" [ style = bold] "probe_complete" -> "rsc2_stop_0" [ style = bold] "probe_complete" -> "rsc3_start_0 node2" [ style = bold] "probe_complete" [ style=bold color="green" fontcolor="orange" ] "rsc1_monitor_0 node1" -> "probe_complete node1" [ style = bold] "rsc1_monitor_0 node1" [ style=bold color="green" fontcolor="black" ] "rsc1_monitor_0 node2" -> "probe_complete node2" [ style = bold] "rsc1_monitor_0 node2" [ style=bold color="green" fontcolor="black" ] +"rsc1_start_0 node1" -> "child_rsc2:1_start_0 node1" [ style = bold] +"rsc1_start_0 node1" -> "child_rsc2:3_start_0 node1" [ style = bold] "rsc1_start_0 node1" -> "rsc2_start_0" [ style = bold] "rsc1_start_0 node1" [ style=bold color="green" fontcolor="black" ] "rsc2_running_0" -> "rsc3_start_0 node2" [ style = bold] "rsc2_running_0" 
[ style=bold color="green" fontcolor="orange" ] "rsc2_start_0" -> "child_rsc2:0_start_0 node2" [ style = bold] "rsc2_start_0" -> "child_rsc2:1_start_0 node1" [ style = bold] "rsc2_start_0" -> "child_rsc2:2_start_0 node2" [ style = bold] "rsc2_start_0" -> "child_rsc2:3_start_0 node1" [ style = bold] "rsc2_start_0" -> "rsc2_running_0" [ style = bold] "rsc2_start_0" [ style=bold color="green" fontcolor="orange" ] "rsc2_stop_0" -> "rsc2_start_0" [ style = bold] "rsc2_stop_0" [ style=bold color="green" fontcolor="orange" ] "rsc3_monitor_0 node1" -> "probe_complete node1" [ style = bold] "rsc3_monitor_0 node1" [ style=bold color="green" fontcolor="black" ] "rsc3_monitor_0 node2" -> "probe_complete node2" [ style = bold] "rsc3_monitor_0 node2" [ style=bold color="green" fontcolor="black" ] "rsc3_start_0 node2" [ style=bold color="green" fontcolor="black" ] } diff --git a/pengine/test10/inc1.exp b/pengine/test10/inc1.exp index 16a2798805..5bf3b3ed40 100644 --- a/pengine/test10/inc1.exp +++ b/pengine/test10/inc1.exp @@ -1,351 +1,357 @@ + + + + + + diff --git a/pengine/test10/master-demote.dot b/pengine/test10/master-demote.dot index eb1dd50068..979841e15e 100644 --- a/pengine/test10/master-demote.dot +++ b/pengine/test10/master-demote.dot @@ -1,37 +1,38 @@ digraph "g" { "all_stopped" [ style=bold color="green" fontcolor="orange" ] +"named_address_start_0 cxb1" -> "named_drbd_node:0_promote_0 cxb1" [ style = bold] "named_address_start_0 cxb1" -> "named_drbd_promote_0" [ style = bold] "named_address_start_0 cxb1" [ style=bold color="green" fontcolor="black" ] "named_address_stop_0 cxa1" -> "all_stopped" [ style = bold] "named_address_stop_0 cxa1" -> "named_address_start_0 cxb1" [ style = bold] "named_address_stop_0 cxa1" [ style=bold color="green" fontcolor="black" ] "named_drbd_confirmed-post_notify_promoted_0" -> "named_drbd_node:0_monitor_10000 cxb1" [ style = bold] "named_drbd_confirmed-post_notify_promoted_0" [ style=bold color="green" fontcolor="orange" ] 
"named_drbd_confirmed-pre_notify_promote_0" -> "named_drbd_post_notify_promoted_0" [ style = bold] "named_drbd_confirmed-pre_notify_promote_0" -> "named_drbd_promote_0" [ style = bold] "named_drbd_confirmed-pre_notify_promote_0" [ style=bold color="green" fontcolor="orange" ] "named_drbd_node:0_monitor_10000 cxb1" [ style=bold color="green" fontcolor="black" ] "named_drbd_node:0_post_notify_promote_0 cxb1" -> "named_drbd_confirmed-post_notify_promoted_0" [ style = bold] "named_drbd_node:0_post_notify_promote_0 cxb1" [ style=bold color="green" fontcolor="black" ] "named_drbd_node:0_pre_notify_promote_0 cxb1" -> "named_drbd_confirmed-pre_notify_promote_0" [ style = bold] "named_drbd_node:0_pre_notify_promote_0 cxb1" [ style=bold color="green" fontcolor="black" ] "named_drbd_node:0_promote_0 cxb1" -> "named_drbd_node:0_monitor_10000 cxb1" [ style = bold] "named_drbd_node:0_promote_0 cxb1" -> "named_drbd_promoted_0" [ style = bold] "named_drbd_node:0_promote_0 cxb1" [ style=bold color="green" fontcolor="black" ] "named_drbd_node:1_post_notify_promote_0 cxa1" -> "named_drbd_confirmed-post_notify_promoted_0" [ style = bold] "named_drbd_node:1_post_notify_promote_0 cxa1" [ style=bold color="green" fontcolor="black" ] "named_drbd_node:1_pre_notify_promote_0 cxa1" -> "named_drbd_confirmed-pre_notify_promote_0" [ style = bold] "named_drbd_node:1_pre_notify_promote_0 cxa1" [ style=bold color="green" fontcolor="black" ] "named_drbd_post_notify_promoted_0" -> "named_drbd_confirmed-post_notify_promoted_0" [ style = bold] "named_drbd_post_notify_promoted_0" -> "named_drbd_node:0_post_notify_promote_0 cxb1" [ style = bold] "named_drbd_post_notify_promoted_0" -> "named_drbd_node:1_post_notify_promote_0 cxa1" [ style = bold] "named_drbd_post_notify_promoted_0" [ style=bold color="green" fontcolor="orange" ] "named_drbd_pre_notify_promote_0" -> "named_drbd_confirmed-pre_notify_promote_0" [ style = bold] "named_drbd_pre_notify_promote_0" -> "named_drbd_node:0_pre_notify_promote_0 
cxb1" [ style = bold] "named_drbd_pre_notify_promote_0" -> "named_drbd_node:1_pre_notify_promote_0 cxa1" [ style = bold] "named_drbd_pre_notify_promote_0" [ style=bold color="green" fontcolor="orange" ] "named_drbd_promote_0" -> "named_drbd_node:0_promote_0 cxb1" [ style = bold] "named_drbd_promote_0" [ style=bold color="green" fontcolor="orange" ] "named_drbd_promoted_0" -> "named_drbd_post_notify_promoted_0" [ style = bold] "named_drbd_promoted_0" [ style=bold color="green" fontcolor="orange" ] } diff --git a/pengine/test10/master-demote.exp b/pengine/test10/master-demote.exp index c10c4cd077..26851ad613 100644 --- a/pengine/test10/master-demote.exp +++ b/pengine/test10/master-demote.exp @@ -1,204 +1,207 @@ + + + diff --git a/pengine/test10/master-ordering.dot b/pengine/test10/master-ordering.dot index 442a37c59f..64e68c8d9f 100644 --- a/pengine/test10/master-ordering.dot +++ b/pengine/test10/master-ordering.dot @@ -1,97 +1,101 @@ digraph "g" { "apache2:0_monitor_0 webcluster01" -> "probe_complete webcluster01" [ style = bold] "apache2:0_monitor_0 webcluster01" [ style=bold color="green" fontcolor="black" ] "drbd_mysql:0_monitor_0 webcluster01" -> "probe_complete webcluster01" [ style = bold] "drbd_mysql:0_monitor_0 webcluster01" [ style=bold color="green" fontcolor="black" ] "drbd_mysql:0_post_notify_start_0 webcluster01" -> "ms_drbd_mysql_confirmed-post_notify_running_0" [ style = bold] "drbd_mysql:0_post_notify_start_0 webcluster01" [ style=bold color="green" fontcolor="black" ] "drbd_mysql:0_start_0 webcluster01" -> "ms_drbd_mysql_running_0" [ style = bold] "drbd_mysql:0_start_0 webcluster01" [ style=bold color="green" fontcolor="black" ] "drbd_www:0_monitor_0 webcluster01" -> "probe_complete webcluster01" [ style = bold] "drbd_www:0_monitor_0 webcluster01" [ style=bold color="green" fontcolor="black" ] "drbd_www:0_post_notify_start_0 webcluster01" -> "ms_drbd_www_confirmed-post_notify_running_0" [ style = bold] "drbd_www:0_post_notify_start_0 webcluster01" [ 
style=bold color="green" fontcolor="black" ] "drbd_www:0_start_0 webcluster01" -> "ms_drbd_www_running_0" [ style = bold] "drbd_www:0_start_0 webcluster01" [ style=bold color="green" fontcolor="black" ] "extip_1_monitor_0 webcluster01" -> "probe_complete webcluster01" [ style = bold] "extip_1_monitor_0 webcluster01" [ style=bold color="green" fontcolor="black" ] "extip_1_monitor_30000 webcluster01" [ style=bold color="green" fontcolor="black" ] "extip_1_start_0 webcluster01" -> "extip_1_monitor_30000 webcluster01" [ style = bold] "extip_1_start_0 webcluster01" [ style=bold color="green" fontcolor="black" ] "extip_2_monitor_0 webcluster01" -> "probe_complete webcluster01" [ style = bold] "extip_2_monitor_0 webcluster01" [ style=bold color="green" fontcolor="black" ] "extip_2_monitor_30000 webcluster01" [ style=bold color="green" fontcolor="black" ] "extip_2_start_0 webcluster01" -> "extip_2_monitor_30000 webcluster01" [ style = bold] "extip_2_start_0 webcluster01" [ style=bold color="green" fontcolor="black" ] "fs_mysql_monitor_0 webcluster01" -> "probe_complete webcluster01" [ style = bold] "fs_mysql_monitor_0 webcluster01" [ style=bold color="green" fontcolor="black" ] "intip_0_main_monitor_0 webcluster01" -> "probe_complete webcluster01" [ style = bold] "intip_0_main_monitor_0 webcluster01" [ style=bold color="green" fontcolor="black" ] "intip_1_master_monitor_0 webcluster01" -> "probe_complete webcluster01" [ style = bold] "intip_1_master_monitor_0 webcluster01" [ style=bold color="green" fontcolor="black" ] "intip_1_master_monitor_30000 webcluster01" [ style=bold color="green" fontcolor="black" ] +"intip_1_master_start_0 webcluster01" -> "drbd_mysql:0_start_0 webcluster01" [ style = bold] +"intip_1_master_start_0 webcluster01" -> "drbd_www:0_start_0 webcluster01" [ style = bold] "intip_1_master_start_0 webcluster01" -> "intip_1_master_monitor_30000 webcluster01" [ style = bold] "intip_1_master_start_0 webcluster01" -> "ms_drbd_mysql_start_0" [ style = bold] 
"intip_1_master_start_0 webcluster01" -> "ms_drbd_www_start_0" [ style = bold] "intip_1_master_start_0 webcluster01" [ style=bold color="green" fontcolor="black" ] "intip_2_slave_monitor_0 webcluster01" -> "probe_complete webcluster01" [ style = bold] "intip_2_slave_monitor_0 webcluster01" [ style=bold color="green" fontcolor="black" ] "intip_2_slave_monitor_30000 webcluster01" [ style=bold color="green" fontcolor="black" ] +"intip_2_slave_start_0 webcluster01" -> "drbd_mysql:0_start_0 webcluster01" [ style = bold] +"intip_2_slave_start_0 webcluster01" -> "drbd_www:0_start_0 webcluster01" [ style = bold] "intip_2_slave_start_0 webcluster01" -> "intip_2_slave_monitor_30000 webcluster01" [ style = bold] "intip_2_slave_start_0 webcluster01" -> "ms_drbd_mysql_start_0" [ style = bold] "intip_2_slave_start_0 webcluster01" -> "ms_drbd_www_start_0" [ style = bold] "intip_2_slave_start_0 webcluster01" [ style=bold color="green" fontcolor="black" ] "ms_drbd_mysql_confirmed-post_notify_running_0" [ style=bold color="green" fontcolor="orange" ] "ms_drbd_mysql_confirmed-pre_notify_start_0" -> "ms_drbd_mysql_post_notify_running_0" [ style = bold] "ms_drbd_mysql_confirmed-pre_notify_start_0" -> "ms_drbd_mysql_start_0" [ style = bold] "ms_drbd_mysql_confirmed-pre_notify_start_0" [ style=bold color="green" fontcolor="orange" ] "ms_drbd_mysql_post_notify_running_0" -> "drbd_mysql:0_post_notify_start_0 webcluster01" [ style = bold] "ms_drbd_mysql_post_notify_running_0" -> "ms_drbd_mysql_confirmed-post_notify_running_0" [ style = bold] "ms_drbd_mysql_post_notify_running_0" [ style=bold color="green" fontcolor="orange" ] "ms_drbd_mysql_pre_notify_start_0" -> "ms_drbd_mysql_confirmed-pre_notify_start_0" [ style = bold] "ms_drbd_mysql_pre_notify_start_0" [ style=bold color="green" fontcolor="orange" ] "ms_drbd_mysql_running_0" -> "ms_drbd_mysql_post_notify_running_0" [ style = bold] "ms_drbd_mysql_running_0" [ style=bold color="green" fontcolor="orange" ] "ms_drbd_mysql_start_0" -> 
"drbd_mysql:0_start_0 webcluster01" [ style = bold] "ms_drbd_mysql_start_0" -> "ms_drbd_mysql_running_0" [ style = bold] "ms_drbd_mysql_start_0" [ style=bold color="green" fontcolor="orange" ] "ms_drbd_mysql_stop_0" -> "ms_drbd_mysql_start_0" [ style = bold] "ms_drbd_mysql_stop_0" [ style=bold color="green" fontcolor="orange" ] "ms_drbd_www_confirmed-post_notify_running_0" [ style=bold color="green" fontcolor="orange" ] "ms_drbd_www_confirmed-pre_notify_start_0" -> "ms_drbd_www_post_notify_running_0" [ style = bold] "ms_drbd_www_confirmed-pre_notify_start_0" -> "ms_drbd_www_start_0" [ style = bold] "ms_drbd_www_confirmed-pre_notify_start_0" [ style=bold color="green" fontcolor="orange" ] "ms_drbd_www_post_notify_running_0" -> "drbd_www:0_post_notify_start_0 webcluster01" [ style = bold] "ms_drbd_www_post_notify_running_0" -> "ms_drbd_www_confirmed-post_notify_running_0" [ style = bold] "ms_drbd_www_post_notify_running_0" [ style=bold color="green" fontcolor="orange" ] "ms_drbd_www_pre_notify_start_0" -> "ms_drbd_www_confirmed-pre_notify_start_0" [ style = bold] "ms_drbd_www_pre_notify_start_0" [ style=bold color="green" fontcolor="orange" ] "ms_drbd_www_running_0" -> "ms_drbd_www_post_notify_running_0" [ style = bold] "ms_drbd_www_running_0" [ style=bold color="green" fontcolor="orange" ] "ms_drbd_www_start_0" -> "drbd_www:0_start_0 webcluster01" [ style = bold] "ms_drbd_www_start_0" -> "ms_drbd_www_running_0" [ style = bold] "ms_drbd_www_start_0" [ style=bold color="green" fontcolor="orange" ] "ms_drbd_www_stop_0" -> "ms_drbd_www_start_0" [ style = bold] "ms_drbd_www_stop_0" [ style=bold color="green" fontcolor="orange" ] "mysql-proxy:0_monitor_0 webcluster01" -> "probe_complete webcluster01" [ style = bold] "mysql-proxy:0_monitor_0 webcluster01" [ style=bold color="green" fontcolor="black" ] "mysql-server_monitor_0 webcluster01" -> "probe_complete webcluster01" [ style = bold] "mysql-server_monitor_0 webcluster01" [ style=bold color="green" fontcolor="black" ] 
"ocfs2_www:0_monitor_0 webcluster01" -> "probe_complete webcluster01" [ style = bold] "ocfs2_www:0_monitor_0 webcluster01" [ style=bold color="green" fontcolor="black" ] "ocfs2_www:1_monitor_0 webcluster01" -> "probe_complete webcluster01" [ style = bold] "ocfs2_www:1_monitor_0 webcluster01" [ style=bold color="green" fontcolor="black" ] "probe_complete webcluster01" -> "probe_complete" [ style = bold] "probe_complete webcluster01" [ style=bold color="green" fontcolor="black" ] "probe_complete" -> "drbd_mysql:0_start_0 webcluster01" [ style = bold] "probe_complete" -> "drbd_www:0_start_0 webcluster01" [ style = bold] "probe_complete" -> "extip_1_start_0 webcluster01" [ style = bold] "probe_complete" -> "extip_2_start_0 webcluster01" [ style = bold] "probe_complete" -> "intip_1_master_start_0 webcluster01" [ style = bold] "probe_complete" -> "intip_2_slave_start_0 webcluster01" [ style = bold] "probe_complete" -> "ms_drbd_mysql_start_0" [ style = bold] "probe_complete" -> "ms_drbd_mysql_stop_0" [ style = bold] "probe_complete" -> "ms_drbd_www_start_0" [ style = bold] "probe_complete" -> "ms_drbd_www_stop_0" [ style = bold] "probe_complete" [ style=bold color="green" fontcolor="orange" ] } diff --git a/pengine/test10/master-ordering.exp b/pengine/test10/master-ordering.exp index afdef6725d..ec4c9929f7 100644 --- a/pengine/test10/master-ordering.exp +++ b/pengine/test10/master-ordering.exp @@ -1,544 +1,556 @@ + + + + + + + + + + + + diff --git a/pengine/test10/master-pseudo.dot b/pengine/test10/master-pseudo.dot index 651d84074a..0fd555f06e 100644 --- a/pengine/test10/master-pseudo.dot +++ b/pengine/test10/master-pseudo.dot @@ -1,57 +1,58 @@ digraph "g" { "drbd_float:0_post_notify_promote_0 sambuca.linbit" -> "ms_drbd_float_confirmed-post_notify_promoted_0" [ style = bold] "drbd_float:0_post_notify_promote_0 sambuca.linbit" [ style=bold color="green" fontcolor="black" ] "drbd_float:0_post_notify_start_0 sambuca.linbit" -> 
"ms_drbd_float_confirmed-post_notify_running_0" [ style = bold] "drbd_float:0_post_notify_start_0 sambuca.linbit" [ style=bold color="green" fontcolor="black" ] "drbd_float:0_pre_notify_promote_0 sambuca.linbit" -> "ms_drbd_float_confirmed-pre_notify_promote_0" [ style = bold] "drbd_float:0_pre_notify_promote_0 sambuca.linbit" [ style=bold color="green" fontcolor="black" ] "drbd_float:0_promote_0 sambuca.linbit" -> "ms_drbd_float_promoted_0" [ style = bold] "drbd_float:0_promote_0 sambuca.linbit" [ style=bold color="green" fontcolor="black" ] "drbd_float:0_start_0 sambuca.linbit" -> "drbd_float:0_promote_0 sambuca.linbit" [ style = bold] "drbd_float:0_start_0 sambuca.linbit" -> "ms_drbd_float_running_0" [ style = bold] "drbd_float:0_start_0 sambuca.linbit" [ style=bold color="green" fontcolor="black" ] "fs_float_start_0 " -> "nfsexport_running_0" [ style = dashed] "fs_float_start_0 " [ style=dashed color="red" fontcolor="black" ] +"ip_float_right_start_0 sambuca.linbit" -> "drbd_float:0_start_0 sambuca.linbit" [ style = bold] "ip_float_right_start_0 sambuca.linbit" -> "ms_drbd_float_start_0" [ style = bold] "ip_float_right_start_0 sambuca.linbit" [ style=bold color="green" fontcolor="black" ] "ip_nfs_start_0 sambuca.linbit" -> "fs_float_start_0 " [ style = dashed] "ip_nfs_start_0 sambuca.linbit" -> "nfsexport_running_0" [ style = dashed] "ip_nfs_start_0 sambuca.linbit" [ style=bold color="green" fontcolor="black" ] "ms_drbd_float_confirmed-post_notify_promoted_0" -> "nfsexport_start_0" [ style = bold] "ms_drbd_float_confirmed-post_notify_promoted_0" [ style=bold color="green" fontcolor="orange" ] "ms_drbd_float_confirmed-post_notify_running_0" -> "ms_drbd_float_pre_notify_promote_0" [ style = bold] "ms_drbd_float_confirmed-post_notify_running_0" [ style=bold color="green" fontcolor="orange" ] "ms_drbd_float_confirmed-pre_notify_promote_0" -> "ms_drbd_float_post_notify_promoted_0" [ style = bold] "ms_drbd_float_confirmed-pre_notify_promote_0" -> 
"ms_drbd_float_promote_0" [ style = bold] "ms_drbd_float_confirmed-pre_notify_promote_0" [ style=bold color="green" fontcolor="orange" ] "ms_drbd_float_confirmed-pre_notify_start_0" -> "ms_drbd_float_post_notify_running_0" [ style = bold] "ms_drbd_float_confirmed-pre_notify_start_0" -> "ms_drbd_float_start_0" [ style = bold] "ms_drbd_float_confirmed-pre_notify_start_0" [ style=bold color="green" fontcolor="orange" ] "ms_drbd_float_post_notify_promoted_0" -> "drbd_float:0_post_notify_promote_0 sambuca.linbit" [ style = bold] "ms_drbd_float_post_notify_promoted_0" -> "ms_drbd_float_confirmed-post_notify_promoted_0" [ style = bold] "ms_drbd_float_post_notify_promoted_0" [ style=bold color="green" fontcolor="orange" ] "ms_drbd_float_post_notify_running_0" -> "drbd_float:0_post_notify_start_0 sambuca.linbit" [ style = bold] "ms_drbd_float_post_notify_running_0" -> "ms_drbd_float_confirmed-post_notify_running_0" [ style = bold] "ms_drbd_float_post_notify_running_0" [ style=bold color="green" fontcolor="orange" ] "ms_drbd_float_pre_notify_promote_0" -> "drbd_float:0_pre_notify_promote_0 sambuca.linbit" [ style = bold] "ms_drbd_float_pre_notify_promote_0" -> "ms_drbd_float_confirmed-pre_notify_promote_0" [ style = bold] "ms_drbd_float_pre_notify_promote_0" [ style=bold color="green" fontcolor="orange" ] "ms_drbd_float_pre_notify_start_0" -> "ms_drbd_float_confirmed-pre_notify_start_0" [ style = bold] "ms_drbd_float_pre_notify_start_0" [ style=bold color="green" fontcolor="orange" ] "ms_drbd_float_promote_0" -> "drbd_float:0_promote_0 sambuca.linbit" [ style = bold] "ms_drbd_float_promote_0" [ style=bold color="green" fontcolor="orange" ] "ms_drbd_float_promoted_0" -> "ms_drbd_float_post_notify_promoted_0" [ style = bold] "ms_drbd_float_promoted_0" -> "nfsexport_start_0" [ style = bold] "ms_drbd_float_promoted_0" [ style=bold color="green" fontcolor="orange" ] "ms_drbd_float_running_0" -> "ms_drbd_float_post_notify_running_0" [ style = bold] "ms_drbd_float_running_0" -> 
"ms_drbd_float_promote_0" [ style = bold] "ms_drbd_float_running_0" [ style=bold color="green" fontcolor="orange" ] "ms_drbd_float_start_0" -> "drbd_float:0_start_0 sambuca.linbit" [ style = bold] "ms_drbd_float_start_0" -> "ms_drbd_float_promote_0" [ style = bold] "ms_drbd_float_start_0" -> "ms_drbd_float_running_0" [ style = bold] "ms_drbd_float_start_0" [ style=bold color="green" fontcolor="orange" ] "nfsexport_running_0" [ style=dashed color="red" fontcolor="orange" ] "nfsexport_start_0" -> "ip_nfs_start_0 sambuca.linbit" [ style = bold] "nfsexport_start_0" -> "nfsexport_running_0" [ style = dashed] "nfsexport_start_0" [ style=bold color="green" fontcolor="orange" ] } diff --git a/pengine/test10/master-pseudo.exp b/pengine/test10/master-pseudo.exp index 54659f85fc..830259450c 100644 --- a/pengine/test10/master-pseudo.exp +++ b/pengine/test10/master-pseudo.exp @@ -1,275 +1,278 @@ + + + diff --git a/pengine/test10/migrate-5.dot b/pengine/test10/migrate-5.dot index 43b1ef8eb9..419ac328d3 100644 --- a/pengine/test10/migrate-5.dot +++ b/pengine/test10/migrate-5.dot @@ -1,22 +1,23 @@ digraph "g" { "all_stopped" [ style=bold color="green" fontcolor="orange" ] "clone-dom0-iscsi1_stop_0" -> "clone-dom0-iscsi1_stopped_0" [ style = bold] "clone-dom0-iscsi1_stop_0" -> "dom0-iscsi1:0_stop_0" [ style = bold] "clone-dom0-iscsi1_stop_0" [ style=bold color="green" fontcolor="orange" ] "clone-dom0-iscsi1_stopped_0" [ style=bold color="green" fontcolor="orange" ] "dom0-iscsi1-cnx1:0_stop_0 dom0-02" -> "all_stopped" [ style = bold] "dom0-iscsi1-cnx1:0_stop_0 dom0-02" -> "dom0-iscsi1:0_stopped_0" [ style = bold] "dom0-iscsi1-cnx1:0_stop_0 dom0-02" [ style=bold color="green" fontcolor="black" ] "dom0-iscsi1:0_stop_0" -> "dom0-iscsi1-cnx1:0_stop_0 dom0-02" [ style = bold] "dom0-iscsi1:0_stop_0" -> "dom0-iscsi1:0_stopped_0" [ style = bold] "dom0-iscsi1:0_stop_0" [ style=bold color="green" fontcolor="orange" ] "dom0-iscsi1:0_stopped_0" -> "clone-dom0-iscsi1_stopped_0" [ style = bold] 
"dom0-iscsi1:0_stopped_0" [ style=bold color="green" fontcolor="orange" ] "domU-test01_migrate_from_0 dom0-01" -> "all_stopped" [ style = bold] "domU-test01_migrate_from_0 dom0-01" -> "clone-dom0-iscsi1_stop_0" [ style = bold] "domU-test01_migrate_from_0 dom0-01" [ style=bold color="green" fontcolor="black" ] "domU-test01_migrate_to_0 dom0-02" -> "all_stopped" [ style = bold] "domU-test01_migrate_to_0 dom0-02" -> "clone-dom0-iscsi1_stop_0" [ style = bold] +"domU-test01_migrate_to_0 dom0-02" -> "dom0-iscsi1:0_stop_0" [ style = bold] "domU-test01_migrate_to_0 dom0-02" -> "domU-test01_migrate_from_0 dom0-01" [ style = bold] "domU-test01_migrate_to_0 dom0-02" [ style=bold color="green" fontcolor="black" ] } diff --git a/pengine/test10/migrate-5.exp b/pengine/test10/migrate-5.exp index 66977e20c9..b14854e267 100644 --- a/pengine/test10/migrate-5.exp +++ b/pengine/test10/migrate-5.exp @@ -1,113 +1,116 @@ + + + diff --git a/pengine/test10/migrate-stop-complex.dot b/pengine/test10/migrate-stop-complex.dot index 777aac66ab..a90f421a8b 100644 --- a/pengine/test10/migrate-stop-complex.dot +++ b/pengine/test10/migrate-stop-complex.dot @@ -1,37 +1,39 @@ digraph "g" { "all_stopped" [ style=bold color="green" fontcolor="orange" ] "bottom:1_stop_0 dom0-02" -> "all_stopped" [ style = bold] "bottom:1_stop_0 dom0-02" -> "clone-bottom_stopped_0" [ style = bold] "bottom:1_stop_0 dom0-02" [ style=bold color="green" fontcolor="black" ] "clone-bottom_stop_0" -> "bottom:1_stop_0 dom0-02" [ style = bold] "clone-bottom_stop_0" -> "clone-bottom_stopped_0" [ style = bold] "clone-bottom_stop_0" [ style=bold color="green" fontcolor="orange" ] "clone-bottom_stopped_0" [ style=bold color="green" fontcolor="orange" ] "clone-dom0-iscsi1_stop_0" -> "clone-dom0-iscsi1_stopped_0" [ style = bold] "clone-dom0-iscsi1_stop_0" -> "dom0-iscsi1:0_stop_0" [ style = bold] "clone-dom0-iscsi1_stop_0" [ style=bold color="green" fontcolor="orange" ] "clone-dom0-iscsi1_stopped_0" [ style=bold color="green" 
fontcolor="orange" ] "dom0-iscsi1-cnx1:0_stop_0 dom0-02" -> "all_stopped" [ style = bold] "dom0-iscsi1-cnx1:0_stop_0 dom0-02" -> "dom0-iscsi1:0_stopped_0" [ style = bold] "dom0-iscsi1-cnx1:0_stop_0 dom0-02" [ style=bold color="green" fontcolor="black" ] "dom0-iscsi1:0_stop_0" -> "dom0-iscsi1-cnx1:0_stop_0 dom0-02" [ style = bold] "dom0-iscsi1:0_stop_0" -> "dom0-iscsi1:0_stopped_0" [ style = bold] "dom0-iscsi1:0_stop_0" [ style=bold color="green" fontcolor="orange" ] "dom0-iscsi1:0_stopped_0" -> "clone-dom0-iscsi1_stopped_0" [ style = bold] "dom0-iscsi1:0_stopped_0" [ style=bold color="green" fontcolor="orange" ] "domU-test01_migrate_from_0 dom0-01" -> "all_stopped" [ style = bold] "domU-test01_migrate_from_0 dom0-01" -> "clone-bottom_stop_0" [ style = bold] "domU-test01_migrate_from_0 dom0-01" -> "clone-dom0-iscsi1_stop_0" [ style = bold] "domU-test01_migrate_from_0 dom0-01" -> "top_start_0 dom0-01" [ style = bold] "domU-test01_migrate_from_0 dom0-01" [ style=bold color="green" fontcolor="black" ] "domU-test01_migrate_to_0 dom0-02" -> "all_stopped" [ style = bold] +"domU-test01_migrate_to_0 dom0-02" -> "bottom:1_stop_0 dom0-02" [ style = bold] "domU-test01_migrate_to_0 dom0-02" -> "clone-bottom_stop_0" [ style = bold] "domU-test01_migrate_to_0 dom0-02" -> "clone-dom0-iscsi1_stop_0" [ style = bold] +"domU-test01_migrate_to_0 dom0-02" -> "dom0-iscsi1:0_stop_0" [ style = bold] "domU-test01_migrate_to_0 dom0-02" -> "domU-test01_migrate_from_0 dom0-01" [ style = bold] "domU-test01_migrate_to_0 dom0-02" [ style=bold color="green" fontcolor="black" ] "top_start_0 dom0-01" [ style=bold color="green" fontcolor="black" ] "top_stop_0 dom0-02" -> "all_stopped" [ style = bold] "top_stop_0 dom0-02" -> "domU-test01_migrate_to_0 dom0-02" [ style = bold] "top_stop_0 dom0-02" -> "top_start_0 dom0-01" [ style = bold] "top_stop_0 dom0-02" [ style=bold color="green" fontcolor="black" ] } diff --git a/pengine/test10/migrate-stop-complex.exp b/pengine/test10/migrate-stop-complex.exp 
index ef553350b1..c0117f6061 100644 --- a/pengine/test10/migrate-stop-complex.exp +++ b/pengine/test10/migrate-stop-complex.exp @@ -1,191 +1,197 @@ + + + + + + diff --git a/pengine/test10/migrate-stop.dot b/pengine/test10/migrate-stop.dot index 817e9cd354..7ee7c8fa1c 100644 --- a/pengine/test10/migrate-stop.dot +++ b/pengine/test10/migrate-stop.dot @@ -1,22 +1,23 @@ digraph "g" { "all_stopped" [ style=bold color="green" fontcolor="orange" ] "clone-dom0-iscsi1_stop_0" -> "clone-dom0-iscsi1_stopped_0" [ style = bold] "clone-dom0-iscsi1_stop_0" -> "dom0-iscsi1:0_stop_0" [ style = bold] "clone-dom0-iscsi1_stop_0" [ style=bold color="green" fontcolor="orange" ] "clone-dom0-iscsi1_stopped_0" [ style=bold color="green" fontcolor="orange" ] "dom0-iscsi1-cnx1:0_stop_0 dom0-02" -> "all_stopped" [ style = bold] "dom0-iscsi1-cnx1:0_stop_0 dom0-02" -> "dom0-iscsi1:0_stopped_0" [ style = bold] "dom0-iscsi1-cnx1:0_stop_0 dom0-02" [ style=bold color="green" fontcolor="black" ] "dom0-iscsi1:0_stop_0" -> "dom0-iscsi1-cnx1:0_stop_0 dom0-02" [ style = bold] "dom0-iscsi1:0_stop_0" -> "dom0-iscsi1:0_stopped_0" [ style = bold] "dom0-iscsi1:0_stop_0" [ style=bold color="green" fontcolor="orange" ] "dom0-iscsi1:0_stopped_0" -> "clone-dom0-iscsi1_stopped_0" [ style = bold] "dom0-iscsi1:0_stopped_0" [ style=bold color="green" fontcolor="orange" ] "domU-test01_migrate_from_0 dom0-01" -> "all_stopped" [ style = bold] "domU-test01_migrate_from_0 dom0-01" -> "clone-dom0-iscsi1_stop_0" [ style = bold] "domU-test01_migrate_from_0 dom0-01" [ style=bold color="green" fontcolor="black" ] "domU-test01_migrate_to_0 dom0-02" -> "all_stopped" [ style = bold] "domU-test01_migrate_to_0 dom0-02" -> "clone-dom0-iscsi1_stop_0" [ style = bold] +"domU-test01_migrate_to_0 dom0-02" -> "dom0-iscsi1:0_stop_0" [ style = bold] "domU-test01_migrate_to_0 dom0-02" -> "domU-test01_migrate_from_0 dom0-01" [ style = bold] "domU-test01_migrate_to_0 dom0-02" [ style=bold color="green" fontcolor="black" ] } diff --git 
a/pengine/test10/migrate-stop.exp b/pengine/test10/migrate-stop.exp index 66977e20c9..b14854e267 100644 --- a/pengine/test10/migrate-stop.exp +++ b/pengine/test10/migrate-stop.exp @@ -1,113 +1,116 @@ + + + diff --git a/pengine/test10/migrate-stop_start.dot b/pengine/test10/migrate-stop_start.dot index e25b0328f4..ced9d982db 100644 --- a/pengine/test10/migrate-stop_start.dot +++ b/pengine/test10/migrate-stop_start.dot @@ -1,37 +1,38 @@ digraph "g" { "all_stopped" [ style=bold color="green" fontcolor="orange" ] "clone-dom0-iscsi1_running_0" -> "domU-test01_start_0 dom0-01" [ style = bold] "clone-dom0-iscsi1_running_0" [ style=bold color="green" fontcolor="orange" ] "clone-dom0-iscsi1_start_0" -> "clone-dom0-iscsi1_running_0" [ style = bold] "clone-dom0-iscsi1_start_0" -> "dom0-iscsi1:0_start_0" [ style = bold] "clone-dom0-iscsi1_start_0" [ style=bold color="green" fontcolor="orange" ] "clone-dom0-iscsi1_stop_0" -> "clone-dom0-iscsi1_start_0" [ style = bold] "clone-dom0-iscsi1_stop_0" -> "clone-dom0-iscsi1_stopped_0" [ style = bold] "clone-dom0-iscsi1_stop_0" -> "dom0-iscsi1:0_stop_0" [ style = bold] "clone-dom0-iscsi1_stop_0" [ style=bold color="green" fontcolor="orange" ] "clone-dom0-iscsi1_stopped_0" -> "clone-dom0-iscsi1_start_0" [ style = bold] "clone-dom0-iscsi1_stopped_0" [ style=bold color="green" fontcolor="orange" ] "dom0-iscsi1-cnx1:0_start_0 dom0-01" -> "dom0-iscsi1:0_running_0" [ style = bold] "dom0-iscsi1-cnx1:0_start_0 dom0-01" [ style=bold color="green" fontcolor="black" ] "dom0-iscsi1-cnx1:0_stop_0 dom0-02" -> "all_stopped" [ style = bold] "dom0-iscsi1-cnx1:0_stop_0 dom0-02" -> "dom0-iscsi1-cnx1:0_start_0 dom0-01" [ style = bold] "dom0-iscsi1-cnx1:0_stop_0 dom0-02" -> "dom0-iscsi1:0_stopped_0" [ style = bold] "dom0-iscsi1-cnx1:0_stop_0 dom0-02" [ style=bold color="green" fontcolor="black" ] "dom0-iscsi1:0_running_0" -> "clone-dom0-iscsi1_running_0" [ style = bold] "dom0-iscsi1:0_running_0" [ style=bold color="green" fontcolor="orange" ] 
"dom0-iscsi1:0_start_0" -> "dom0-iscsi1-cnx1:0_start_0 dom0-01" [ style = bold] "dom0-iscsi1:0_start_0" -> "dom0-iscsi1:0_running_0" [ style = bold] "dom0-iscsi1:0_start_0" [ style=bold color="green" fontcolor="orange" ] "dom0-iscsi1:0_stop_0" -> "dom0-iscsi1-cnx1:0_stop_0 dom0-02" [ style = bold] "dom0-iscsi1:0_stop_0" -> "dom0-iscsi1:0_start_0" [ style = bold] "dom0-iscsi1:0_stop_0" -> "dom0-iscsi1:0_stopped_0" [ style = bold] "dom0-iscsi1:0_stop_0" [ style=bold color="green" fontcolor="orange" ] "dom0-iscsi1:0_stopped_0" -> "clone-dom0-iscsi1_stopped_0" [ style = bold] "dom0-iscsi1:0_stopped_0" -> "dom0-iscsi1:0_start_0" [ style = bold] "dom0-iscsi1:0_stopped_0" [ style=bold color="green" fontcolor="orange" ] "domU-test01_start_0 dom0-01" [ style=bold color="green" fontcolor="black" ] "domU-test01_stop_0 dom0-02" -> "all_stopped" [ style = bold] "domU-test01_stop_0 dom0-02" -> "clone-dom0-iscsi1_stop_0" [ style = bold] +"domU-test01_stop_0 dom0-02" -> "dom0-iscsi1:0_stop_0" [ style = bold] "domU-test01_stop_0 dom0-02" -> "domU-test01_start_0 dom0-01" [ style = bold] "domU-test01_stop_0 dom0-02" [ style=bold color="green" fontcolor="black" ] } diff --git a/pengine/test10/migrate-stop_start.exp b/pengine/test10/migrate-stop_start.exp index 46327c8c0d..2bd2729a34 100644 --- a/pengine/test10/migrate-stop_start.exp +++ b/pengine/test10/migrate-stop_start.exp @@ -1,189 +1,192 @@ + + + diff --git a/pengine/test10/novell-239082.dot b/pengine/test10/novell-239082.dot index a8638c5304..e65e567b26 100644 --- a/pengine/test10/novell-239082.dot +++ b/pengine/test10/novell-239082.dot @@ -1,94 +1,95 @@ digraph "g" { "all_stopped" [ style=bold color="green" fontcolor="orange" ] "do_shutdown xen-1" [ style=bold color="green" fontcolor="black" ] "drbd0:0_post_notify_demote_0 xen-2" -> "ms-drbd0_confirmed-post_notify_demoted_0" [ style = bold] "drbd0:0_post_notify_demote_0 xen-2" [ style=bold color="green" fontcolor="black" ] "drbd0:0_post_notify_promote_0 xen-2" -> 
"ms-drbd0_confirmed-post_notify_promoted_0" [ style = bold] "drbd0:0_post_notify_promote_0 xen-2" [ style=bold color="green" fontcolor="black" ] "drbd0:0_post_notify_stop_0 xen-2" -> "ms-drbd0_confirmed-post_notify_stopped_0" [ style = bold] "drbd0:0_post_notify_stop_0 xen-2" [ style=bold color="green" fontcolor="black" ] "drbd0:0_pre_notify_demote_0 xen-2" -> "ms-drbd0_confirmed-pre_notify_demote_0" [ style = bold] "drbd0:0_pre_notify_demote_0 xen-2" [ style=bold color="green" fontcolor="black" ] "drbd0:0_pre_notify_promote_0 xen-2" -> "ms-drbd0_confirmed-pre_notify_promote_0" [ style = bold] "drbd0:0_pre_notify_promote_0 xen-2" [ style=bold color="green" fontcolor="black" ] "drbd0:0_pre_notify_stop_0 xen-2" -> "ms-drbd0_confirmed-pre_notify_stop_0" [ style = bold] "drbd0:0_pre_notify_stop_0 xen-2" [ style=bold color="green" fontcolor="black" ] "drbd0:0_promote_0 xen-2" -> "ms-drbd0_promoted_0" [ style = bold] "drbd0:0_promote_0 xen-2" [ style=bold color="green" fontcolor="black" ] "drbd0:1_demote_0 xen-1" -> "drbd0:1_stop_0 xen-1" [ style = bold] "drbd0:1_demote_0 xen-1" -> "ms-drbd0_demoted_0" [ style = bold] "drbd0:1_demote_0 xen-1" [ style=bold color="green" fontcolor="black" ] "drbd0:1_post_notify_demote_0 xen-1" -> "ms-drbd0_confirmed-post_notify_demoted_0" [ style = bold] "drbd0:1_post_notify_demote_0 xen-1" [ style=bold color="green" fontcolor="black" ] "drbd0:1_pre_notify_demote_0 xen-1" -> "ms-drbd0_confirmed-pre_notify_demote_0" [ style = bold] "drbd0:1_pre_notify_demote_0 xen-1" [ style=bold color="green" fontcolor="black" ] "drbd0:1_pre_notify_stop_0 xen-1" -> "ms-drbd0_confirmed-pre_notify_stop_0" [ style = bold] "drbd0:1_pre_notify_stop_0 xen-1" [ style=bold color="green" fontcolor="black" ] "drbd0:1_stop_0 xen-1" -> "all_stopped" [ style = bold] "drbd0:1_stop_0 xen-1" -> "do_shutdown xen-1" [ style = bold] "drbd0:1_stop_0 xen-1" -> "ms-drbd0_stopped_0" [ style = bold] "drbd0:1_stop_0 xen-1" [ style=bold color="green" fontcolor="black" ] 
"fs_1_start_0 xen-2" [ style=bold color="green" fontcolor="black" ] "fs_1_stop_0 xen-1" -> "all_stopped" [ style = bold] "fs_1_stop_0 xen-1" -> "do_shutdown xen-1" [ style = bold] +"fs_1_stop_0 xen-1" -> "drbd0:1_demote_0 xen-1" [ style = bold] "fs_1_stop_0 xen-1" -> "fs_1_start_0 xen-2" [ style = bold] "fs_1_stop_0 xen-1" -> "ms-drbd0_demote_0" [ style = bold] "fs_1_stop_0 xen-1" [ style=bold color="green" fontcolor="black" ] "ms-drbd0_confirmed-post_notify_demoted_0" -> "ms-drbd0_pre_notify_promote_0" [ style = bold] "ms-drbd0_confirmed-post_notify_demoted_0" -> "ms-drbd0_pre_notify_stop_0" [ style = bold] "ms-drbd0_confirmed-post_notify_demoted_0" [ style=bold color="green" fontcolor="orange" ] "ms-drbd0_confirmed-post_notify_promoted_0" -> "fs_1_start_0 xen-2" [ style = bold] "ms-drbd0_confirmed-post_notify_promoted_0" [ style=bold color="green" fontcolor="orange" ] "ms-drbd0_confirmed-post_notify_stopped_0" -> "all_stopped" [ style = bold] "ms-drbd0_confirmed-post_notify_stopped_0" -> "ms-drbd0_pre_notify_promote_0" [ style = bold] "ms-drbd0_confirmed-post_notify_stopped_0" [ style=bold color="green" fontcolor="orange" ] "ms-drbd0_confirmed-pre_notify_demote_0" -> "ms-drbd0_demote_0" [ style = bold] "ms-drbd0_confirmed-pre_notify_demote_0" -> "ms-drbd0_post_notify_demoted_0" [ style = bold] "ms-drbd0_confirmed-pre_notify_demote_0" [ style=bold color="green" fontcolor="orange" ] "ms-drbd0_confirmed-pre_notify_promote_0" -> "ms-drbd0_post_notify_promoted_0" [ style = bold] "ms-drbd0_confirmed-pre_notify_promote_0" -> "ms-drbd0_promote_0" [ style = bold] "ms-drbd0_confirmed-pre_notify_promote_0" [ style=bold color="green" fontcolor="orange" ] "ms-drbd0_confirmed-pre_notify_stop_0" -> "ms-drbd0_post_notify_stopped_0" [ style = bold] "ms-drbd0_confirmed-pre_notify_stop_0" -> "ms-drbd0_stop_0" [ style = bold] "ms-drbd0_confirmed-pre_notify_stop_0" [ style=bold color="green" fontcolor="orange" ] "ms-drbd0_demote_0" -> "drbd0:1_demote_0 xen-1" [ style = bold] 
"ms-drbd0_demote_0" -> "ms-drbd0_demoted_0" [ style = bold] "ms-drbd0_demote_0" -> "ms-drbd0_stop_0" [ style = bold] "ms-drbd0_demote_0" [ style=bold color="green" fontcolor="orange" ] "ms-drbd0_demoted_0" -> "ms-drbd0_post_notify_demoted_0" [ style = bold] "ms-drbd0_demoted_0" -> "ms-drbd0_promote_0" [ style = bold] "ms-drbd0_demoted_0" -> "ms-drbd0_stop_0" [ style = bold] "ms-drbd0_demoted_0" [ style=bold color="green" fontcolor="orange" ] "ms-drbd0_post_notify_demoted_0" -> "drbd0:0_post_notify_demote_0 xen-2" [ style = bold] "ms-drbd0_post_notify_demoted_0" -> "drbd0:1_post_notify_demote_0 xen-1" [ style = bold] "ms-drbd0_post_notify_demoted_0" -> "ms-drbd0_confirmed-post_notify_demoted_0" [ style = bold] "ms-drbd0_post_notify_demoted_0" [ style=bold color="green" fontcolor="orange" ] "ms-drbd0_post_notify_promoted_0" -> "drbd0:0_post_notify_promote_0 xen-2" [ style = bold] "ms-drbd0_post_notify_promoted_0" -> "ms-drbd0_confirmed-post_notify_promoted_0" [ style = bold] "ms-drbd0_post_notify_promoted_0" [ style=bold color="green" fontcolor="orange" ] "ms-drbd0_post_notify_stopped_0" -> "drbd0:0_post_notify_stop_0 xen-2" [ style = bold] "ms-drbd0_post_notify_stopped_0" -> "ms-drbd0_confirmed-post_notify_stopped_0" [ style = bold] "ms-drbd0_post_notify_stopped_0" [ style=bold color="green" fontcolor="orange" ] "ms-drbd0_pre_notify_demote_0" -> "drbd0:0_pre_notify_demote_0 xen-2" [ style = bold] "ms-drbd0_pre_notify_demote_0" -> "drbd0:1_pre_notify_demote_0 xen-1" [ style = bold] "ms-drbd0_pre_notify_demote_0" -> "ms-drbd0_confirmed-pre_notify_demote_0" [ style = bold] "ms-drbd0_pre_notify_demote_0" [ style=bold color="green" fontcolor="orange" ] "ms-drbd0_pre_notify_promote_0" -> "drbd0:0_pre_notify_promote_0 xen-2" [ style = bold] "ms-drbd0_pre_notify_promote_0" -> "ms-drbd0_confirmed-pre_notify_promote_0" [ style = bold] "ms-drbd0_pre_notify_promote_0" [ style=bold color="green" fontcolor="orange" ] "ms-drbd0_pre_notify_stop_0" -> "drbd0:0_pre_notify_stop_0 
xen-2" [ style = bold] "ms-drbd0_pre_notify_stop_0" -> "drbd0:1_pre_notify_stop_0 xen-1" [ style = bold] "ms-drbd0_pre_notify_stop_0" -> "ms-drbd0_confirmed-pre_notify_stop_0" [ style = bold] "ms-drbd0_pre_notify_stop_0" [ style=bold color="green" fontcolor="orange" ] "ms-drbd0_promote_0" -> "drbd0:0_promote_0 xen-2" [ style = bold] "ms-drbd0_promote_0" [ style=bold color="green" fontcolor="orange" ] "ms-drbd0_promoted_0" -> "fs_1_start_0 xen-2" [ style = bold] "ms-drbd0_promoted_0" -> "ms-drbd0_post_notify_promoted_0" [ style = bold] "ms-drbd0_promoted_0" [ style=bold color="green" fontcolor="orange" ] "ms-drbd0_stop_0" -> "drbd0:1_stop_0 xen-1" [ style = bold] "ms-drbd0_stop_0" -> "ms-drbd0_stopped_0" [ style = bold] "ms-drbd0_stop_0" [ style=bold color="green" fontcolor="orange" ] "ms-drbd0_stopped_0" -> "ms-drbd0_post_notify_stopped_0" [ style = bold] "ms-drbd0_stopped_0" -> "ms-drbd0_promote_0" [ style = bold] "ms-drbd0_stopped_0" [ style=bold color="green" fontcolor="orange" ] } diff --git a/pengine/test10/novell-239082.exp b/pengine/test10/novell-239082.exp index abb522d27a..5db2ea0836 100644 --- a/pengine/test10/novell-239082.exp +++ b/pengine/test10/novell-239082.exp @@ -1,495 +1,498 @@ + + + diff --git a/pengine/test10/novell-252693-3.dot b/pengine/test10/novell-252693-3.dot index 8bca13342f..c4ec399c94 100644 --- a/pengine/test10/novell-252693-3.dot +++ b/pengine/test10/novell-252693-3.dot @@ -1,170 +1,171 @@ digraph "g" { "all_stopped" [ style=bold color="green" fontcolor="orange" ] "configstoreclone:0_post_notify_start_0 node2" -> "configstorecloneset_confirmed-post_notify_running_0" [ style = bold] "configstoreclone:0_post_notify_start_0 node2" [ style=bold color="green" fontcolor="black" ] "configstoreclone:0_pre_notify_start_0 node2" -> "configstorecloneset_confirmed-pre_notify_start_0" [ style = bold] "configstoreclone:0_pre_notify_start_0 node2" [ style=bold color="green" fontcolor="black" ] "configstoreclone:1_monitor_0 node1" -> "probe_complete 
node1" [ style = bold] "configstoreclone:1_monitor_0 node1" [ style=bold color="green" fontcolor="black" ] "configstoreclone:1_monitor_20000 node1" [ style=bold color="green" fontcolor="black" ] "configstoreclone:1_post_notify_start_0 node1" -> "configstorecloneset_confirmed-post_notify_running_0" [ style = bold] "configstoreclone:1_post_notify_start_0 node1" [ style=bold color="green" fontcolor="black" ] "configstoreclone:1_start_0 node1" -> "configstoreclone:1_monitor_20000 node1" [ style = bold] "configstoreclone:1_start_0 node1" -> "configstorecloneset_running_0" [ style = bold] "configstoreclone:1_start_0 node1" [ style=bold color="green" fontcolor="black" ] "configstorecloneset_confirmed-post_notify_running_0" -> "configstoreclone:1_monitor_20000 node1" [ style = bold] "configstorecloneset_confirmed-post_notify_running_0" -> "sles10_start_0 node1" [ style = bold] "configstorecloneset_confirmed-post_notify_running_0" [ style=bold color="green" fontcolor="orange" ] "configstorecloneset_confirmed-pre_notify_start_0" -> "configstorecloneset_post_notify_running_0" [ style = bold] "configstorecloneset_confirmed-pre_notify_start_0" -> "configstorecloneset_start_0" [ style = bold] "configstorecloneset_confirmed-pre_notify_start_0" [ style=bold color="green" fontcolor="orange" ] "configstorecloneset_post_notify_running_0" -> "configstoreclone:0_post_notify_start_0 node2" [ style = bold] "configstorecloneset_post_notify_running_0" -> "configstoreclone:1_post_notify_start_0 node1" [ style = bold] "configstorecloneset_post_notify_running_0" -> "configstorecloneset_confirmed-post_notify_running_0" [ style = bold] "configstorecloneset_post_notify_running_0" [ style=bold color="green" fontcolor="orange" ] "configstorecloneset_pre_notify_start_0" -> "configstoreclone:0_pre_notify_start_0 node2" [ style = bold] "configstorecloneset_pre_notify_start_0" -> "configstorecloneset_confirmed-pre_notify_start_0" [ style = bold] "configstorecloneset_pre_notify_start_0" [ style=bold 
color="green" fontcolor="orange" ] "configstorecloneset_running_0" -> "configstorecloneset_post_notify_running_0" [ style = bold] "configstorecloneset_running_0" -> "sles10_start_0 node1" [ style = bold] "configstorecloneset_running_0" [ style=bold color="green" fontcolor="orange" ] "configstorecloneset_start_0" -> "configstoreclone:1_start_0 node1" [ style = bold] "configstorecloneset_start_0" -> "configstorecloneset_running_0" [ style = bold] "configstorecloneset_start_0" [ style=bold color="green" fontcolor="orange" ] "evmsclone:0_post_notify_start_0 node2" -> "evmscloneset_confirmed-post_notify_running_0" [ style = bold] "evmsclone:0_post_notify_start_0 node2" [ style=bold color="green" fontcolor="black" ] "evmsclone:0_pre_notify_start_0 node2" -> "evmscloneset_confirmed-pre_notify_start_0" [ style = bold] "evmsclone:0_pre_notify_start_0 node2" [ style=bold color="green" fontcolor="black" ] "evmsclone:1_monitor_0 node1" -> "probe_complete node1" [ style = bold] "evmsclone:1_monitor_0 node1" [ style=bold color="green" fontcolor="black" ] "evmsclone:1_post_notify_start_0 node1" -> "evmscloneset_confirmed-post_notify_running_0" [ style = bold] "evmsclone:1_post_notify_start_0 node1" [ style=bold color="green" fontcolor="black" ] "evmsclone:1_start_0 node1" -> "evmscloneset_running_0" [ style = bold] "evmsclone:1_start_0 node1" [ style=bold color="green" fontcolor="black" ] "evmscloneset_confirmed-post_notify_running_0" -> "configstorecloneset_start_0" [ style = bold] "evmscloneset_confirmed-post_notify_running_0" -> "imagestorecloneset_start_0" [ style = bold] "evmscloneset_confirmed-post_notify_running_0" [ style=bold color="green" fontcolor="orange" ] "evmscloneset_confirmed-pre_notify_start_0" -> "evmscloneset_post_notify_running_0" [ style = bold] "evmscloneset_confirmed-pre_notify_start_0" -> "evmscloneset_start_0" [ style = bold] "evmscloneset_confirmed-pre_notify_start_0" [ style=bold color="green" fontcolor="orange" ] "evmscloneset_post_notify_running_0" 
-> "evmsclone:0_post_notify_start_0 node2" [ style = bold] "evmscloneset_post_notify_running_0" -> "evmsclone:1_post_notify_start_0 node1" [ style = bold] "evmscloneset_post_notify_running_0" -> "evmscloneset_confirmed-post_notify_running_0" [ style = bold] "evmscloneset_post_notify_running_0" [ style=bold color="green" fontcolor="orange" ] "evmscloneset_pre_notify_start_0" -> "evmsclone:0_pre_notify_start_0 node2" [ style = bold] "evmscloneset_pre_notify_start_0" -> "evmscloneset_confirmed-pre_notify_start_0" [ style = bold] "evmscloneset_pre_notify_start_0" [ style=bold color="green" fontcolor="orange" ] "evmscloneset_running_0" -> "configstorecloneset_start_0" [ style = bold] "evmscloneset_running_0" -> "evmscloneset_post_notify_running_0" [ style = bold] "evmscloneset_running_0" -> "imagestorecloneset_start_0" [ style = bold] "evmscloneset_running_0" [ style=bold color="green" fontcolor="orange" ] "evmscloneset_start_0" -> "evmsclone:1_start_0 node1" [ style = bold] "evmscloneset_start_0" -> "evmscloneset_running_0" [ style = bold] "evmscloneset_start_0" [ style=bold color="green" fontcolor="orange" ] "evmsdclone:1_monitor_0 node1" -> "probe_complete node1" [ style = bold] "evmsdclone:1_monitor_0 node1" [ style=bold color="green" fontcolor="black" ] "evmsdclone:1_monitor_5000 node1" [ style=bold color="green" fontcolor="black" ] "evmsdclone:1_start_0 node1" -> "evmsdclone:1_monitor_5000 node1" [ style = bold] "evmsdclone:1_start_0 node1" -> "evmsdcloneset_running_0" [ style = bold] "evmsdclone:1_start_0 node1" [ style=bold color="green" fontcolor="black" ] "evmsdcloneset_running_0" -> "evmscloneset_start_0" [ style = bold] "evmsdcloneset_running_0" [ style=bold color="green" fontcolor="orange" ] "evmsdcloneset_start_0" -> "evmsdclone:1_start_0 node1" [ style = bold] "evmsdcloneset_start_0" -> "evmsdcloneset_running_0" [ style = bold] "evmsdcloneset_start_0" [ style=bold color="green" fontcolor="orange" ] "imagestoreclone:0_monitor_0 node1" -> "probe_complete 
node1" [ style = bold] "imagestoreclone:0_monitor_0 node1" [ style=bold color="green" fontcolor="black" ] "imagestoreclone:0_monitor_20000 node1" [ style=bold color="green" fontcolor="black" ] "imagestoreclone:0_post_notify_start_0 node1" -> "imagestorecloneset_confirmed-post_notify_running_0" [ style = bold] "imagestoreclone:0_post_notify_start_0 node1" [ style=bold color="green" fontcolor="black" ] "imagestoreclone:0_pre_notify_stop_0 node2" -> "imagestorecloneset_confirmed-pre_notify_stop_0" [ style = bold] "imagestoreclone:0_pre_notify_stop_0 node2" [ style=bold color="green" fontcolor="black" ] "imagestoreclone:0_start_0 node1" -> "imagestoreclone:0_monitor_20000 node1" [ style = bold] "imagestoreclone:0_start_0 node1" -> "imagestorecloneset_running_0" [ style = bold] "imagestoreclone:0_start_0 node1" [ style=bold color="green" fontcolor="black" ] "imagestoreclone:0_stop_0 node2" -> "all_stopped" [ style = bold] "imagestoreclone:0_stop_0 node2" -> "imagestoreclone:0_start_0 node1" [ style = bold] "imagestoreclone:0_stop_0 node2" -> "imagestorecloneset_stopped_0" [ style = bold] "imagestoreclone:0_stop_0 node2" [ style=bold color="green" fontcolor="black" ] "imagestoreclone:1_monitor_20000 node2" [ style=bold color="green" fontcolor="black" ] "imagestoreclone:1_post_notify_start_0 node2" -> "imagestorecloneset_confirmed-post_notify_running_0" [ style = bold] "imagestoreclone:1_post_notify_start_0 node2" [ style=bold color="green" fontcolor="black" ] "imagestoreclone:1_start_0 node2" -> "imagestoreclone:1_monitor_20000 node2" [ style = bold] "imagestoreclone:1_start_0 node2" -> "imagestorecloneset_running_0" [ style = bold] "imagestoreclone:1_start_0 node2" [ style=bold color="green" fontcolor="black" ] "imagestorecloneset_confirmed-post_notify_running_0" -> "imagestoreclone:0_monitor_20000 node1" [ style = bold] "imagestorecloneset_confirmed-post_notify_running_0" -> "imagestoreclone:1_monitor_20000 node2" [ style = bold] 
"imagestorecloneset_confirmed-post_notify_running_0" -> "sles10_start_0 node1" [ style = bold] "imagestorecloneset_confirmed-post_notify_running_0" [ style=bold color="green" fontcolor="orange" ] "imagestorecloneset_confirmed-post_notify_stopped_0" -> "all_stopped" [ style = bold] "imagestorecloneset_confirmed-post_notify_stopped_0" -> "imagestorecloneset_pre_notify_start_0" [ style = bold] "imagestorecloneset_confirmed-post_notify_stopped_0" [ style=bold color="green" fontcolor="orange" ] "imagestorecloneset_confirmed-pre_notify_start_0" -> "imagestorecloneset_post_notify_running_0" [ style = bold] "imagestorecloneset_confirmed-pre_notify_start_0" -> "imagestorecloneset_start_0" [ style = bold] "imagestorecloneset_confirmed-pre_notify_start_0" [ style=bold color="green" fontcolor="orange" ] "imagestorecloneset_confirmed-pre_notify_stop_0" -> "imagestorecloneset_post_notify_stopped_0" [ style = bold] "imagestorecloneset_confirmed-pre_notify_stop_0" -> "imagestorecloneset_stop_0" [ style = bold] "imagestorecloneset_confirmed-pre_notify_stop_0" [ style=bold color="green" fontcolor="orange" ] "imagestorecloneset_post_notify_running_0" -> "imagestoreclone:0_post_notify_start_0 node1" [ style = bold] "imagestorecloneset_post_notify_running_0" -> "imagestoreclone:1_post_notify_start_0 node2" [ style = bold] "imagestorecloneset_post_notify_running_0" -> "imagestorecloneset_confirmed-post_notify_running_0" [ style = bold] "imagestorecloneset_post_notify_running_0" [ style=bold color="green" fontcolor="orange" ] "imagestorecloneset_post_notify_stopped_0" -> "imagestorecloneset_confirmed-post_notify_stopped_0" [ style = bold] "imagestorecloneset_post_notify_stopped_0" [ style=bold color="green" fontcolor="orange" ] "imagestorecloneset_pre_notify_start_0" -> "imagestorecloneset_confirmed-pre_notify_start_0" [ style = bold] "imagestorecloneset_pre_notify_start_0" [ style=bold color="green" fontcolor="orange" ] "imagestorecloneset_pre_notify_stop_0" -> 
"imagestoreclone:0_pre_notify_stop_0 node2" [ style = bold] "imagestorecloneset_pre_notify_stop_0" -> "imagestorecloneset_confirmed-pre_notify_stop_0" [ style = bold] "imagestorecloneset_pre_notify_stop_0" [ style=bold color="green" fontcolor="orange" ] "imagestorecloneset_running_0" -> "imagestorecloneset_post_notify_running_0" [ style = bold] "imagestorecloneset_running_0" -> "sles10_start_0 node1" [ style = bold] "imagestorecloneset_running_0" [ style=bold color="green" fontcolor="orange" ] "imagestorecloneset_start_0" -> "imagestoreclone:0_start_0 node1" [ style = bold] "imagestorecloneset_start_0" -> "imagestoreclone:1_start_0 node2" [ style = bold] "imagestorecloneset_start_0" -> "imagestorecloneset_running_0" [ style = bold] "imagestorecloneset_start_0" [ style=bold color="green" fontcolor="orange" ] "imagestorecloneset_stop_0" -> "imagestoreclone:0_stop_0 node2" [ style = bold] "imagestorecloneset_stop_0" -> "imagestorecloneset_start_0" [ style = bold] "imagestorecloneset_stop_0" -> "imagestorecloneset_stopped_0" [ style = bold] "imagestorecloneset_stop_0" [ style=bold color="green" fontcolor="orange" ] "imagestorecloneset_stopped_0" -> "imagestorecloneset_post_notify_stopped_0" [ style = bold] "imagestorecloneset_stopped_0" -> "imagestorecloneset_start_0" [ style = bold] "imagestorecloneset_stopped_0" [ style=bold color="green" fontcolor="orange" ] "probe_complete node1" -> "probe_complete" [ style = bold] "probe_complete node1" [ style=bold color="green" fontcolor="black" ] "probe_complete" -> "configstoreclone:1_start_0 node1" [ style = bold] "probe_complete" -> "configstorecloneset_start_0" [ style = bold] "probe_complete" -> "evmsclone:1_start_0 node1" [ style = bold] "probe_complete" -> "evmscloneset_start_0" [ style = bold] "probe_complete" -> "evmsdclone:1_start_0 node1" [ style = bold] "probe_complete" -> "evmsdcloneset_start_0" [ style = bold] "probe_complete" -> "imagestoreclone:0_start_0 node1" [ style = bold] "probe_complete" -> 
"imagestoreclone:0_stop_0 node2" [ style = bold] "probe_complete" -> "imagestoreclone:1_start_0 node2" [ style = bold] "probe_complete" -> "imagestorecloneset_start_0" [ style = bold] "probe_complete" -> "imagestorecloneset_stop_0" [ style = bold] "probe_complete" -> "sles10_start_0 node1" [ style = bold] "probe_complete" -> "sles10_stop_0 node2" [ style = bold] "probe_complete" -> "stonithclone:1_start_0 node1" [ style = bold] "probe_complete" -> "stonithcloneset_start_0" [ style = bold] "probe_complete" [ style=bold color="green" fontcolor="orange" ] "sles10_monitor_0 node1" -> "probe_complete node1" [ style = bold] "sles10_monitor_0 node1" [ style=bold color="green" fontcolor="black" ] "sles10_monitor_10000 node1" [ style=bold color="green" fontcolor="black" ] "sles10_start_0 node1" -> "sles10_monitor_10000 node1" [ style = bold] "sles10_start_0 node1" [ style=bold color="green" fontcolor="black" ] "sles10_stop_0 node2" -> "all_stopped" [ style = bold] +"sles10_stop_0 node2" -> "imagestoreclone:0_stop_0 node2" [ style = bold] "sles10_stop_0 node2" -> "imagestorecloneset_stop_0" [ style = bold] "sles10_stop_0 node2" -> "sles10_start_0 node1" [ style = bold] "sles10_stop_0 node2" [ style=bold color="green" fontcolor="black" ] "stonithclone:1_monitor_0 node1" -> "probe_complete node1" [ style = bold] "stonithclone:1_monitor_0 node1" [ style=bold color="green" fontcolor="black" ] "stonithclone:1_monitor_5000 node1" [ style=bold color="green" fontcolor="black" ] "stonithclone:1_start_0 node1" -> "stonithclone:1_monitor_5000 node1" [ style = bold] "stonithclone:1_start_0 node1" -> "stonithcloneset_running_0" [ style = bold] "stonithclone:1_start_0 node1" [ style=bold color="green" fontcolor="black" ] "stonithcloneset_running_0" [ style=bold color="green" fontcolor="orange" ] "stonithcloneset_start_0" -> "stonithclone:1_start_0 node1" [ style = bold] "stonithcloneset_start_0" -> "stonithcloneset_running_0" [ style = bold] "stonithcloneset_start_0" [ style=bold 
color="green" fontcolor="orange" ] } diff --git a/pengine/test10/novell-252693-3.exp b/pengine/test10/novell-252693-3.exp index 2cdf5cfb36..58ef3c103b 100644 --- a/pengine/test10/novell-252693-3.exp +++ b/pengine/test10/novell-252693-3.exp @@ -1,894 +1,897 @@ + + + diff --git a/pengine/test10/novell-252693.dot b/pengine/test10/novell-252693.dot index 2d8b3c07c6..09884849b7 100644 --- a/pengine/test10/novell-252693.dot +++ b/pengine/test10/novell-252693.dot @@ -1,118 +1,120 @@ digraph "g" { "all_stopped" -> "stonithclone:1_stop_0 node1" [ style = bold] "all_stopped" [ style=bold color="green" fontcolor="orange" ] "configstoreclone:0_post_notify_stop_0 node2" -> "configstorecloneset_confirmed-post_notify_stopped_0" [ style = bold] "configstoreclone:0_post_notify_stop_0 node2" [ style=bold color="green" fontcolor="black" ] "configstoreclone:0_pre_notify_stop_0 node2" -> "configstorecloneset_confirmed-pre_notify_stop_0" [ style = bold] "configstoreclone:0_pre_notify_stop_0 node2" [ style=bold color="green" fontcolor="black" ] "configstoreclone:1_pre_notify_stop_0 node1" -> "configstorecloneset_confirmed-pre_notify_stop_0" [ style = bold] "configstoreclone:1_pre_notify_stop_0 node1" [ style=bold color="green" fontcolor="black" ] "configstoreclone:1_stop_0 node1" -> "all_stopped" [ style = bold] "configstoreclone:1_stop_0 node1" -> "configstorecloneset_stopped_0" [ style = bold] "configstoreclone:1_stop_0 node1" -> "do_shutdown node1" [ style = bold] "configstoreclone:1_stop_0 node1" [ style=bold color="green" fontcolor="black" ] "configstorecloneset_confirmed-post_notify_stopped_0" -> "all_stopped" [ style = bold] "configstorecloneset_confirmed-post_notify_stopped_0" -> "evmscloneset_stop_0" [ style = bold] "configstorecloneset_confirmed-post_notify_stopped_0" [ style=bold color="green" fontcolor="orange" ] "configstorecloneset_confirmed-pre_notify_stop_0" -> "configstorecloneset_post_notify_stopped_0" [ style = bold] "configstorecloneset_confirmed-pre_notify_stop_0" -> 
"configstorecloneset_stop_0" [ style = bold] "configstorecloneset_confirmed-pre_notify_stop_0" [ style=bold color="green" fontcolor="orange" ] "configstorecloneset_post_notify_stopped_0" -> "configstoreclone:0_post_notify_stop_0 node2" [ style = bold] "configstorecloneset_post_notify_stopped_0" -> "configstorecloneset_confirmed-post_notify_stopped_0" [ style = bold] "configstorecloneset_post_notify_stopped_0" [ style=bold color="green" fontcolor="orange" ] "configstorecloneset_pre_notify_stop_0" -> "configstoreclone:0_pre_notify_stop_0 node2" [ style = bold] "configstorecloneset_pre_notify_stop_0" -> "configstoreclone:1_pre_notify_stop_0 node1" [ style = bold] "configstorecloneset_pre_notify_stop_0" -> "configstorecloneset_confirmed-pre_notify_stop_0" [ style = bold] "configstorecloneset_pre_notify_stop_0" [ style=bold color="green" fontcolor="orange" ] "configstorecloneset_stop_0" -> "configstoreclone:1_stop_0 node1" [ style = bold] "configstorecloneset_stop_0" -> "configstorecloneset_stopped_0" [ style = bold] "configstorecloneset_stop_0" [ style=bold color="green" fontcolor="orange" ] "configstorecloneset_stopped_0" -> "configstorecloneset_post_notify_stopped_0" [ style = bold] "configstorecloneset_stopped_0" -> "evmscloneset_stop_0" [ style = bold] "configstorecloneset_stopped_0" [ style=bold color="green" fontcolor="orange" ] "do_shutdown node1" [ style=bold color="green" fontcolor="black" ] "evmsclone:0_post_notify_stop_0 node2" -> "evmscloneset_confirmed-post_notify_stopped_0" [ style = bold] "evmsclone:0_post_notify_stop_0 node2" [ style=bold color="green" fontcolor="black" ] "evmsclone:0_pre_notify_stop_0 node2" -> "evmscloneset_confirmed-pre_notify_stop_0" [ style = bold] "evmsclone:0_pre_notify_stop_0 node2" [ style=bold color="green" fontcolor="black" ] "evmsclone:1_pre_notify_stop_0 node1" -> "evmscloneset_confirmed-pre_notify_stop_0" [ style = bold] "evmsclone:1_pre_notify_stop_0 node1" [ style=bold color="green" fontcolor="black" ] 
"evmsclone:1_stop_0 node1" -> "all_stopped" [ style = bold] "evmsclone:1_stop_0 node1" -> "do_shutdown node1" [ style = bold] "evmsclone:1_stop_0 node1" -> "evmscloneset_stopped_0" [ style = bold] "evmsclone:1_stop_0 node1" [ style=bold color="green" fontcolor="black" ] "evmscloneset_confirmed-post_notify_stopped_0" -> "all_stopped" [ style = bold] "evmscloneset_confirmed-post_notify_stopped_0" -> "evmsdcloneset_stop_0" [ style = bold] "evmscloneset_confirmed-post_notify_stopped_0" [ style=bold color="green" fontcolor="orange" ] "evmscloneset_confirmed-pre_notify_stop_0" -> "evmscloneset_post_notify_stopped_0" [ style = bold] "evmscloneset_confirmed-pre_notify_stop_0" -> "evmscloneset_stop_0" [ style = bold] "evmscloneset_confirmed-pre_notify_stop_0" [ style=bold color="green" fontcolor="orange" ] "evmscloneset_post_notify_stopped_0" -> "evmsclone:0_post_notify_stop_0 node2" [ style = bold] "evmscloneset_post_notify_stopped_0" -> "evmscloneset_confirmed-post_notify_stopped_0" [ style = bold] "evmscloneset_post_notify_stopped_0" [ style=bold color="green" fontcolor="orange" ] "evmscloneset_pre_notify_stop_0" -> "evmsclone:0_pre_notify_stop_0 node2" [ style = bold] "evmscloneset_pre_notify_stop_0" -> "evmsclone:1_pre_notify_stop_0 node1" [ style = bold] "evmscloneset_pre_notify_stop_0" -> "evmscloneset_confirmed-pre_notify_stop_0" [ style = bold] "evmscloneset_pre_notify_stop_0" [ style=bold color="green" fontcolor="orange" ] "evmscloneset_stop_0" -> "evmsclone:1_stop_0 node1" [ style = bold] "evmscloneset_stop_0" -> "evmscloneset_stopped_0" [ style = bold] "evmscloneset_stop_0" [ style=bold color="green" fontcolor="orange" ] "evmscloneset_stopped_0" -> "evmscloneset_post_notify_stopped_0" [ style = bold] "evmscloneset_stopped_0" -> "evmsdcloneset_stop_0" [ style = bold] "evmscloneset_stopped_0" [ style=bold color="green" fontcolor="orange" ] "evmsdclone:1_stop_0 node1" -> "all_stopped" [ style = bold] "evmsdclone:1_stop_0 node1" -> "do_shutdown node1" [ style = 
bold] "evmsdclone:1_stop_0 node1" -> "evmsdcloneset_stopped_0" [ style = bold] "evmsdclone:1_stop_0 node1" [ style=bold color="green" fontcolor="black" ] "evmsdcloneset_stop_0" -> "evmsdclone:1_stop_0 node1" [ style = bold] "evmsdcloneset_stop_0" -> "evmsdcloneset_stopped_0" [ style = bold] "evmsdcloneset_stop_0" [ style=bold color="green" fontcolor="orange" ] "evmsdcloneset_stopped_0" [ style=bold color="green" fontcolor="orange" ] "imagestoreclone:0_post_notify_stop_0 node2" -> "imagestorecloneset_confirmed-post_notify_stopped_0" [ style = bold] "imagestoreclone:0_post_notify_stop_0 node2" [ style=bold color="green" fontcolor="black" ] "imagestoreclone:0_pre_notify_stop_0 node2" -> "imagestorecloneset_confirmed-pre_notify_stop_0" [ style = bold] "imagestoreclone:0_pre_notify_stop_0 node2" [ style=bold color="green" fontcolor="black" ] "imagestoreclone:1_pre_notify_stop_0 node1" -> "imagestorecloneset_confirmed-pre_notify_stop_0" [ style = bold] "imagestoreclone:1_pre_notify_stop_0 node1" [ style=bold color="green" fontcolor="black" ] "imagestoreclone:1_stop_0 node1" -> "all_stopped" [ style = bold] "imagestoreclone:1_stop_0 node1" -> "do_shutdown node1" [ style = bold] "imagestoreclone:1_stop_0 node1" -> "imagestorecloneset_stopped_0" [ style = bold] "imagestoreclone:1_stop_0 node1" [ style=bold color="green" fontcolor="black" ] "imagestorecloneset_confirmed-post_notify_stopped_0" -> "all_stopped" [ style = bold] "imagestorecloneset_confirmed-post_notify_stopped_0" -> "evmscloneset_stop_0" [ style = bold] "imagestorecloneset_confirmed-post_notify_stopped_0" [ style=bold color="green" fontcolor="orange" ] "imagestorecloneset_confirmed-pre_notify_stop_0" -> "imagestorecloneset_post_notify_stopped_0" [ style = bold] "imagestorecloneset_confirmed-pre_notify_stop_0" -> "imagestorecloneset_stop_0" [ style = bold] "imagestorecloneset_confirmed-pre_notify_stop_0" [ style=bold color="green" fontcolor="orange" ] "imagestorecloneset_post_notify_stopped_0" -> 
"imagestoreclone:0_post_notify_stop_0 node2" [ style = bold] "imagestorecloneset_post_notify_stopped_0" -> "imagestorecloneset_confirmed-post_notify_stopped_0" [ style = bold] "imagestorecloneset_post_notify_stopped_0" [ style=bold color="green" fontcolor="orange" ] "imagestorecloneset_pre_notify_stop_0" -> "imagestoreclone:0_pre_notify_stop_0 node2" [ style = bold] "imagestorecloneset_pre_notify_stop_0" -> "imagestoreclone:1_pre_notify_stop_0 node1" [ style = bold] "imagestorecloneset_pre_notify_stop_0" -> "imagestorecloneset_confirmed-pre_notify_stop_0" [ style = bold] "imagestorecloneset_pre_notify_stop_0" [ style=bold color="green" fontcolor="orange" ] "imagestorecloneset_stop_0" -> "imagestoreclone:1_stop_0 node1" [ style = bold] "imagestorecloneset_stop_0" -> "imagestorecloneset_stopped_0" [ style = bold] "imagestorecloneset_stop_0" [ style=bold color="green" fontcolor="orange" ] "imagestorecloneset_stopped_0" -> "evmscloneset_stop_0" [ style = bold] "imagestorecloneset_stopped_0" -> "imagestorecloneset_post_notify_stopped_0" [ style = bold] "imagestorecloneset_stopped_0" [ style=bold color="green" fontcolor="orange" ] "sles10_migrate_from_0 node2" -> "all_stopped" [ style = bold] "sles10_migrate_from_0 node2" -> "do_shutdown node1" [ style = bold] "sles10_migrate_from_0 node2" -> "sles10_monitor_10000 node2" [ style = bold] "sles10_migrate_from_0 node2" [ style=bold color="green" fontcolor="black" ] "sles10_migrate_to_0 node1" -> "all_stopped" [ style = bold] +"sles10_migrate_to_0 node1" -> "configstoreclone:1_stop_0 node1" [ style = bold] "sles10_migrate_to_0 node1" -> "configstorecloneset_stop_0" [ style = bold] "sles10_migrate_to_0 node1" -> "do_shutdown node1" [ style = bold] +"sles10_migrate_to_0 node1" -> "imagestoreclone:1_stop_0 node1" [ style = bold] "sles10_migrate_to_0 node1" -> "imagestorecloneset_stop_0" [ style = bold] "sles10_migrate_to_0 node1" -> "sles10_migrate_from_0 node2" [ style = bold] "sles10_migrate_to_0 node1" [ style=bold 
color="green" fontcolor="black" ] "sles10_monitor_10000 node2" [ style=bold color="green" fontcolor="black" ] "stonithclone:1_stop_0 node1" -> "do_shutdown node1" [ style = bold] "stonithclone:1_stop_0 node1" -> "stonithcloneset_stopped_0" [ style = bold] "stonithclone:1_stop_0 node1" [ style=bold color="green" fontcolor="black" ] "stonithcloneset_stop_0" -> "stonithclone:1_stop_0 node1" [ style = bold] "stonithcloneset_stop_0" -> "stonithcloneset_stopped_0" [ style = bold] "stonithcloneset_stop_0" [ style=bold color="green" fontcolor="orange" ] "stonithcloneset_stopped_0" [ style=bold color="green" fontcolor="orange" ] } diff --git a/pengine/test10/novell-252693.exp b/pengine/test10/novell-252693.exp index 969ff2215c..4530899b63 100644 --- a/pengine/test10/novell-252693.exp +++ b/pengine/test10/novell-252693.exp @@ -1,609 +1,615 @@ + + + + + + diff --git a/pengine/test10/probe-2.dot b/pengine/test10/probe-2.dot new file mode 100644 index 0000000000..c28b7e5f65 --- /dev/null +++ b/pengine/test10/probe-2.dot @@ -0,0 +1,228 @@ +digraph "g" { +"Cancel drbd_mysql:0_monitor_10000 wc01" -> "drbd_mysql:0_promote_0 wc01" [ style = bold] +"Cancel drbd_mysql:0_monitor_10000 wc01" [ style=bold color="green" fontcolor="black" ] +"DoFencing_running_0" [ style=bold color="green" fontcolor="orange" ] +"DoFencing_start_0" -> "DoFencing_running_0" [ style = bold] +"DoFencing_start_0" -> "stonith_rackpdu:0_start_0 wc01" [ style = bold] +"DoFencing_start_0" [ style=bold color="green" fontcolor="orange" ] +"DoFencing_stop_0" -> "DoFencing_start_0" [ style = bold] +"DoFencing_stop_0" -> "DoFencing_stopped_0" [ style = bold] +"DoFencing_stop_0" -> "stonith_rackpdu:0_stop_0 wc01" [ style = bold] +"DoFencing_stop_0" -> "stonith_rackpdu:1_stop_0 wc02" [ style = bold] +"DoFencing_stop_0" [ style=bold color="green" fontcolor="orange" ] +"DoFencing_stopped_0" -> "DoFencing_start_0" [ style = bold] +"DoFencing_stopped_0" [ style=bold color="green" fontcolor="orange" ] +"all_stopped" -> 
"stonith_rackpdu:1_stop_0 wc02" [ style = bold] +"all_stopped" [ style=bold color="green" fontcolor="orange" ] +"apache2:1_stop_0 wc02" -> "all_stopped" [ style = bold] +"apache2:1_stop_0 wc02" -> "fs_www:1_stop_0 wc02" [ style = bold] +"apache2:1_stop_0 wc02" -> "group_webservice:1_stopped_0" [ style = bold] +"apache2:1_stop_0 wc02" [ style=bold color="green" fontcolor="black" ] +"clone_mysql-proxy_stop_0" -> "clone_mysql-proxy_stopped_0" [ style = bold] +"clone_mysql-proxy_stop_0" -> "group_mysql-proxy:1_stop_0" [ style = bold] +"clone_mysql-proxy_stop_0" [ style=bold color="green" fontcolor="orange" ] +"clone_mysql-proxy_stopped_0" [ style=bold color="green" fontcolor="orange" ] +"clone_nfs-common_stop_0" -> "clone_nfs-common_stopped_0" [ style = bold] +"clone_nfs-common_stop_0" -> "group_nfs-common:1_stop_0" [ style = bold] +"clone_nfs-common_stop_0" [ style=bold color="green" fontcolor="orange" ] +"clone_nfs-common_stopped_0" [ style=bold color="green" fontcolor="orange" ] +"clone_webservice_stop_0" -> "clone_webservice_stopped_0" [ style = bold] +"clone_webservice_stop_0" -> "group_webservice:1_stop_0" [ style = bold] +"clone_webservice_stop_0" [ style=bold color="green" fontcolor="orange" ] +"clone_webservice_stopped_0" [ style=bold color="green" fontcolor="orange" ] +"drbd_mysql:0_monitor_5000 wc01" [ style=bold color="green" fontcolor="black" ] +"drbd_mysql:0_post_notify_demote_0 wc01" -> "ms_drbd_mysql_confirmed-post_notify_demoted_0" [ style = bold] +"drbd_mysql:0_post_notify_demote_0 wc01" [ style=bold color="green" fontcolor="black" ] +"drbd_mysql:0_post_notify_promote_0 wc01" -> "ms_drbd_mysql_confirmed-post_notify_promoted_0" [ style = bold] +"drbd_mysql:0_post_notify_promote_0 wc01" [ style=bold color="green" fontcolor="black" ] +"drbd_mysql:0_post_notify_stop_0 wc01" -> "ms_drbd_mysql_confirmed-post_notify_stopped_0" [ style = bold] +"drbd_mysql:0_post_notify_stop_0 wc01" [ style=bold color="green" fontcolor="black" ] 
+"drbd_mysql:0_pre_notify_demote_0 wc01" -> "ms_drbd_mysql_confirmed-pre_notify_demote_0" [ style = bold] +"drbd_mysql:0_pre_notify_demote_0 wc01" [ style=bold color="green" fontcolor="black" ] +"drbd_mysql:0_pre_notify_promote_0 wc01" -> "ms_drbd_mysql_confirmed-pre_notify_promote_0" [ style = bold] +"drbd_mysql:0_pre_notify_promote_0 wc01" [ style=bold color="green" fontcolor="black" ] +"drbd_mysql:0_pre_notify_stop_0 wc01" -> "ms_drbd_mysql_confirmed-pre_notify_stop_0" [ style = bold] +"drbd_mysql:0_pre_notify_stop_0 wc01" [ style=bold color="green" fontcolor="black" ] +"drbd_mysql:0_promote_0 wc01" -> "drbd_mysql:0_monitor_5000 wc01" [ style = bold] +"drbd_mysql:0_promote_0 wc01" -> "ms_drbd_mysql_promoted_0" [ style = bold] +"drbd_mysql:0_promote_0 wc01" [ style=bold color="green" fontcolor="black" ] +"drbd_mysql:1_demote_0 wc02" -> "drbd_mysql:1_stop_0 wc02" [ style = bold] +"drbd_mysql:1_demote_0 wc02" -> "ms_drbd_mysql_demoted_0" [ style = bold] +"drbd_mysql:1_demote_0 wc02" [ style=bold color="green" fontcolor="black" ] +"drbd_mysql:1_post_notify_demote_0 wc02" -> "ms_drbd_mysql_confirmed-post_notify_demoted_0" [ style = bold] +"drbd_mysql:1_post_notify_demote_0 wc02" [ style=bold color="green" fontcolor="black" ] +"drbd_mysql:1_pre_notify_demote_0 wc02" -> "ms_drbd_mysql_confirmed-pre_notify_demote_0" [ style = bold] +"drbd_mysql:1_pre_notify_demote_0 wc02" [ style=bold color="green" fontcolor="black" ] +"drbd_mysql:1_pre_notify_stop_0 wc02" -> "ms_drbd_mysql_confirmed-pre_notify_stop_0" [ style = bold] +"drbd_mysql:1_pre_notify_stop_0 wc02" [ style=bold color="green" fontcolor="black" ] +"drbd_mysql:1_stop_0 wc02" -> "all_stopped" [ style = bold] +"drbd_mysql:1_stop_0 wc02" -> "ms_drbd_mysql_stopped_0" [ style = bold] +"drbd_mysql:1_stop_0 wc02" [ style=bold color="green" fontcolor="black" ] +"drbd_www:0_post_notify_stop_0 wc01" -> "ms_drbd_www_confirmed-post_notify_stopped_0" [ style = bold] +"drbd_www:0_post_notify_stop_0 wc01" [ style=bold 
color="green" fontcolor="black" ] +"drbd_www:0_pre_notify_stop_0 wc01" -> "ms_drbd_www_confirmed-pre_notify_stop_0" [ style = bold] +"drbd_www:0_pre_notify_stop_0 wc01" [ style=bold color="green" fontcolor="black" ] +"drbd_www:1_pre_notify_stop_0 wc02" -> "ms_drbd_www_confirmed-pre_notify_stop_0" [ style = bold] +"drbd_www:1_pre_notify_stop_0 wc02" [ style=bold color="green" fontcolor="black" ] +"drbd_www:1_stop_0 wc02" -> "all_stopped" [ style = bold] +"drbd_www:1_stop_0 wc02" -> "ms_drbd_www_stopped_0" [ style = bold] +"drbd_www:1_stop_0 wc02" [ style=bold color="green" fontcolor="black" ] +"fs_mysql_monitor_30000 wc01" [ style=bold color="green" fontcolor="black" ] +"fs_mysql_start_0 wc01" -> "fs_mysql_monitor_30000 wc01" [ style = bold] +"fs_mysql_start_0 wc01" -> "group_mysql_running_0" [ style = bold] +"fs_mysql_start_0 wc01" -> "intip_sql_start_0 wc01" [ style = bold] +"fs_mysql_start_0 wc01" [ style=bold color="green" fontcolor="black" ] +"fs_mysql_stop_0 wc02" -> "all_stopped" [ style = bold] +"fs_mysql_stop_0 wc02" -> "fs_mysql_start_0 wc01" [ style = bold] +"fs_mysql_stop_0 wc02" -> "group_mysql_stopped_0" [ style = bold] +"fs_mysql_stop_0 wc02" [ style=bold color="green" fontcolor="black" ] +"fs_www:1_stop_0 wc02" -> "all_stopped" [ style = bold] +"fs_www:1_stop_0 wc02" -> "group_webservice:1_stopped_0" [ style = bold] +"fs_www:1_stop_0 wc02" [ style=bold color="green" fontcolor="black" ] +"group_mysql-proxy:1_stop_0" -> "group_mysql-proxy:1_stopped_0" [ style = bold] +"group_mysql-proxy:1_stop_0" -> "mysql-proxy:1_stop_0 wc02" [ style = bold] +"group_mysql-proxy:1_stop_0" [ style=bold color="green" fontcolor="orange" ] +"group_mysql-proxy:1_stopped_0" -> "clone_mysql-proxy_stopped_0" [ style = bold] +"group_mysql-proxy:1_stopped_0" [ style=bold color="green" fontcolor="orange" ] +"group_mysql_running_0" [ style=bold color="green" fontcolor="orange" ] +"group_mysql_start_0" -> "fs_mysql_start_0 wc01" [ style = bold] +"group_mysql_start_0" -> 
"group_mysql_running_0" [ style = bold] +"group_mysql_start_0" -> "intip_sql_start_0 wc01" [ style = bold] +"group_mysql_start_0" -> "mysql-server_start_0 wc01" [ style = bold] +"group_mysql_start_0" [ style=bold color="green" fontcolor="orange" ] +"group_mysql_stop_0" -> "fs_mysql_stop_0 wc02" [ style = bold] +"group_mysql_stop_0" -> "group_mysql_start_0" [ style = bold] +"group_mysql_stop_0" -> "group_mysql_stopped_0" [ style = bold] +"group_mysql_stop_0" -> "intip_sql_stop_0 wc02" [ style = bold] +"group_mysql_stop_0" -> "mysql-server_stop_0 wc02" [ style = bold] +"group_mysql_stop_0" [ style=bold color="green" fontcolor="orange" ] +"group_mysql_stopped_0" -> "drbd_mysql:1_demote_0 wc02" [ style = bold] +"group_mysql_stopped_0" -> "group_mysql_start_0" [ style = bold] +"group_mysql_stopped_0" -> "ms_drbd_mysql_demote_0" [ style = bold] +"group_mysql_stopped_0" [ style=bold color="green" fontcolor="orange" ] +"group_nfs-common:1_stop_0" -> "group_nfs-common:1_stopped_0" [ style = bold] +"group_nfs-common:1_stop_0" -> "nfs-common:1_stop_0 wc02" [ style = bold] +"group_nfs-common:1_stop_0" [ style=bold color="green" fontcolor="orange" ] +"group_nfs-common:1_stopped_0" -> "clone_nfs-common_stopped_0" [ style = bold] +"group_nfs-common:1_stopped_0" [ style=bold color="green" fontcolor="orange" ] +"group_webservice:1_stop_0" -> "apache2:1_stop_0 wc02" [ style = bold] +"group_webservice:1_stop_0" -> "fs_www:1_stop_0 wc02" [ style = bold] +"group_webservice:1_stop_0" -> "group_webservice:1_stopped_0" [ style = bold] +"group_webservice:1_stop_0" [ style=bold color="green" fontcolor="orange" ] +"group_webservice:1_stopped_0" -> "clone_webservice_stopped_0" [ style = bold] +"group_webservice:1_stopped_0" -> "group_nfs-common:1_stop_0" [ style = bold] +"group_webservice:1_stopped_0" [ style=bold color="green" fontcolor="orange" ] +"intip_sql_monitor_30000 wc01" [ style=bold color="green" fontcolor="black" ] +"intip_sql_start_0 wc01" -> "group_mysql_running_0" [ style = 
bold] +"intip_sql_start_0 wc01" -> "intip_sql_monitor_30000 wc01" [ style = bold] +"intip_sql_start_0 wc01" -> "mysql-server_start_0 wc01" [ style = bold] +"intip_sql_start_0 wc01" [ style=bold color="green" fontcolor="black" ] +"intip_sql_stop_0 wc02" -> "all_stopped" [ style = bold] +"intip_sql_stop_0 wc02" -> "fs_mysql_stop_0 wc02" [ style = bold] +"intip_sql_stop_0 wc02" -> "group_mysql_stopped_0" [ style = bold] +"intip_sql_stop_0 wc02" -> "intip_sql_start_0 wc01" [ style = bold] +"intip_sql_stop_0 wc02" [ style=bold color="green" fontcolor="black" ] +"ms_drbd_mysql_confirmed-post_notify_demoted_0" -> "drbd_mysql:0_monitor_5000 wc01" [ style = bold] +"ms_drbd_mysql_confirmed-post_notify_demoted_0" -> "ms_drbd_mysql_pre_notify_promote_0" [ style = bold] +"ms_drbd_mysql_confirmed-post_notify_demoted_0" -> "ms_drbd_mysql_pre_notify_stop_0" [ style = bold] +"ms_drbd_mysql_confirmed-post_notify_demoted_0" [ style=bold color="green" fontcolor="orange" ] +"ms_drbd_mysql_confirmed-post_notify_promoted_0" -> "drbd_mysql:0_monitor_5000 wc01" [ style = bold] +"ms_drbd_mysql_confirmed-post_notify_promoted_0" -> "group_mysql_start_0" [ style = bold] +"ms_drbd_mysql_confirmed-post_notify_promoted_0" [ style=bold color="green" fontcolor="orange" ] +"ms_drbd_mysql_confirmed-post_notify_stopped_0" -> "all_stopped" [ style = bold] +"ms_drbd_mysql_confirmed-post_notify_stopped_0" -> "drbd_mysql:0_monitor_5000 wc01" [ style = bold] +"ms_drbd_mysql_confirmed-post_notify_stopped_0" -> "ms_drbd_mysql_pre_notify_promote_0" [ style = bold] +"ms_drbd_mysql_confirmed-post_notify_stopped_0" [ style=bold color="green" fontcolor="orange" ] +"ms_drbd_mysql_confirmed-pre_notify_demote_0" -> "ms_drbd_mysql_demote_0" [ style = bold] +"ms_drbd_mysql_confirmed-pre_notify_demote_0" -> "ms_drbd_mysql_post_notify_demoted_0" [ style = bold] +"ms_drbd_mysql_confirmed-pre_notify_demote_0" [ style=bold color="green" fontcolor="orange" ] +"ms_drbd_mysql_confirmed-pre_notify_promote_0" -> 
"ms_drbd_mysql_post_notify_promoted_0" [ style = bold] +"ms_drbd_mysql_confirmed-pre_notify_promote_0" -> "ms_drbd_mysql_promote_0" [ style = bold] +"ms_drbd_mysql_confirmed-pre_notify_promote_0" [ style=bold color="green" fontcolor="orange" ] +"ms_drbd_mysql_confirmed-pre_notify_stop_0" -> "ms_drbd_mysql_post_notify_stopped_0" [ style = bold] +"ms_drbd_mysql_confirmed-pre_notify_stop_0" -> "ms_drbd_mysql_stop_0" [ style = bold] +"ms_drbd_mysql_confirmed-pre_notify_stop_0" [ style=bold color="green" fontcolor="orange" ] +"ms_drbd_mysql_demote_0" -> "drbd_mysql:1_demote_0 wc02" [ style = bold] +"ms_drbd_mysql_demote_0" -> "ms_drbd_mysql_demoted_0" [ style = bold] +"ms_drbd_mysql_demote_0" -> "ms_drbd_mysql_stop_0" [ style = bold] +"ms_drbd_mysql_demote_0" [ style=bold color="green" fontcolor="orange" ] +"ms_drbd_mysql_demoted_0" -> "ms_drbd_mysql_post_notify_demoted_0" [ style = bold] +"ms_drbd_mysql_demoted_0" -> "ms_drbd_mysql_promote_0" [ style = bold] +"ms_drbd_mysql_demoted_0" -> "ms_drbd_mysql_stop_0" [ style = bold] +"ms_drbd_mysql_demoted_0" [ style=bold color="green" fontcolor="orange" ] +"ms_drbd_mysql_post_notify_demoted_0" -> "drbd_mysql:0_post_notify_demote_0 wc01" [ style = bold] +"ms_drbd_mysql_post_notify_demoted_0" -> "drbd_mysql:1_post_notify_demote_0 wc02" [ style = bold] +"ms_drbd_mysql_post_notify_demoted_0" -> "ms_drbd_mysql_confirmed-post_notify_demoted_0" [ style = bold] +"ms_drbd_mysql_post_notify_demoted_0" [ style=bold color="green" fontcolor="orange" ] +"ms_drbd_mysql_post_notify_promoted_0" -> "drbd_mysql:0_post_notify_promote_0 wc01" [ style = bold] +"ms_drbd_mysql_post_notify_promoted_0" -> "ms_drbd_mysql_confirmed-post_notify_promoted_0" [ style = bold] +"ms_drbd_mysql_post_notify_promoted_0" [ style=bold color="green" fontcolor="orange" ] +"ms_drbd_mysql_post_notify_stopped_0" -> "drbd_mysql:0_post_notify_stop_0 wc01" [ style = bold] +"ms_drbd_mysql_post_notify_stopped_0" -> "ms_drbd_mysql_confirmed-post_notify_stopped_0" [ style = 
bold] +"ms_drbd_mysql_post_notify_stopped_0" [ style=bold color="green" fontcolor="orange" ] +"ms_drbd_mysql_pre_notify_demote_0" -> "drbd_mysql:0_pre_notify_demote_0 wc01" [ style = bold] +"ms_drbd_mysql_pre_notify_demote_0" -> "drbd_mysql:1_pre_notify_demote_0 wc02" [ style = bold] +"ms_drbd_mysql_pre_notify_demote_0" -> "ms_drbd_mysql_confirmed-pre_notify_demote_0" [ style = bold] +"ms_drbd_mysql_pre_notify_demote_0" [ style=bold color="green" fontcolor="orange" ] +"ms_drbd_mysql_pre_notify_promote_0" -> "drbd_mysql:0_pre_notify_promote_0 wc01" [ style = bold] +"ms_drbd_mysql_pre_notify_promote_0" -> "ms_drbd_mysql_confirmed-pre_notify_promote_0" [ style = bold] +"ms_drbd_mysql_pre_notify_promote_0" [ style=bold color="green" fontcolor="orange" ] +"ms_drbd_mysql_pre_notify_stop_0" -> "drbd_mysql:0_pre_notify_stop_0 wc01" [ style = bold] +"ms_drbd_mysql_pre_notify_stop_0" -> "drbd_mysql:1_pre_notify_stop_0 wc02" [ style = bold] +"ms_drbd_mysql_pre_notify_stop_0" -> "ms_drbd_mysql_confirmed-pre_notify_stop_0" [ style = bold] +"ms_drbd_mysql_pre_notify_stop_0" [ style=bold color="green" fontcolor="orange" ] +"ms_drbd_mysql_promote_0" -> "drbd_mysql:0_promote_0 wc01" [ style = bold] +"ms_drbd_mysql_promote_0" [ style=bold color="green" fontcolor="orange" ] +"ms_drbd_mysql_promoted_0" -> "group_mysql_start_0" [ style = bold] +"ms_drbd_mysql_promoted_0" -> "ms_drbd_mysql_post_notify_promoted_0" [ style = bold] +"ms_drbd_mysql_promoted_0" [ style=bold color="green" fontcolor="orange" ] +"ms_drbd_mysql_stop_0" -> "drbd_mysql:1_stop_0 wc02" [ style = bold] +"ms_drbd_mysql_stop_0" -> "ms_drbd_mysql_stopped_0" [ style = bold] +"ms_drbd_mysql_stop_0" [ style=bold color="green" fontcolor="orange" ] +"ms_drbd_mysql_stopped_0" -> "ms_drbd_mysql_post_notify_stopped_0" [ style = bold] +"ms_drbd_mysql_stopped_0" -> "ms_drbd_mysql_promote_0" [ style = bold] +"ms_drbd_mysql_stopped_0" [ style=bold color="green" fontcolor="orange" ] +"ms_drbd_www_confirmed-post_notify_stopped_0" -> 
"all_stopped" [ style = bold] +"ms_drbd_www_confirmed-post_notify_stopped_0" [ style=bold color="green" fontcolor="orange" ] +"ms_drbd_www_confirmed-pre_notify_stop_0" -> "ms_drbd_www_post_notify_stopped_0" [ style = bold] +"ms_drbd_www_confirmed-pre_notify_stop_0" -> "ms_drbd_www_stop_0" [ style = bold] +"ms_drbd_www_confirmed-pre_notify_stop_0" [ style=bold color="green" fontcolor="orange" ] +"ms_drbd_www_post_notify_stopped_0" -> "drbd_www:0_post_notify_stop_0 wc01" [ style = bold] +"ms_drbd_www_post_notify_stopped_0" -> "ms_drbd_www_confirmed-post_notify_stopped_0" [ style = bold] +"ms_drbd_www_post_notify_stopped_0" [ style=bold color="green" fontcolor="orange" ] +"ms_drbd_www_pre_notify_stop_0" -> "drbd_www:0_pre_notify_stop_0 wc01" [ style = bold] +"ms_drbd_www_pre_notify_stop_0" -> "drbd_www:1_pre_notify_stop_0 wc02" [ style = bold] +"ms_drbd_www_pre_notify_stop_0" -> "ms_drbd_www_confirmed-pre_notify_stop_0" [ style = bold] +"ms_drbd_www_pre_notify_stop_0" [ style=bold color="green" fontcolor="orange" ] +"ms_drbd_www_stop_0" -> "drbd_www:1_stop_0 wc02" [ style = bold] +"ms_drbd_www_stop_0" -> "ms_drbd_www_stopped_0" [ style = bold] +"ms_drbd_www_stop_0" [ style=bold color="green" fontcolor="orange" ] +"ms_drbd_www_stopped_0" -> "ms_drbd_www_post_notify_stopped_0" [ style = bold] +"ms_drbd_www_stopped_0" [ style=bold color="green" fontcolor="orange" ] +"mysql-proxy:1_stop_0 wc02" -> "all_stopped" [ style = bold] +"mysql-proxy:1_stop_0 wc02" -> "group_mysql-proxy:1_stopped_0" [ style = bold] +"mysql-proxy:1_stop_0 wc02" [ style=bold color="green" fontcolor="black" ] +"mysql-server_monitor_30000 wc01" [ style=bold color="green" fontcolor="black" ] +"mysql-server_start_0 wc01" -> "group_mysql_running_0" [ style = bold] +"mysql-server_start_0 wc01" -> "mysql-server_monitor_30000 wc01" [ style = bold] +"mysql-server_start_0 wc01" [ style=bold color="green" fontcolor="black" ] +"mysql-server_stop_0 wc02" -> "all_stopped" [ style = bold] +"mysql-server_stop_0 
wc02" -> "group_mysql_stopped_0" [ style = bold] +"mysql-server_stop_0 wc02" -> "intip_sql_stop_0 wc02" [ style = bold] +"mysql-server_stop_0 wc02" -> "mysql-server_start_0 wc01" [ style = bold] +"mysql-server_stop_0 wc02" [ style=bold color="green" fontcolor="black" ] +"nfs-common:1_stop_0 wc02" -> "all_stopped" [ style = bold] +"nfs-common:1_stop_0 wc02" -> "group_nfs-common:1_stopped_0" [ style = bold] +"nfs-common:1_stop_0 wc02" [ style=bold color="green" fontcolor="black" ] +"stonith_rackpdu:0_monitor_5000 wc01" [ style=bold color="green" fontcolor="black" ] +"stonith_rackpdu:0_start_0 wc01" -> "DoFencing_running_0" [ style = bold] +"stonith_rackpdu:0_start_0 wc01" -> "stonith_rackpdu:0_monitor_5000 wc01" [ style = bold] +"stonith_rackpdu:0_start_0 wc01" -> "stonith_rackpdu:0_start_20000 wc01" [ style = bold] +"stonith_rackpdu:0_start_0 wc01" [ style=bold color="green" fontcolor="black" ] +"stonith_rackpdu:0_start_20000 wc01" [ style=bold color="green" fontcolor="black" ] +"stonith_rackpdu:0_stop_0 wc01" -> "DoFencing_stopped_0" [ style = bold] +"stonith_rackpdu:0_stop_0 wc01" -> "stonith_rackpdu:0_start_0 wc01" [ style = bold] +"stonith_rackpdu:0_stop_0 wc01" [ style=bold color="green" fontcolor="black" ] +"stonith_rackpdu:1_stop_0 wc02" -> "DoFencing_stopped_0" [ style = bold] +"stonith_rackpdu:1_stop_0 wc02" [ style=bold color="green" fontcolor="black" ] +} diff --git a/pengine/test10/probe-2.exp b/pengine/test10/probe-2.exp new file mode 100644 index 0000000000..fef4a88cd8 --- /dev/null +++ b/pengine/test10/probe-2.exp @@ -0,0 +1,1195 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git 
a/pengine/test10/probe-2.scores b/pengine/test10/probe-2.scores new file mode 100644 index 0000000000..f44d15d2bf --- /dev/null +++ b/pengine/test10/probe-2.scores @@ -0,0 +1,155 @@ +Allocation scores: +group_color: group_www_data allocation score on wc01: 0 +group_color: group_www_data allocation score on wc02: 0 +group_color: fs_www_data allocation score on wc01: 0 +group_color: fs_www_data allocation score on wc02: 0 +group_color: nfs-kernel-server allocation score on wc01: 0 +group_color: nfs-kernel-server allocation score on wc02: 0 +group_color: intip_nfs allocation score on wc01: 0 +group_color: intip_nfs allocation score on wc02: 0 +clone_color: clone_nfs-common allocation score on wc01: 0 +clone_color: clone_nfs-common allocation score on wc02: -1000000 +clone_color: group_nfs-common:0 allocation score on wc01: 0 +clone_color: group_nfs-common:0 allocation score on wc02: 0 +clone_color: nfs-common:0 allocation score on wc01: 1 +clone_color: nfs-common:0 allocation score on wc02: 0 +clone_color: group_nfs-common:1 allocation score on wc01: 0 +clone_color: group_nfs-common:1 allocation score on wc02: 0 +clone_color: nfs-common:1 allocation score on wc01: 0 +clone_color: nfs-common:1 allocation score on wc02: 1 +group_color: group_nfs-common:0 allocation score on wc01: 0 +group_color: group_nfs-common:0 allocation score on wc02: -1000000 +group_color: nfs-common:0 allocation score on wc01: 1 +group_color: nfs-common:0 allocation score on wc02: -1000000 +native_color: nfs-common:0 allocation score on wc01: 1 +native_color: nfs-common:0 allocation score on wc02: -1000000 +group_color: group_nfs-common:1 allocation score on wc01: -1000000 +group_color: group_nfs-common:1 allocation score on wc02: -1000000 +group_color: nfs-common:1 allocation score on wc01: -1000000 +group_color: nfs-common:1 allocation score on wc02: -1000000 +native_color: nfs-common:1 allocation score on wc01: -1000000 +native_color: nfs-common:1 allocation score on wc02: -1000000 
+clone_color: ms_drbd_www allocation score on wc01: 1 +clone_color: ms_drbd_www allocation score on wc02: -1000000 +clone_color: drbd_www:0 allocation score on wc01: 76 +clone_color: drbd_www:0 allocation score on wc02: 0 +clone_color: drbd_www:1 allocation score on wc01: 0 +clone_color: drbd_www:1 allocation score on wc02: 1 +native_color: drbd_www:0 allocation score on wc01: 76 +native_color: drbd_www:0 allocation score on wc02: -1000000 +native_color: drbd_www:1 allocation score on wc01: -1000000 +native_color: drbd_www:1 allocation score on wc02: -1000000 +drbd_www:0 promotion score on wc01: 177 +drbd_www:1 promotion score on none: 0 +native_color: fs_www_data allocation score on wc01: 77 +native_color: fs_www_data allocation score on wc02: -1000000 +native_color: nfs-kernel-server allocation score on wc01: 0 +native_color: nfs-kernel-server allocation score on wc02: -1000000 +native_color: intip_nfs allocation score on wc01: 0 +native_color: intip_nfs allocation score on wc02: -1000000 +clone_color: ms_drbd_mysql allocation score on wc01: 0 +clone_color: ms_drbd_mysql allocation score on wc02: -1000000 +clone_color: drbd_mysql:0 allocation score on wc01: 76 +clone_color: drbd_mysql:0 allocation score on wc02: 0 +clone_color: drbd_mysql:1 allocation score on wc01: 0 +clone_color: drbd_mysql:1 allocation score on wc02: 1 +native_color: drbd_mysql:0 allocation score on wc01: 76 +native_color: drbd_mysql:0 allocation score on wc02: -1000000 +native_color: drbd_mysql:1 allocation score on wc01: -1000000 +native_color: drbd_mysql:1 allocation score on wc02: -1000000 +drbd_mysql:0 promotion score on wc01: 75 +drbd_mysql:1 promotion score on none: 0 +group_color: group_mysql allocation score on wc01: 0 +group_color: group_mysql allocation score on wc02: 0 +group_color: fs_mysql allocation score on wc01: 0 +group_color: fs_mysql allocation score on wc02: 0 +group_color: intip_sql allocation score on wc01: 0 +group_color: intip_sql allocation score on wc02: 0 
+group_color: mysql-server allocation score on wc01: 0 +group_color: mysql-server allocation score on wc02: 0 +drbd_mysql:0 promotion score on wc01: 1000000 +drbd_mysql:1 promotion score on none: 0 +native_color: fs_mysql allocation score on wc01: 76 +native_color: fs_mysql allocation score on wc02: -1000000 +native_color: intip_sql allocation score on wc01: 0 +native_color: intip_sql allocation score on wc02: -1000000 +native_color: mysql-server allocation score on wc01: 0 +native_color: mysql-server allocation score on wc02: -1000000 +drbd_www:0 promotion score on wc01: 1000000 +drbd_www:1 promotion score on none: 0 +clone_color: clone_mysql-proxy allocation score on wc01: 0 +clone_color: clone_mysql-proxy allocation score on wc02: 0 +clone_color: group_mysql-proxy:0 allocation score on wc01: 0 +clone_color: group_mysql-proxy:0 allocation score on wc02: 0 +clone_color: mysql-proxy:0 allocation score on wc01: 1 +clone_color: mysql-proxy:0 allocation score on wc02: 0 +clone_color: group_mysql-proxy:1 allocation score on wc01: 0 +clone_color: group_mysql-proxy:1 allocation score on wc02: 0 +clone_color: mysql-proxy:1 allocation score on wc01: 0 +clone_color: mysql-proxy:1 allocation score on wc02: 1 +group_color: group_mysql-proxy:0 allocation score on wc01: 0 +group_color: group_mysql-proxy:0 allocation score on wc02: -1000000 +group_color: mysql-proxy:0 allocation score on wc01: 1 +group_color: mysql-proxy:0 allocation score on wc02: -1000000 +native_color: mysql-proxy:0 allocation score on wc01: 1 +native_color: mysql-proxy:0 allocation score on wc02: -1000000 +group_color: group_mysql-proxy:1 allocation score on wc01: -1000000 +group_color: group_mysql-proxy:1 allocation score on wc02: -1000000 +group_color: mysql-proxy:1 allocation score on wc01: -1000000 +group_color: mysql-proxy:1 allocation score on wc02: -1000000 +native_color: mysql-proxy:1 allocation score on wc01: -1000000 +native_color: mysql-proxy:1 allocation score on wc02: -1000000 +clone_color: 
clone_webservice allocation score on wc01: 0 +clone_color: clone_webservice allocation score on wc02: -1000000 +clone_color: group_webservice:0 allocation score on wc01: 0 +clone_color: group_webservice:0 allocation score on wc02: 0 +clone_color: fs_www:0 allocation score on wc01: 1 +clone_color: fs_www:0 allocation score on wc02: 0 +clone_color: apache2:0 allocation score on wc01: 1 +clone_color: apache2:0 allocation score on wc02: 0 +clone_color: group_webservice:1 allocation score on wc01: 0 +clone_color: group_webservice:1 allocation score on wc02: 0 +clone_color: fs_www:1 allocation score on wc01: 0 +clone_color: fs_www:1 allocation score on wc02: 1 +clone_color: apache2:1 allocation score on wc01: 0 +clone_color: apache2:1 allocation score on wc02: 1 +group_color: group_webservice:0 allocation score on wc01: 0 +group_color: group_webservice:0 allocation score on wc02: -1000000 +group_color: fs_www:0 allocation score on wc01: 1 +group_color: fs_www:0 allocation score on wc02: -1000000 +group_color: apache2:0 allocation score on wc01: 1 +group_color: apache2:0 allocation score on wc02: -1000000 +native_color: fs_www:0 allocation score on wc01: 3 +native_color: fs_www:0 allocation score on wc02: -1000000 +native_color: apache2:0 allocation score on wc01: 1 +native_color: apache2:0 allocation score on wc02: -1000000 +group_color: group_webservice:1 allocation score on wc01: -1000000 +group_color: group_webservice:1 allocation score on wc02: -1000000 +group_color: fs_www:1 allocation score on wc01: -1000000 +group_color: fs_www:1 allocation score on wc02: -1000000 +group_color: apache2:1 allocation score on wc01: -1000000 +group_color: apache2:1 allocation score on wc02: -1000000 +native_color: fs_www:1 allocation score on wc01: -1000000 +native_color: fs_www:1 allocation score on wc02: -1000000 +native_color: apache2:1 allocation score on wc01: -1000000 +native_color: apache2:1 allocation score on wc02: -1000000 +group_color: group_ftpd allocation score on wc01: 
0 +group_color: group_ftpd allocation score on wc02: 0 +group_color: extip_ftp allocation score on wc01: 0 +group_color: extip_ftp allocation score on wc02: 0 +group_color: pure-ftpd allocation score on wc01: 0 +group_color: pure-ftpd allocation score on wc02: 0 +native_color: extip_ftp allocation score on wc01: 3 +native_color: extip_ftp allocation score on wc02: -1000000 +native_color: pure-ftpd allocation score on wc01: 0 +native_color: pure-ftpd allocation score on wc02: -1000000 +clone_color: DoFencing allocation score on wc01: 0 +clone_color: DoFencing allocation score on wc02: 0 +clone_color: stonith_rackpdu:0 allocation score on wc01: 1 +clone_color: stonith_rackpdu:0 allocation score on wc02: 0 +clone_color: stonith_rackpdu:1 allocation score on wc01: 0 +clone_color: stonith_rackpdu:1 allocation score on wc02: 1 +native_color: stonith_rackpdu:0 allocation score on wc01: 1 +native_color: stonith_rackpdu:0 allocation score on wc02: -1000000 +native_color: stonith_rackpdu:1 allocation score on wc01: -1000000 +native_color: stonith_rackpdu:1 allocation score on wc02: -1000000 diff --git a/pengine/test10/probe-2.xml b/pengine/test10/probe-2.xml new file mode 100644 index 0000000000..d48de8180f --- /dev/null +++ b/pengine/test10/probe-2.xml @@ -0,0 +1,480 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tools/shell/templates/Makefile.am b/tools/shell/templates/Makefile.am index fb59c3ed88..cad54743d1 100644 --- a/tools/shell/templates/Makefile.am +++ b/tools/shell/templates/Makefile.am @@ -1,26 +1,26 @@ # # doc: Pacemaker code # # Copyright (C) 2008 Andrew Beekhof # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. # MAINTAINERCLEANFILES = Makefile.in templatedir = $(datadir)/@PACKAGE@/templates -template_DATA = apache virtual-ip filesystem ocfs2 clvm +template_DATA = apache virtual-ip filesystem ocfs2 clvm gfs2-base gfs2 EXTRA_DIST = $(template_DATA) diff --git a/tools/shell/templates/gfs2 b/tools/shell/templates/gfs2 new file mode 100644 index 0000000000..f5d459e69f --- /dev/null +++ b/tools/shell/templates/gfs2 @@ -0,0 +1,68 @@ +%name gfs2 + +# gfs2 filesystem (cloned) +# +# This template generates a cloned instance of the ocfs2 filesystem +# +# The filesystem should be on the device, unless clvm is used +# To use clvm, pull it along with this template: +# new myfs ocfs2 clvm +# +# NB: You need just one dlm and o2cb, regardless of how many +# filesystems. 
In other words, you can use this template only for +# one filesystem and to make another one, you'll have to edit the +# resulting configuration yourself. + +%depends_on gfs2-base +%suggests clvm + +%required + +# Name the gfs2 filesystem +# (for example: bigfs) +# NB: The clone is going to be named c- (e.g. c-bigfs) + +%% id + +# The mount point + +%% directory + +# The device + +%% device + +# optional parameters for the gfs2 filesystem + +%optional + +# mount options + +%% options + +%generate + +primitive %_:id ocf:heartbeat:Filesystem + params + directory="%_:directory" + fstype="gfs2" + device="%_:device" + opt options="%_:options" + +monitor %_:id 20:40 + +clone c-%_:id %_:id + meta interleave="true" ordered="true" + +colocation colo-%_:id-gfs inf: c-%_:id gfs-clone + +order order-%_:id-gfs inf: gfs-clone c-%_:id + +# if there's clvm, generate some constraints too +# + +%if %clvm +colocation colo-%_:id-%clvm:id inf: c-%_:id c-%clvm:id + +order order-%_:id-%clvm:id inf: c-%clvm:id c-%_:id +%fi diff --git a/tools/shell/templates/gfs2-base b/tools/shell/templates/gfs2-base new file mode 100644 index 0000000000..2cafff0654 --- /dev/null +++ b/tools/shell/templates/gfs2-base @@ -0,0 +1,42 @@ +%name gfs2-base + +# gfs2 filesystem base (cloned) +# +# This template generates a cloned instance of the ocfs2 filesystem +# +# The filesystem should be on the device, unless clvm is used +# To use clvm, pull it along with this template: +# new myfs ocfs2 clvm +# +# NB: You need just one dlm and o2cb, regardless of how many +# filesystems. In other words, you can use this template only for +# one filesystem and to make another one, you'll have to edit the +# resulting configuration yourself. 
+ +%suggests clvm +%required + +%generate + +primitive dlm ocf:pacemaker:controld + +clone dlm-clone dlm + meta interleave="true" ordered="true" + +primitive gfs-controld ocf:pacemaker:controld + +clone gfs-clone gfs-controld + meta interleave="true" ordered="true" + +colocation colo-gfs-dlm inf: gfs-clone dlm-clone + +order order-gfs-dlm inf: dlm-clone gfs-clone + +# if there's clvm, generate some constraints too +# + +%if %clvm +colocation colo-clvm-dlm inf: clvm-clone dlm-clone + +order order-clvm-dlm inf: dlm-clone clvm-clone +%fi