diff --git a/cts/lab/CTStests.py b/cts/lab/CTStests.py
index f4be998cfb..5535177b95 100644
--- a/cts/lab/CTStests.py
+++ b/cts/lab/CTStests.py
@@ -1,3142 +1,3142 @@
""" Test-specific classes for Pacemaker's Cluster Test Suite (CTS)
"""
__copyright__ = "Copyright 2000-2021 the Pacemaker project contributors"
__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
#
# SPECIAL NOTE:
#
# Tests may NOT implement any cluster-manager-specific code in them.
# EXTEND the ClusterManager object to provide the base capabilities
# the test needs if you need to do something that the current CM classes
# do not. Otherwise you screw up the whole point of the object structure
# in CTS.
#
# Thank you.
#
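# Illustrative sketch only (the method name below is hypothetical, not part of
# the existing ClusterManager API): rather than running a cluster-manager-
# specific command from inside a test, add a method to the CM class and have
# the test call that:
#
#     class ClusterManager:
#         def flush_all_logs(self, node):
#             # manager/OS-specific details live here, not in the test
#             self.rsh(node, "some-manager-specific-command")
#
#     # ...and inside a test:  self.CM.flush_all_logs(node)
#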
import os
import re
import time
import subprocess
import tempfile
from stat import *
from cts import CTS
from cts.CTSaudits import *
from cts.CTSvars import *
from cts.patterns import PatternSelector
from cts.logging import LogFactory
from cts.remote import RemoteFactory
from cts.watcher import LogWatcher
from cts.environment import EnvFactory
AllTestClasses = [ ]
class CTSTest(object):
'''
A Cluster test.
We implement the basic set of properties and behaviors for a generic
cluster test.
Cluster tests track their own statistics.
We keep each of the kinds of counts we track as separate {name,value}
pairs.
'''
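# For example, after two invocations of a test where one run failed, its
# Stats might read (illustrative values only):
#   {"calls": 2, "success": 1, "failure": 1, "skipped": 0, "auditfail": 0}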
def __init__(self, cm):
#self.name="the unnamed test"
self.Stats = {"calls":0
, "success":0
, "failure":0
, "skipped":0
, "auditfail":0}
# if not issubclass(cm.__class__, ClusterManager):
# raise ValueError("Must be a ClusterManager object")
self.CM = cm
self.Env = EnvFactory().getInstance()
self.rsh = RemoteFactory().getInstance()
self.logger = LogFactory()
self.templates = PatternSelector(cm["Name"])
self.Audits = []
self.timeout = 120
self.passed = 1
self.is_loop = 0
self.is_unsafe = 0
self.is_docker_unsafe = 0
self.is_experimental = 0
self.is_container = 0
self.is_valgrind = 0
self.benchmark = 0 # whether this test should be benchmarked
self.timer = {} # timers, keyed by name
def log(self, args):
self.logger.log(args)
def debug(self, args):
self.logger.debug(args)
def has_key(self, key):
return key in self.Stats
def __setitem__(self, key, value):
self.Stats[key] = value
def __getitem__(self, key):
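# A plain "foo in test" membership check falls back to __getitem__(0) here
# (no __contains__ or __iter__ is defined), so an integer key of 0 almost
# certainly indicates a membership test that should target self.Stats instead.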
if str(key) == "0":
raise ValueError("Bad call to 'foo in X', should reference 'foo in X.Stats' instead")
if key in self.Stats:
return self.Stats[key]
return None
def log_mark(self, msg):
self.debug("MARK: test %s %s %d" % (self.name,msg,time.time()))
return
def get_timer(self,key = "test"):
try: return self.timer[key]
except KeyError: return 0
def set_timer(self,key = "test"):
self.timer[key] = time.time()
return self.timer[key]
def log_timer(self,key = "test"):
elapsed = 0
if key in self.timer:
elapsed = time.time() - self.timer[key]
s = key == "test" and self.name or "%s:%s" % (self.name,key)
self.debug("%s runtime: %.2f" % (s, elapsed))
del self.timer[key]
return elapsed
def incr(self, name):
'''Increment (or initialize) the value associated with the given name'''
if not name in self.Stats:
self.Stats[name] = 0
self.Stats[name] = self.Stats[name]+1
# Reset the test passed boolean
if name == "calls":
self.passed = 1
def failure(self, reason="none"):
'''Increment the failure count'''
self.passed = 0
self.incr("failure")
self.logger.log(("Test %s" % self.name).ljust(35) + " FAILED: %s" % reason)
return None
def success(self):
'''Increment the success count'''
self.incr("success")
return 1
def skipped(self):
'''Increment the skipped count'''
self.incr("skipped")
return 1
def __call__(self, node):
'''Perform the given test'''
raise ValueError("Abstract Class member (__call__)")
self.incr("calls")
return self.failure()
def audit(self):
passed = 1
if len(self.Audits) > 0:
for audit in self.Audits:
if not audit():
self.logger.log("Internal %s Audit %s FAILED." % (self.name, audit.name()))
self.incr("auditfail")
passed = 0
return passed
def setup(self, node):
'''Setup the given test'''
return self.success()
def teardown(self, node):
'''Tear down the given test'''
return self.success()
def create_watch(self, patterns, timeout, name=None):
if not name:
name = self.name
return LogWatcher(self.Env["LogFileName"], patterns, name, timeout, kind=self.Env["LogWatcher"], hosts=self.Env["nodes"])
def local_badnews(self, prefix, watch, local_ignore=[]):
errcount = 0
if not prefix:
prefix = "LocalBadNews:"
ignorelist = []
ignorelist.append(" CTS: ")
ignorelist.append(prefix)
ignorelist.extend(local_ignore)
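# Scan any new log lines without blocking; count a line as "bad news" only
# if it matches none of the ignore patterns, and stop after 100 hits so a
# log flood cannot stall the test.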
while errcount < 100:
match = watch.look(0)
if match:
add_err = 1
for ignore in ignorelist:
if add_err == 1 and re.search(ignore, match):
add_err = 0
if add_err == 1:
self.logger.log(prefix + " " + match)
errcount = errcount + 1
else:
break
else:
self.logger.log("Too many errors!")
watch.end()
return errcount
def is_applicable(self):
return self.is_applicable_common()
def is_applicable_common(self):
'''Return TRUE if we are applicable in the current test configuration'''
#raise ValueError("Abstract Class member (is_applicable)")
if self.is_loop and not self.Env["loop-tests"]:
return 0
elif self.is_unsafe and not self.Env["unsafe-tests"]:
return 0
elif self.is_valgrind and not self.Env["valgrind-tests"]:
return 0
elif self.is_experimental and not self.Env["experimental-tests"]:
return 0
elif self.is_docker_unsafe and self.Env["docker"]:
return 0
elif self.is_container and not self.Env["container-tests"]:
return 0
elif self.Env["benchmark"] and self.benchmark == 0:
return 0
return 1
def find_ocfs2_resources(self, node):
self.r_o2cb = None
self.r_ocfs2 = []
(rc, lines) = self.rsh(node, "crm_resource -c", None)
for line in lines:
if re.search("^Resource", line):
r = AuditResource(self.CM, line)
if r.rtype == "o2cb" and r.parent != "NA":
self.debug("Found o2cb: %s" % self.r_o2cb)
self.r_o2cb = r.parent
if re.search("^Constraint", line):
c = AuditConstraint(self.CM, line)
if c.type == "rsc_colocation" and c.target == self.r_o2cb:
self.r_ocfs2.append(c.rsc)
self.debug("Found ocfs2 filesystems: %s" % repr(self.r_ocfs2))
return len(self.r_ocfs2)
def canrunnow(self, node):
'''Return TRUE if we can meaningfully run right now'''
return 1
def errorstoignore(self):
'''Return list of errors which are 'normal' and should be ignored'''
return []
class StopTest(CTSTest):
'''Stop (deactivate) the cluster manager on a node'''
def __init__(self, cm):
CTSTest.__init__(self, cm)
self.name = "Stop"
def __call__(self, node):
'''Perform the 'stop' test. '''
self.incr("calls")
if self.CM.ShouldBeStatus[node] != "up":
return self.skipped()
patterns = []
# Technically we should always be able to notice ourselves stopping
patterns.append(self.templates["Pat:We_stopped"] % node)
# Any active node needs to notice this one left
# (note that this won't work if we have multiple partitions)
for other in self.Env["nodes"]:
if self.CM.ShouldBeStatus[other] == "up" and other != node:
patterns.append(self.templates["Pat:They_stopped"] %(other, self.CM.key_for_node(node)))
#self.debug("Checking %s will notice %s left"%(other, node))
watch = self.create_watch(patterns, self.Env["DeadTime"])
watch.setwatch()
if node == self.CM.OurNode:
self.incr("us")
else:
if self.CM.upcount() <= 1:
self.incr("all")
else:
self.incr("them")
self.CM.StopaCM(node)
watch_result = watch.lookforall()
failreason = None
UnmatchedList = "||"
if watch.unmatched:
(rc, output) = self.rsh(node, "/bin/ps axf", None)
for line in output:
self.debug(line)
(rc, output) = self.rsh(node, "/usr/sbin/dlm_tool dump 2>/dev/null", None)
for line in output:
self.debug(line)
for regex in watch.unmatched:
self.logger.log ("ERROR: Shutdown pattern not found: %s" % (regex))
UnmatchedList += regex + "||";
failreason = "Missing shutdown pattern"
self.CM.cluster_stable(self.Env["DeadTime"])
if not watch.unmatched or self.CM.upcount() == 0:
return self.success()
if len(watch.unmatched) >= self.CM.upcount():
return self.failure("no match against (%s)" % UnmatchedList)
if failreason == None:
return self.success()
else:
return self.failure(failreason)
#
# We don't register StopTest because it's better when called by
# another test...
#
class StartTest(CTSTest):
'''Start (activate) the cluster manager on a node'''
def __init__(self, cm, debug=None):
CTSTest.__init__(self,cm)
self.name = "start"
self.debug = debug
def __call__(self, node):
'''Perform the 'start' test. '''
self.incr("calls")
if self.CM.upcount() == 0:
self.incr("us")
else:
self.incr("them")
if self.CM.ShouldBeStatus[node] != "down":
return self.skipped()
elif self.CM.StartaCM(node):
return self.success()
else:
return self.failure("Startup %s on node %s failed"
% (self.Env["Name"], node))
#
# We don't register StartTest because it's better when called by
# another test...
#
class FlipTest(CTSTest):
'''If it's running, stop it. If it's stopped, start it.
Overthrow the status quo...
'''
def __init__(self, cm):
CTSTest.__init__(self,cm)
self.name = "Flip"
self.start = StartTest(cm)
self.stop = StopTest(cm)
def __call__(self, node):
'''Perform the 'Flip' test. '''
self.incr("calls")
if self.CM.ShouldBeStatus[node] == "up":
self.incr("stopped")
ret = self.stop(node)
type = "up->down"
# Give the cluster time to recognize it's gone...
time.sleep(self.Env["StableTime"])
elif self.CM.ShouldBeStatus[node] == "down":
self.incr("started")
ret = self.start(node)
type = "down->up"
else:
return self.skipped()
self.incr(type)
if ret:
return self.success()
else:
return self.failure("%s failure" % type)
# Register FlipTest as a good test to run
AllTestClasses.append(FlipTest)
class RestartTest(CTSTest):
'''Stop and restart a node'''
def __init__(self, cm):
CTSTest.__init__(self,cm)
self.name = "Restart"
self.start = StartTest(cm)
self.stop = StopTest(cm)
self.benchmark = 1
def __call__(self, node):
'''Perform the 'restart' test. '''
self.incr("calls")
self.incr("node:" + node)
ret1 = 1
if self.CM.StataCM(node):
self.incr("WasStopped")
if not self.start(node):
return self.failure("start (setup) failure: "+node)
self.set_timer()
if not self.stop(node):
return self.failure("stop failure: "+node)
if not self.start(node):
return self.failure("start failure: "+node)
return self.success()
# Register RestartTest as a good test to run
AllTestClasses.append(RestartTest)
class StonithdTest(CTSTest):
def __init__(self, cm):
CTSTest.__init__(self, cm)
self.name = "Stonithd"
self.startall = SimulStartLite(cm)
self.benchmark = 1
def __call__(self, node):
self.incr("calls")
if len(self.Env["nodes"]) < 2:
return self.skipped()
ret = self.startall(None)
if not ret:
return self.failure("Setup failed")
is_dc = self.CM.is_node_dc(node)
watchpats = []
watchpats.append(self.templates["Pat:FenceOpOK"] % node)
watchpats.append(self.templates["Pat:NodeFenced"] % node)
if self.Env["at-boot"] == 0:
self.debug("Expecting %s to stay down" % node)
self.CM.ShouldBeStatus[node] = "down"
else:
self.debug("Expecting %s to come up again %d" % (node, self.Env["at-boot"]))
watchpats.append("%s.* S_STARTING -> S_PENDING" % node)
watchpats.append("%s.* S_PENDING -> S_NOT_DC" % node)
watch = self.create_watch(watchpats, 30 + self.Env["DeadTime"] + self.Env["StableTime"] + self.Env["StartTime"])
watch.setwatch()
origin = self.Env.RandomGen.choice(self.Env["nodes"])
rc = self.rsh(origin, "stonith_admin --reboot %s -VVVVVV" % node)
if rc == 194:
# 194 - 256 = -62 = Timer expired
#
# Look for the patterns, usually this means the required
# device was running on the node to be fenced - or that
# the required devices were in the process of being loaded
# and/or moved
#
# Effectively the node committed suicide so there will be
# no confirmation, but pacemaker should be watching and
# fence the node again
self.logger.log("Fencing command on %s to fence %s timed out" % (origin, node))
elif origin != node and rc != 0:
self.debug("Waiting for the cluster to recover")
self.CM.cluster_stable()
self.debug("Waiting for fenced node to come back up")
self.CM.ns.WaitForAllNodesToComeUp(self.Env["nodes"], 600)
self.logger.log("Fencing command on %s failed to fence %s (rc=%d)" % (origin, node, rc))
elif origin == node and rc != 255:
# 255 == broken pipe, ie. the node was fenced as expected
self.logger.log("Locally originated fencing returned %d" % rc)
self.set_timer("fence")
matched = watch.lookforall()
self.log_timer("fence")
self.set_timer("reform")
if watch.unmatched:
self.logger.log("Patterns not found: " + repr(watch.unmatched))
self.debug("Waiting for the cluster to recover")
self.CM.cluster_stable()
self.debug("Waiting for fenced node to come back up")
self.CM.ns.WaitForAllNodesToComeUp(self.Env["nodes"], 600)
self.debug("Waiting for the cluster to re-stabilize with all nodes")
is_stable = self.CM.cluster_stable(self.Env["StartTime"])
if not matched:
return self.failure("Didn't find all expected patterns")
elif not is_stable:
return self.failure("Cluster did not become stable")
self.log_timer("reform")
return self.success()
def errorstoignore(self):
return [
self.templates["Pat:Fencing_start"] % ".*",
self.templates["Pat:Fencing_ok"] % ".*",
self.templates["Pat:Fencing_active"],
r"error.*: Operation 'reboot' targeting .* by .* for stonith_admin.*: Timer expired",
]
def is_applicable(self):
if not self.is_applicable_common():
return 0
if "DoFencing" in list(self.Env.keys()):
return self.Env["DoFencing"]
return 1
AllTestClasses.append(StonithdTest)
class StartOnebyOne(CTSTest):
'''Start all the nodes ~ one by one'''
def __init__(self, cm):
CTSTest.__init__(self,cm)
self.name = "StartOnebyOne"
self.stopall = SimulStopLite(cm)
self.start = StartTest(cm)
self.ns = CTS.NodeStatus(cm.Env)
def __call__(self, dummy):
'''Perform the 'StartOnebyOne' test. '''
self.incr("calls")
# We ignore the "node" parameter...
# Shut down all the nodes...
ret = self.stopall(None)
if not ret:
return self.failure("Test setup failed")
failed = []
self.set_timer()
for node in self.Env["nodes"]:
if not self.start(node):
failed.append(node)
if len(failed) > 0:
return self.failure("Some node failed to start: " + repr(failed))
return self.success()
# Register StartOnebyOne as a good test to run
AllTestClasses.append(StartOnebyOne)
class SimulStart(CTSTest):
'''Start all the nodes ~ simultaneously'''
def __init__(self, cm):
CTSTest.__init__(self,cm)
self.name = "SimulStart"
self.stopall = SimulStopLite(cm)
self.startall = SimulStartLite(cm)
def __call__(self, dummy):
'''Perform the 'SimulStart' test. '''
self.incr("calls")
# We ignore the "node" parameter...
# Shut down all the nodes...
ret = self.stopall(None)
if not ret:
return self.failure("Setup failed")
if not self.startall(None):
return self.failure("Startall failed")
return self.success()
# Register SimulStart as a good test to run
AllTestClasses.append(SimulStart)
class SimulStop(CTSTest):
'''Stop all the nodes ~ simultaneously'''
def __init__(self, cm):
CTSTest.__init__(self,cm)
self.name = "SimulStop"
self.startall = SimulStartLite(cm)
self.stopall = SimulStopLite(cm)
def __call__(self, dummy):
'''Perform the 'SimulStop' test. '''
self.incr("calls")
# We ignore the "node" parameter...
# Start up all the nodes...
ret = self.startall(None)
if not ret:
return self.failure("Setup failed")
if not self.stopall(None):
return self.failure("Stopall failed")
return self.success()
# Register SimulStop as a good test to run
AllTestClasses.append(SimulStop)
class StopOnebyOne(CTSTest):
'''Stop all the nodes in order'''
def __init__(self, cm):
CTSTest.__init__(self,cm)
self.name = "StopOnebyOne"
self.startall = SimulStartLite(cm)
self.stop = StopTest(cm)
def __call__(self, dummy):
'''Perform the 'StopOnebyOne' test. '''
self.incr("calls")
# We ignore the "node" parameter...
# Start up all the nodes...
ret = self.startall(None)
if not ret:
return self.failure("Setup failed")
failed = []
self.set_timer()
for node in self.Env["nodes"]:
if not self.stop(node):
failed.append(node)
if len(failed) > 0:
return self.failure("Some node failed to stop: " + repr(failed))
return self.success()
# Register StopOnebyOne as a good test to run
AllTestClasses.append(StopOnebyOne)
class RestartOnebyOne(CTSTest):
'''Restart all the nodes in order'''
def __init__(self, cm):
CTSTest.__init__(self,cm)
self.name = "RestartOnebyOne"
self.startall = SimulStartLite(cm)
def __call__(self, dummy):
'''Perform the 'RestartOnebyOne' test. '''
self.incr("calls")
# We ignore the "node" parameter...
# Start up all the nodes...
ret = self.startall(None)
if not ret:
return self.failure("Setup failed")
did_fail = []
self.set_timer()
self.restart = RestartTest(self.CM)
for node in self.Env["nodes"]:
if not self.restart(node):
did_fail.append(node)
if did_fail:
return self.failure("Could not restart %d nodes: %s"
% (len(did_fail), repr(did_fail)))
return self.success()
# Register RestartOnebyOne as a good test to run
AllTestClasses.append(RestartOnebyOne)
class PartialStart(CTSTest):
'''Start a node - but tell it to stop before it finishes starting up'''
def __init__(self, cm):
CTSTest.__init__(self,cm)
self.name = "PartialStart"
self.startall = SimulStartLite(cm)
self.stopall = SimulStopLite(cm)
self.stop = StopTest(cm)
#self.is_unsafe = 1
def __call__(self, node):
'''Perform the 'PartialStart' test. '''
self.incr("calls")
ret = self.stopall(None)
if not ret:
return self.failure("Setup failed")
watchpats = []
watchpats.append("pacemaker-controld.*Connecting to .* cluster infrastructure")
watch = self.create_watch(watchpats, self.Env["DeadTime"]+10)
watch.setwatch()
self.CM.StartaCMnoBlock(node)
ret = watch.lookforall()
if not ret:
self.logger.log("Patterns not found: " + repr(watch.unmatched))
return self.failure("Setup of %s failed" % node)
ret = self.stop(node)
if not ret:
return self.failure("%s did not stop in time" % node)
return self.success()
def errorstoignore(self):
'''Return list of errors which should be ignored'''
# We might do some fencing in the 2-node case if we make it up far enough
return [
r"Executing reboot fencing operation",
r"Requesting fencing \([^)]+\) of node ",
]
# Register PartialStart as a good test to run
AllTestClasses.append(PartialStart)
class StandbyTest(CTSTest):
def __init__(self, cm):
CTSTest.__init__(self,cm)
self.name = "Standby"
self.benchmark = 1
self.start = StartTest(cm)
self.startall = SimulStartLite(cm)
# make sure the node is active
# set the node to standby mode
# check resources, no resources should be running on the node
# set the node to active mode
# check resources, resources should have been migrated back (SHOULD THEY?)
def __call__(self, node):
self.incr("calls")
ret = self.startall(None)
if not ret:
return self.failure("Start all nodes failed")
self.debug("Make sure node %s is active" % node)
if self.CM.StandbyStatus(node) != "off":
if not self.CM.SetStandbyMode(node, "off"):
return self.failure("can't set node %s to active mode" % node)
self.CM.cluster_stable()
status = self.CM.StandbyStatus(node)
if status != "off":
return self.failure("standby status of %s is [%s] but we expect [off]" % (node, status))
self.debug("Getting resources running on node %s" % node)
rsc_on_node = self.CM.active_resources(node)
watchpats = []
watchpats.append(r"State transition .* -> S_POLICY_ENGINE")
watch = self.create_watch(watchpats, self.Env["DeadTime"]+10)
watch.setwatch()
self.debug("Setting node %s to standby mode" % node)
if not self.CM.SetStandbyMode(node, "on"):
return self.failure("can't set node %s to standby mode" % node)
self.set_timer("on")
ret = watch.lookforall()
if not ret:
self.logger.log("Patterns not found: " + repr(watch.unmatched))
self.CM.SetStandbyMode(node, "off")
return self.failure("cluster didn't react to standby change on %s" % node)
self.CM.cluster_stable()
status = self.CM.StandbyStatus(node)
if status != "on":
return self.failure("standby status of %s is [%s] but we expect [on]" % (node, status))
self.log_timer("on")
self.debug("Checking resources")
bad_run = self.CM.active_resources(node)
if len(bad_run) > 0:
rc = self.failure("%s set to standby, %s is still running on it" % (node, repr(bad_run)))
self.debug("Setting node %s to active mode" % node)
self.CM.SetStandbyMode(node, "off")
return rc
self.debug("Setting node %s to active mode" % node)
if not self.CM.SetStandbyMode(node, "off"):
return self.failure("can't set node %s to active mode" % node)
self.set_timer("off")
self.CM.cluster_stable()
status = self.CM.StandbyStatus(node)
if status != "off":
return self.failure("standby status of %s is [%s] but we expect [off]" % (node, status))
self.log_timer("off")
return self.success()
AllTestClasses.append(StandbyTest)
class ValgrindTest(CTSTest):
'''Check for memory leaks'''
def __init__(self, cm):
CTSTest.__init__(self,cm)
self.name = "Valgrind"
self.stopall = SimulStopLite(cm)
self.startall = SimulStartLite(cm)
self.is_valgrind = 1
self.is_loop = 1
def setup(self, node):
self.incr("calls")
ret = self.stopall(None)
if not ret:
return self.failure("Stop all nodes failed")
# @TODO Edit /etc/sysconfig/pacemaker on all nodes to enable valgrind,
# and clear any valgrind logs from previous runs. For now, we rely on
# the user to do this manually.
ret = self.startall(None)
if not ret:
return self.failure("Start all nodes failed")
return self.success()
def teardown(self, node):
# Return all nodes to normal
# @TODO Edit /etc/sysconfig/pacemaker on all nodes to disable valgrind
ret = self.stopall(None)
if not ret:
return self.failure("Stop all nodes failed")
return self.success()
def find_leaks(self):
# Check for leaks
# (no longer used but kept in case feature is restored)
leaked = []
self.stop = StopTest(self.CM)
for node in self.Env["nodes"]:
rc = self.stop(node)
if not rc:
self.failure("Couldn't shut down %s" % node)
rc = self.rsh(node, "grep -e indirectly.*lost:.*[1-9] -e definitely.*lost:.*[1-9] -e (ERROR|error).*SUMMARY:.*[1-9].*errors %s" % self.logger.logPat, 0)
if rc != 1:
leaked.append(node)
self.failure("Valgrind errors detected on %s" % node)
(rc, output) = self.rsh(node, "grep -e lost: -e SUMMARY: %s" % self.logger.logPat, None)
for line in output:
self.logger.log(line)
(rc, output) = self.rsh(node, "cat %s" % self.logger.logPat, None)
for line in output:
self.debug(line)
self.rsh(node, "rm -f %s" % self.logger.logPat, None)
return leaked
def __call__(self, node):
#leaked = self.find_leaks()
#if len(leaked) > 0:
# return self.failure("Nodes %s leaked" % repr(leaked))
return self.success()
def errorstoignore(self):
'''Return list of errors which should be ignored'''
return [
r"pacemaker-based.*: \*\*\*\*\*\*\*\*\*\*\*\*\*",
r"pacemaker-based.*: .* avoid confusing Valgrind",
r"HA_VALGRIND_ENABLED",
]
class StandbyLoopTest(ValgrindTest):
'''Check for memory leaks by repeatedly putting a node in and out of standby for the configured loop-minutes'''
# @TODO This is not a useful test for memory leaks
def __init__(self, cm):
ValgrindTest.__init__(self,cm)
self.name = "StandbyLoop"
def __call__(self, node):
lpc = 0
delay = 2
failed = 0
done = time.time() + self.Env["loop-minutes"] * 60
while time.time() <= done and not failed:
lpc = lpc + 1
time.sleep(delay)
if not self.CM.SetStandbyMode(node, "on"):
self.failure("can't set node %s to standby mode" % node)
failed = lpc
time.sleep(delay)
if not self.CM.SetStandbyMode(node, "off"):
self.failure("can't set node %s to active mode" % node)
failed = lpc
leaked = self.find_leaks()
if failed:
return self.failure("Iteration %d failed" % failed)
elif len(leaked) > 0:
return self.failure("Nodes %s leaked" % repr(leaked))
return self.success()
#AllTestClasses.append(StandbyLoopTest)
class BandwidthTest(CTSTest):
# Tests should not be cluster-manager-specific
# If you need to find out cluster manager configuration to do this, then
# it should be added to the generic cluster manager API.
'''Test the bandwidth which the cluster uses'''
def __init__(self, cm):
CTSTest.__init__(self, cm)
self.name = "Bandwidth"
self.start = StartTest(cm)
self.__setitem__("min",0)
self.__setitem__("max",0)
self.__setitem__("totalbandwidth",0)
(handle, self.tempfile) = tempfile.mkstemp(".cts")
os.close(handle)
self.startall = SimulStartLite(cm)
def __call__(self, node):
'''Perform the Bandwidth test'''
self.incr("calls")
if self.CM.upcount() < 1:
return self.skipped()
Path = self.CM.InternalCommConfig()
if "ip" not in Path["mediatype"]:
return self.skipped()
port = Path["port"][0]
port = int(port)
ret = self.startall(None)
if not ret:
return self.failure("Test setup failed")
time.sleep(5) # We get extra messages right after startup.
fstmpfile = "/var/run/band_estimate"
dumpcmd = "tcpdump -p -n -c 102 -i any udp port %d > %s 2>&1" \
% (port, fstmpfile)
rc = self.rsh(node, dumpcmd)
if rc == 0:
farfile = "root@%s:%s" % (node, fstmpfile)
self.rsh.cp(farfile, self.tempfile)
Bandwidth = self.countbandwidth(self.tempfile)
if not Bandwidth:
self.logger.log("Could not compute bandwidth.")
return self.success()
intband = int(Bandwidth + 0.5)
self.logger.log("...bandwidth: %d bits/sec" % intband)
self.Stats["totalbandwidth"] = self.Stats["totalbandwidth"] + Bandwidth
if self.Stats["min"] == 0:
self.Stats["min"] = Bandwidth
if Bandwidth > self.Stats["max"]:
self.Stats["max"] = Bandwidth
if Bandwidth < self.Stats["min"]:
self.Stats["min"] = Bandwidth
self.rsh(node, "rm -f %s" % fstmpfile)
os.unlink(self.tempfile)
return self.success()
else:
return self.failure("no response from tcpdump command [%d]!" % rc)
def countbandwidth(self, file):
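# Rough parsing of the tcpdump capture: for up to ~100 UDP packets, add up
# the value following the "udp"/"length:" token on each line, take the
# timestamps (HH:MM:SS.usec) of the first and last counted packets, and
# return the estimated rate in bits per second.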
fp = open(file, "r")
fp.seek(0)
count = 0
sum = 0
while 1:
line = fp.readline()
if not line:
return None
if re.search("udp",line) or re.search("UDP,", line):
count = count + 1
linesplit = line.split(" ")
for j in range(len(linesplit)-1):
if linesplit[j] == "udp": break
if linesplit[j] == "length:": break
try:
sum = sum + int(linesplit[j+1])
except ValueError:
self.logger.log("Invalid tcpdump line: %s" % line)
return None
T1 = linesplit[0]
timesplit = T1.split(":")
time2split = timesplit[2].split(".")
time1 = (int(timesplit[0])*60+int(timesplit[1]))*60+int(time2split[0])+int(time2split[1])*0.000001
break
while count < 100:
line = fp.readline()
if not line:
return None
if re.search("udp",line) or re.search("UDP,", line):
count = count+1
linessplit = line.split(" ")
for j in range(len(linessplit)-1):
if linessplit[j] == "udp": break
if linessplit[j] == "length:": break
try:
sum = int(linessplit[j+1]) + sum
except ValueError:
self.logger.log("Invalid tcpdump line: %s" % line)
return None
T2 = linessplit[0]
timesplit = T2.split(":")
time2split = timesplit[2].split(".")
time2 = (int(timesplit[0])*60+int(timesplit[1]))*60+int(time2split[0])+int(time2split[1])*0.000001
elapsed = time2 - time1
if elapsed <= 0:
return 0
return int((sum*8)/elapsed)
def is_applicable(self):
'''BandwidthTest never applicable'''
return 0
AllTestClasses.append(BandwidthTest)
###################################################################
class MaintenanceMode(CTSTest):
###################################################################
def __init__(self, cm):
CTSTest.__init__(self,cm)
self.name = "MaintenanceMode"
self.start = StartTest(cm)
self.startall = SimulStartLite(cm)
self.max = 30
#self.is_unsafe = 1
self.benchmark = 1
self.action = "asyncmon"
self.interval = 0
self.rid = "maintenanceDummy"
def toggleMaintenanceMode(self, node, action):
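# Build the expected log patterns, flip maintenance mode via the CTS command
# template, and (when turning it on) deliberately fail the dummy resource so
# recovery can be checked later; returns "" on success, or the unmatched
# patterns as a string so the caller can accumulate failures.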
pats = []
pats.append(self.templates["Pat:DC_IDLE"])
# fail the resource right after turning Maintenance mode on
# verify it is not recovered until maintenance mode is turned off
if action == "On":
pats.append(self.templates["Pat:RscOpFail"] % (self.action, self.rid))
else:
pats.append(self.templates["Pat:RscOpOK"] % ("stop", self.rid))
pats.append(self.templates["Pat:RscOpOK"] % ("start", self.rid))
watch = self.create_watch(pats, 60)
watch.setwatch()
self.debug("Turning maintenance mode %s" % action)
self.rsh(node, self.templates["MaintenanceMode%s" % (action)])
if (action == "On"):
self.rsh(node, "crm_resource -V -F -r %s -H %s &>/dev/null" % (self.rid, node))
self.set_timer("recover%s" % (action))
watch.lookforall()
self.log_timer("recover%s" % (action))
if watch.unmatched:
self.debug("Failed to find patterns when turning maintenance mode %s" % action)
return repr(watch.unmatched)
return ""
def insertMaintenanceDummy(self, node):
pats = []
pats.append(("%s.*" % node) + (self.templates["Pat:RscOpOK"] % ("start", self.rid)))
watch = self.create_watch(pats, 60)
watch.setwatch()
self.CM.AddDummyRsc(node, self.rid)
self.set_timer("addDummy")
watch.lookforall()
self.log_timer("addDummy")
if watch.unmatched:
self.debug("Failed to find patterns when adding maintenance dummy resource")
return repr(watch.unmatched)
return ""
def removeMaintenanceDummy(self, node):
pats = []
pats.append(self.templates["Pat:RscOpOK"] % ("stop", self.rid))
watch = self.create_watch(pats, 60)
watch.setwatch()
self.CM.RemoveDummyRsc(node, self.rid)
self.set_timer("removeDummy")
watch.lookforall()
self.log_timer("removeDummy")
if watch.unmatched:
self.debug("Failed to find patterns when removing maintenance dummy resource")
return repr(watch.unmatched)
return ""
def managedRscList(self, node):
rscList = []
(rc, lines) = self.rsh(node, "crm_resource -c", None)
for line in lines:
if re.search("^Resource", line):
tmp = AuditResource(self.CM, line)
if tmp.managed():
rscList.append(tmp.id)
return rscList
def verifyResources(self, node, rscList, managed):
managedList = list(rscList)
managed_str = "managed"
if not managed:
managed_str = "unmanaged"
(rc, lines) = self.rsh(node, "crm_resource -c", None)
for line in lines:
if re.search("^Resource", line):
tmp = AuditResource(self.CM, line)
if managed and not tmp.managed():
continue
elif not managed and tmp.managed():
continue
elif managedList.count(tmp.id):
managedList.remove(tmp.id)
if len(managedList) == 0:
self.debug("Found all %s resources on %s" % (managed_str, node))
return True
self.logger.log("Could not find all %s resources on %s. %s" % (managed_str, node, managedList))
return False
def __call__(self, node):
'''Perform the 'MaintenanceMode' test. '''
self.incr("calls")
verify_managed = False
verify_unmanaged = False
failPat = ""
ret = self.startall(None)
if not ret:
return self.failure("Setup failed")
# get a list of all the managed resources. We use this list
# after enabling maintenance mode to verify all managed resources
# become un-managed. After maintenance mode is turned off, we use
# this list to verify all the resources become managed again.
managedResources = self.managedRscList(node)
if len(managedResources) == 0:
self.logger.log("No managed resources on %s" % node)
return self.skipped()
# insert a fake resource we can fail during maintenance mode
# so we can verify recovery does not take place until after maintenance
# mode is disabled.
failPat = failPat + self.insertMaintenanceDummy(node)
# toggle maintenance mode ON, then fail dummy resource.
failPat = failPat + self.toggleMaintenanceMode(node, "On")
# verify all the resources are now unmanaged
if self.verifyResources(node, managedResources, False):
verify_unmanaged = True
# Toggle maintenance mode OFF, verify dummy is recovered.
failPat = failPat + self.toggleMaintenanceMode(node, "Off")
# verify all the resources are now managed again
if self.verifyResources(node, managedResources, True):
verify_managed = True
# Remove our maintenance dummy resource.
failPat = failPat + self.removeMaintenanceDummy(node)
self.CM.cluster_stable()
if failPat != "":
return self.failure("Unmatched patterns: %s" % (failPat))
elif verify_unmanaged is False:
return self.failure("Failed to verify resources became unmanaged during maintenance mode")
elif verify_managed is False:
return self.failure("Failed to verify resources switched back to managed after disabling maintenance mode")
return self.success()
def errorstoignore(self):
'''Return list of errors which should be ignored'''
return [
r"Updating failcount for %s" % self.rid,
r"schedulerd.*: Recover %s\s*\(.*\)" % self.rid,
r"Unknown operation: fail",
self.templates["Pat:RscOpOK"] % (self.action, self.rid),
r"(ERROR|error).*: Action %s_%s_%d .* initiated outside of a transition" % (self.rid, self.action, self.interval),
]
AllTestClasses.append(MaintenanceMode)
class ResourceRecover(CTSTest):
def __init__(self, cm):
CTSTest.__init__(self,cm)
self.name = "ResourceRecover"
self.start = StartTest(cm)
self.startall = SimulStartLite(cm)
self.max = 30
self.rid = None
self.rid_alt = None
#self.is_unsafe = 1
self.benchmark = 1
# these are the values used for the new LRM API call
self.action = "asyncmon"
self.interval = 0
def __call__(self, node):
'''Perform the 'ResourceRecover' test. '''
self.incr("calls")
ret = self.startall(None)
if not ret:
return self.failure("Setup failed")
resourcelist = self.CM.active_resources(node)
# if there are no active resources on this node, skip the test
if len(resourcelist) == 0:
self.logger.log("No active resources on %s" % node)
return self.skipped()
self.rid = self.Env.RandomGen.choice(resourcelist)
self.rid_alt = self.rid
rsc = None
(rc, lines) = self.rsh(node, "crm_resource -c", None)
for line in lines:
if re.search("^Resource", line):
tmp = AuditResource(self.CM, line)
if tmp.id == self.rid:
rsc = tmp
# Handle anonymous clones that get renamed
self.rid = rsc.clone_id
break
if not rsc:
return self.failure("Could not find %s in the resource list" % self.rid)
self.debug("Shooting %s aka. %s" % (rsc.clone_id, rsc.id))
pats = []
pats.append(self.templates["Pat:CloneOpFail"] % (self.action, rsc.id, rsc.clone_id))
if rsc.managed():
pats.append(self.templates["Pat:RscOpOK"] % ("stop", self.rid))
if rsc.unique():
pats.append(self.templates["Pat:RscOpOK"] % ("start", self.rid))
else:
# Anonymous clones may get restarted with a different clone number
pats.append(self.templates["Pat:RscOpOK"] % ("start", ".*"))
watch = self.create_watch(pats, 60)
watch.setwatch()
self.rsh(node, "crm_resource -V -F -r %s -H %s &>/dev/null" % (self.rid, node))
self.set_timer("recover")
watch.lookforall()
self.log_timer("recover")
self.CM.cluster_stable()
recovered = self.CM.ResourceLocation(self.rid)
if watch.unmatched:
return self.failure("Patterns not found: %s" % repr(watch.unmatched))
elif rsc.unique() and len(recovered) > 1:
return self.failure("%s is now active on more than one node: %s"%(self.rid, repr(recovered)))
elif len(recovered) > 0:
self.debug("%s is running on: %s" % (self.rid, repr(recovered)))
elif rsc.managed():
return self.failure("%s was not recovered and is inactive" % self.rid)
return self.success()
def errorstoignore(self):
'''Return list of errors which should be ignored'''
return [
r"Updating failcount for %s" % self.rid,
r"schedulerd.*: Recover (%s|%s)\s*\(.*\)" % (self.rid, self.rid_alt),
r"Unknown operation: fail",
self.templates["Pat:RscOpOK"] % (self.action, self.rid),
r"(ERROR|error).*: Action %s_%s_%d .* initiated outside of a transition" % (self.rid, self.action, self.interval),
]
AllTestClasses.append(ResourceRecover)
class ComponentFail(CTSTest):
def __init__(self, cm):
CTSTest.__init__(self,cm)
self.name = "ComponentFail"
# TODO make this work correctly in docker.
self.is_docker_unsafe = 1
self.startall = SimulStartLite(cm)
self.complist = cm.Components()
self.patterns = []
self.okerrpatterns = []
self.is_unsafe = 1
def __call__(self, node):
'''Perform the 'ComponentFail' test. '''
self.incr("calls")
self.patterns = []
self.okerrpatterns = []
# start all nodes
ret = self.startall(None)
if not ret:
return self.failure("Setup failed")
if not self.CM.cluster_stable(self.Env["StableTime"]):
return self.failure("Setup failed - unstable")
node_is_dc = self.CM.is_node_dc(node, None)
# select a component to kill
chosen = self.Env.RandomGen.choice(self.complist)
while chosen.dc_only == 1 and node_is_dc == 0:
chosen = self.Env.RandomGen.choice(self.complist)
self.debug("...component %s (dc=%d,boot=%d)" % (chosen.name, node_is_dc,chosen.triggersreboot))
self.incr(chosen.name)
if chosen.name != "corosync":
self.patterns.append(self.templates["Pat:ChildKilled"] %(node, chosen.name))
self.patterns.append(self.templates["Pat:ChildRespawn"] %(node, chosen.name))
self.patterns.extend(chosen.pats)
if node_is_dc:
self.patterns.extend(chosen.dc_pats)
# @TODO this should be a flag in the Component
if chosen.name in [ "corosync", "pacemaker-based", "pacemaker-fenced" ]:
# Ignore actions for fence devices if fencer will respawn
# (their registration will be lost, and probes will fail)
self.okerrpatterns = [ self.templates["Pat:Fencing_active"] ]
(rc, lines) = self.rsh(node, "crm_resource -c", None)
for line in lines:
if re.search("^Resource", line):
r = AuditResource(self.CM, line)
if r.rclass == "stonith":
self.okerrpatterns.append(self.templates["Pat:Fencing_recover"] % r.id)
self.okerrpatterns.append(self.templates["Pat:Fencing_probe"] % r.id)
# supply a copy so self.patterns doesn't end up empty
tmpPats = []
tmpPats.extend(self.patterns)
self.patterns.extend(chosen.badnews_ignore)
# Look for STONITH ops, depending on Env["at-boot"] we might need to change the nodes status
stonithPats = []
stonithPats.append(self.templates["Pat:Fencing_ok"] % node)
stonith = self.create_watch(stonithPats, 0)
stonith.setwatch()
# set the watch for stable
watch = self.create_watch(
tmpPats, self.Env["DeadTime"] + self.Env["StableTime"] + self.Env["StartTime"])
watch.setwatch()
# kill the component
chosen.kill(node)
self.debug("Waiting for the cluster to recover")
self.CM.cluster_stable()
self.debug("Waiting for any fenced node to come back up")
self.CM.ns.WaitForAllNodesToComeUp(self.Env["nodes"], 600)
self.debug("Waiting for the cluster to re-stabilize with all nodes")
self.CM.cluster_stable(self.Env["StartTime"])
self.debug("Checking if %s was shot" % node)
shot = stonith.look(60)
if shot:
self.debug("Found: " + repr(shot))
self.okerrpatterns.append(self.templates["Pat:Fencing_start"] % node)
if self.Env["at-boot"] == 0:
self.CM.ShouldBeStatus[node] = "down"
# If fencing occurred, chances are many (if not all) the expected logs
# will not be sent - or will be lost when the node reboots
return self.success()
# check for logs indicating a graceful recovery
matched = watch.lookforall(allow_multiple_matches=1)
if watch.unmatched:
self.logger.log("Patterns not found: " + repr(watch.unmatched))
self.debug("Waiting for the cluster to re-stabilize with all nodes")
is_stable = self.CM.cluster_stable(self.Env["StartTime"])
if not matched:
return self.failure("Didn't find all expected %s patterns" % chosen.name)
elif not is_stable:
return self.failure("Cluster did not become stable after killing %s" % chosen.name)
return self.success()
def errorstoignore(self):
'''Return list of errors which should be ignored'''
# Note that okerrpatterns refers to the last time we ran this test
# The good news is that this works fine for us...
self.okerrpatterns.extend(self.patterns)
return self.okerrpatterns
AllTestClasses.append(ComponentFail)
class SplitBrainTest(CTSTest):
'''Test split-brain: when the path between the nodes breaks, check
whether both sides take over the resources'''
def __init__(self,cm):
CTSTest.__init__(self,cm)
self.name = "SplitBrain"
self.start = StartTest(cm)
self.startall = SimulStartLite(cm)
self.is_experimental = 1
def isolate_partition(self, partition):
other_nodes = []
other_nodes.extend(self.Env["nodes"])
for node in partition:
try:
other_nodes.remove(node)
except ValueError:
self.logger.log("Node "+node+" not in " + repr(self.Env["nodes"]) + " from " +repr(partition))
if len(other_nodes) == 0:
return 1
self.debug("Creating partition: " + repr(partition))
self.debug("Everyone else: " + repr(other_nodes))
for node in partition:
if not self.CM.isolate_node(node, other_nodes):
self.logger.log("Could not isolate %s" % node)
return 0
return 1
def heal_partition(self, partition):
other_nodes = []
other_nodes.extend(self.Env["nodes"])
for node in partition:
try:
other_nodes.remove(node)
except ValueError:
self.logger.log("Node "+node+" not in " + repr(self.Env["nodes"]))
if len(other_nodes) == 0:
return 1
self.debug("Healing partition: " + repr(partition))
self.debug("Everyone else: " + repr(other_nodes))
for node in partition:
self.CM.unisolate_node(node, other_nodes)
def __call__(self, node):
'''Perform split-brain test'''
self.incr("calls")
self.passed = 1
partitions = {}
ret = self.startall(None)
if not ret:
return self.failure("Setup failed")
while 1:
# Retry until we get multiple partitions
partitions = {}
p_max = len(self.Env["nodes"])
for node in self.Env["nodes"]:
p = self.Env.RandomGen.randint(1, p_max)
if not p in partitions:
partitions[p] = []
partitions[p].append(node)
p_max = len(list(partitions.keys()))
if p_max > 1:
break
# else, try again
self.debug("Created %d partitions" % p_max)
for key in list(partitions.keys()):
self.debug("Partition["+str(key)+"]:\t"+repr(partitions[key]))
# Disabling STONITH to reduce test complexity for now
self.rsh(node, "crm_attribute -V -n stonith-enabled -v false")
for key in list(partitions.keys()):
self.isolate_partition(partitions[key])
count = 30
while count > 0:
if len(self.CM.find_partitions()) != p_max:
time.sleep(10)
count -= 1
else:
break
else:
self.failure("Expected partitions were not created")
# Target number of partitions formed - wait for stability
if not self.CM.cluster_stable():
self.failure("Partitioned cluster not stable")
# Now audit the cluster state
self.CM.partitions_expected = p_max
if not self.audit():
self.failure("Audits failed")
self.CM.partitions_expected = 1
# And heal them again
for key in list(partitions.keys()):
self.heal_partition(partitions[key])
# Wait for a single partition to form
count = 30
while count > 0:
if len(self.CM.find_partitions()) != 1:
time.sleep(10)
count -= 1
else:
break
else:
self.failure("Cluster did not reform")
# Wait for it to have the right number of members
count = 30
while count > 0:
members = []
partitions = self.CM.find_partitions()
if len(partitions) > 0:
members = partitions[0].split()
if len(members) != len(self.Env["nodes"]):
time.sleep(10)
count -= 1
else:
break
else:
self.failure("Cluster did not completely reform")
# Wait up to 20 minutes - the delay is preferable to trying to
# continue in a messed-up state
if not self.CM.cluster_stable(1200):
self.failure("Reformed cluster not stable")
if self.Env["continue"] == 1:
answer = "Y"
else:
try:
answer = input('Continue? [nY]')
except EOFError as e:
answer = "n"
if answer and answer == "n":
raise ValueError("Reformed cluster not stable")
# Turn fencing back on
if self.Env["DoFencing"]:
self.rsh(node, "crm_attribute -V -D -n stonith-enabled")
self.CM.cluster_stable()
if self.passed:
return self.success()
return self.failure("See previous errors")
def errorstoignore(self):
'''Return list of errors which are 'normal' and should be ignored'''
return [
r"Another DC detected:",
r"(ERROR|error).*: .*Application of an update diff failed",
r"pacemaker-controld.*:.*not in our membership list",
r"CRIT:.*node.*returning after partition",
]
def is_applicable(self):
if not self.is_applicable_common():
return 0
return len(self.Env["nodes"]) > 2
AllTestClasses.append(SplitBrainTest)
class Reattach(CTSTest):
def __init__(self, cm):
CTSTest.__init__(self,cm)
self.name = "Reattach"
self.startall = SimulStartLite(cm)
self.restart1 = RestartTest(cm)
self.stopall = SimulStopLite(cm)
self.is_unsafe = 0 # Handled by canrunnow()
def _is_managed(self, node):
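# Query the cluster-wide is-managed resource default (treating "true" as the
# value when unset) and report whether resource management is enabled.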
is_managed = self.rsh(node, "crm_attribute -t rsc_defaults -n is-managed -q -G -d true", 1)
is_managed = is_managed[:-1] # Strip off the newline
return is_managed == "true"
def _set_unmanaged(self, node):
self.debug("Disable resource management")
self.rsh(node, "crm_attribute -t rsc_defaults -n is-managed -v false")
def _set_managed(self, node):
self.debug("Re-enable resource management")
self.rsh(node, "crm_attribute -t rsc_defaults -n is-managed -D")
def setup(self, node):
attempt = 0
if not self.startall(None):
return None
# Make sure we are really _really_ stable and that all
# resources, including those that depend on transient node
# attributes, are started
while not self.CM.cluster_stable(double_check=True):
if attempt < 5:
attempt += 1
self.debug("Not stable yet, re-testing")
else:
self.logger.log("Cluster is not stable")
return None
return 1
def teardown(self, node):
# Make sure 'node' is up
start = StartTest(self.CM)
start(node)
if not self._is_managed(node):
self.logger.log("Attempting to re-enable resource management on %s" % node)
self._set_managed(node)
self.CM.cluster_stable()
if not self._is_managed(node):
self.logger.log("Could not re-enable resource management")
return 0
return 1
def canrunnow(self, node):
'''Return TRUE if we can meaningfully run right now'''
if self.find_ocfs2_resources(node):
self.logger.log("Detach/Reattach scenarios are not possible with OCFS2 services present")
return 0
return 1
def __call__(self, node):
self.incr("calls")
pats = []
# Conveniently, the scheduler will display this message when disabling
# management, even if fencing is not enabled, so we can rely on it.
- managed = self.create_watch(["Delaying fencing operations"], 60)
+ managed = self.create_watch(["No fencing will be done"], 60)
managed.setwatch()
self._set_unmanaged(node)
if not managed.lookforall():
self.logger.log("Patterns not found: " + repr(managed.unmatched))
return self.failure("Resource management not disabled")
pats = []
pats.append(self.templates["Pat:RscOpOK"] % ("start", ".*"))
pats.append(self.templates["Pat:RscOpOK"] % ("stop", ".*"))
pats.append(self.templates["Pat:RscOpOK"] % ("promote", ".*"))
pats.append(self.templates["Pat:RscOpOK"] % ("demote", ".*"))
pats.append(self.templates["Pat:RscOpOK"] % ("migrate", ".*"))
watch = self.create_watch(pats, 60, "ShutdownActivity")
watch.setwatch()
self.debug("Shutting down the cluster")
ret = self.stopall(None)
if not ret:
self._set_managed(node)
return self.failure("Couldn't shut down the cluster")
self.debug("Bringing the cluster back up")
ret = self.startall(None)
time.sleep(5) # allow ping to update the CIB
if not ret:
self._set_managed(node)
return self.failure("Couldn't restart the cluster")
if self.local_badnews("ResourceActivity:", watch):
self._set_managed(node)
return self.failure("Resources stopped or started during cluster restart")
watch = self.create_watch(pats, 60, "StartupActivity")
watch.setwatch()
# Re-enable resource management (and verify it happened).
self._set_managed(node)
self.CM.cluster_stable()
if not self._is_managed(node):
return self.failure("Could not re-enable resource management")
# Ignore actions for STONITH resources
ignore = []
(rc, lines) = self.rsh(node, "crm_resource -c", None)
for line in lines:
if re.search("^Resource", line):
r = AuditResource(self.CM, line)
if r.rclass == "stonith":
self.debug("Ignoring start actions for %s" % r.id)
ignore.append(self.templates["Pat:RscOpOK"] % ("start", r.id))
if self.local_badnews("ResourceActivity:", watch, ignore):
return self.failure("Resources stopped or started after resource management was re-enabled")
return ret
def errorstoignore(self):
'''Return list of errors which should be ignored'''
return [
r"resource( was|s were) active at shutdown",
]
def is_applicable(self):
return 1
AllTestClasses.append(Reattach)
class SpecialTest1(CTSTest):
'''Set up a custom test to cause quorum failure issues for Andrew'''
def __init__(self, cm):
CTSTest.__init__(self,cm)
self.name = "SpecialTest1"
self.startall = SimulStartLite(cm)
self.restart1 = RestartTest(cm)
self.stopall = SimulStopLite(cm)
def __call__(self, node):
'''Perform the 'SpecialTest1' test for Andrew. '''
self.incr("calls")
# Shut down all the nodes...
ret = self.stopall(None)
if not ret:
return self.failure("Could not stop all nodes")
# Test config recovery when the other nodes come up
self.rsh(node, "rm -f "+CTSvars.CRM_CONFIG_DIR+"/cib*")
# Start the selected node
ret = self.restart1(node)
if not ret:
return self.failure("Could not start "+node)
# Start all remaining nodes
ret = self.startall(None)
if not ret:
return self.failure("Could not start the remaining nodes")
return self.success()
def errorstoignore(self):
'''Return list of errors which should be ignored'''
# Errors that occur as a result of the CIB being wiped
return [
r"error.*: v1 patchset error, patch failed to apply: Application of an update diff failed",
r"error.*: Resource start-up disabled since no STONITH resources have been defined",
r"error.*: Either configure some or disable STONITH with the stonith-enabled option",
r"error.*: NOTE: Clusters with shared data need STONITH to ensure data integrity",
]
AllTestClasses.append(SpecialTest1)
class HAETest(CTSTest):
'''Base class for tests of the HA Extension (HAE) resource stack (DLM/O2CB/OCFS2)'''
def __init__(self, cm):
CTSTest.__init__(self,cm)
self.name = "HAETest"
self.stopall = SimulStopLite(cm)
self.startall = SimulStartLite(cm)
self.is_loop = 1
def setup(self, node):
# Start all nodes
ret = self.startall(None)
if not ret:
return self.failure("Couldn't start all nodes")
return self.success()
def teardown(self, node):
# Stop everything
ret = self.stopall(None)
if not ret:
return self.failure("Couldn't stop all nodes")
return self.success()
def wait_on_state(self, node, resource, expected_clones, attempts=240):
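# Poll "crm_resource -W -Q" for the resource until the number of output
# lines (a stand-in for active instances, per the hack noted below) matches
# expected_clones, translating the tool's exit codes into retry or hard
# failure; gives up after the given number of one-second attempts.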
while attempts > 0:
active = 0
(rc, lines) = self.rsh(node, "crm_resource -r %s -W -Q" % resource, stdout=None)
# Hack until crm_resource does the right thing
if rc == 0 and lines:
active = len(lines)
if len(lines) == expected_clones:
return 1
elif rc == 1:
self.debug("Resource %s is still inactive" % resource)
elif rc == 234:
self.logger.log("Unknown resource %s" % resource)
return 0
elif rc == 246:
self.logger.log("Cluster is inactive")
return 0
elif rc != 0:
self.logger.log("Call to crm_resource failed, rc=%d" % rc)
return 0
else:
self.debug("Resource %s is active on %d times instead of %d" % (resource, active, expected_clones))
attempts -= 1
time.sleep(1)
return 0
def find_dlm(self, node):
self.r_dlm = None
(rc, lines) = self.rsh(node, "crm_resource -c", None)
for line in lines:
if re.search("^Resource", line):
r = AuditResource(self.CM, line)
if r.rtype == "controld" and r.parent != "NA":
self.debug("Found dlm: %s" % self.r_dlm)
self.r_dlm = r.parent
return 1
return 0
def find_hae_resources(self, node):
self.r_dlm = None
self.r_o2cb = None
self.r_ocfs2 = []
if self.find_dlm(node):
self.find_ocfs2_resources(node)
def is_applicable(self):
if not self.is_applicable_common():
return 0
if self.Env["Schema"] == "hae":
return 1
return None
class HAERoleTest(HAETest):
def __init__(self, cm):
'''Lars' mount/unmount test for the HA extension. '''
HAETest.__init__(self,cm)
self.name = "HAERoleTest"
def change_state(self, node, resource, target):
rc = self.rsh(node, "crm_resource -V -r %s -p target-role -v %s --meta" % (resource, target))
return rc
def __call__(self, node):
self.incr("calls")
lpc = 0
failed = 0
delay = 2
done = time.time() + self.Env["loop-minutes"]*60
self.find_hae_resources(node)
clone_max = len(self.Env["nodes"])
while time.time() <= done and not failed:
lpc = lpc + 1
self.change_state(node, self.r_dlm, "Stopped")
if not self.wait_on_state(node, self.r_dlm, 0):
self.failure("%s did not go down correctly" % self.r_dlm)
failed = lpc
self.change_state(node, self.r_dlm, "Started")
if not self.wait_on_state(node, self.r_dlm, clone_max):
self.failure("%s did not come up correctly" % self.r_dlm)
failed = lpc
if not self.wait_on_state(node, self.r_o2cb, clone_max):
self.failure("%s did not come up correctly" % self.r_o2cb)
failed = lpc
for fs in self.r_ocfs2:
if not self.wait_on_state(node, fs, clone_max):
self.failure("%s did not come up correctly" % fs)
failed = lpc
if failed:
return self.failure("iteration %d failed" % failed)
return self.success()
AllTestClasses.append(HAERoleTest)
class HAEStandbyTest(HAETest):
'''Put a node in and out of standby and verify the HAE resource stack recovers'''
def __init__(self, cm):
HAETest.__init__(self,cm)
self.name = "HAEStandbyTest"
def change_state(self, node, resource, target):
rc = self.rsh(node, "crm_standby -V -l reboot -v %s" % (target))
return rc
def __call__(self, node):
self.incr("calls")
lpc = 0
failed = 0
done = time.time() + self.Env["loop-minutes"]*60
self.find_hae_resources(node)
clone_max = len(self.Env["nodes"])
while time.time() <= done and not failed:
lpc = lpc + 1
self.change_state(node, self.r_dlm, "true")
if not self.wait_on_state(node, self.r_dlm, clone_max-1):
self.failure("%s did not go down correctly" % self.r_dlm)
failed = lpc
self.change_state(node, self.r_dlm, "false")
if not self.wait_on_state(node, self.r_dlm, clone_max):
self.failure("%s did not come up correctly" % self.r_dlm)
failed = lpc
if not self.wait_on_state(node, self.r_o2cb, clone_max):
self.failure("%s did not come up correctly" % self.r_o2cb)
failed = lpc
for fs in self.r_ocfs2:
if not self.wait_on_state(node, fs, clone_max):
self.failure("%s did not come up correctly" % fs)
failed = lpc
if failed:
return self.failure("iteration %d failed" % failed)
return self.success()
AllTestClasses.append(HAEStandbyTest)
class NearQuorumPointTest(CTSTest):
'''
This test brings larger clusters near the quorum point (50%).
In addition, it will test doing starts and stops at the same time.
Here is how I think it should work:
- loop over the nodes and decide randomly which will be up and which
will be down. Use a 50% probability for each of up/down.
- figure out what to do to get into that state from the current state
- in parallel, bring up those going up and bring those going down.
'''
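# For example (illustrative only), with four nodes a random draw might put
# node1 and node3 in startset and node2 and node4 in stopset; the watch
# patterns below then cover each transition actually required from the
# current ShouldBeStatus of every node.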
def __init__(self, cm):
CTSTest.__init__(self,cm)
self.name = "NearQuorumPoint"
def __call__(self, dummy):
'''Perform the 'NearQuorumPoint' test. '''
self.incr("calls")
startset = []
stopset = []
stonith = self.CM.prepare_fencing_watcher("NearQuorumPoint")
#decide what to do with each node
for node in self.Env["nodes"]:
action = self.Env.RandomGen.choice(["start","stop"])
#action = self.Env.RandomGen.choice(["start","stop","no change"])
if action == "start" :
startset.append(node)
elif action == "stop" :
stopset.append(node)
self.debug("start nodes:" + repr(startset))
self.debug("stop nodes:" + repr(stopset))
#add search patterns
watchpats = [ ]
for node in stopset:
if self.CM.ShouldBeStatus[node] == "up":
watchpats.append(self.templates["Pat:We_stopped"] % node)
for node in startset:
if self.CM.ShouldBeStatus[node] == "down":
#watchpats.append(self.templates["Pat:NonDC_started"] % node)
watchpats.append(self.templates["Pat:Local_started"] % node)
else:
for stopping in stopset:
if self.CM.ShouldBeStatus[stopping] == "up":
watchpats.append(self.templates["Pat:They_stopped"] % (node, self.CM.key_for_node(stopping)))
if len(watchpats) == 0:
return self.skipped()
if len(startset) != 0:
watchpats.append(self.templates["Pat:DC_IDLE"])
watch = self.create_watch(watchpats, self.Env["DeadTime"]+10)
watch.setwatch()
#begin actions
for node in stopset:
if self.CM.ShouldBeStatus[node] == "up":
self.CM.StopaCMnoBlock(node)
for node in startset:
if self.CM.ShouldBeStatus[node] == "down":
self.CM.StartaCMnoBlock(node)
#get the result
if watch.lookforall():
self.CM.cluster_stable()
self.CM.fencing_cleanup("NearQuorumPoint", stonith)
return self.success()
self.logger.log("Warn: Patterns not found: " + repr(watch.unmatched))
#get the "bad" nodes
upnodes = []
for node in stopset:
if self.CM.StataCM(node) == 1:
upnodes.append(node)
downnodes = []
for node in startset:
if self.CM.StataCM(node) == 0:
downnodes.append(node)
self.CM.fencing_cleanup("NearQuorumPoint", stonith)
if upnodes == [] and downnodes == []:
self.CM.cluster_stable()
# Make sure they're completely down with no residue
for node in stopset:
self.rsh(node, self.templates["StopCmd"])
return self.success()
if len(upnodes) > 0:
self.logger.log("Warn: Unstoppable nodes: " + repr(upnodes))
if len(downnodes) > 0:
self.logger.log("Warn: Unstartable nodes: " + repr(downnodes))
return self.failure()
def is_applicable(self):
return 1
AllTestClasses.append(NearQuorumPointTest)
class RollingUpgradeTest(CTSTest):
'''Perform a rolling upgrade of the cluster'''
def __init__(self, cm):
CTSTest.__init__(self,cm)
self.name = "RollingUpgrade"
self.start = StartTest(cm)
self.stop = StopTest(cm)
self.stopall = SimulStopLite(cm)
self.startall = SimulStartLite(cm)
def setup(self, node):
# Stop all nodes, downgrade them, then start them all again
ret = self.stopall(None)
if not ret:
return self.failure("Couldn't stop all nodes")
for node in self.Env["nodes"]:
if not self.downgrade(node, None):
return self.failure("Couldn't downgrade %s" % node)
ret = self.startall(None)
if not ret:
return self.failure("Couldn't start all nodes")
return self.success()
def teardown(self, node):
# Stop everything and upgrade back to the current version
ret = self.stopall(None)
if not ret:
return self.failure("Couldn't stop all nodes")
for node in self.Env["nodes"]:
if not self.upgrade(node, None):
return self.failure("Couldn't upgrade %s" % node)
return self.success()
def install(self, node, version, start=1, flags="--force"):
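# Stop the cluster on the node, copy the RPMs for the requested version from
# the local rpm-dir into a scratch directory on the node, force-install them,
# and optionally restart the cluster there.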
target_dir = "/tmp/rpm-%s" % version
src_dir = "%s/%s" % (self.Env["rpm-dir"], version)
self.logger.log("Installing %s on %s with %s" % (version, node, flags))
if not self.stop(node):
return self.failure("stop failure: "+node)
rc = self.rsh(node, "mkdir -p %s" % target_dir)
rc = self.rsh(node, "rm -f %s/*.rpm" % target_dir)
(rc, lines) = self.rsh(node, "ls -1 %s/*.rpm" % src_dir, None)
for line in lines:
line = line[:-1]
rc = self.rsh.cp("%s" % (line), "%s:%s/" % (node, target_dir))
rc = self.rsh(node, "rpm -Uvh %s %s/*.rpm" % (flags, target_dir))
if start and not self.start(node):
return self.failure("start failure: "+node)
return self.success()
def upgrade(self, node, start=1):
return self.install(node, self.Env["current-version"], start)
def downgrade(self, node, start=1):
return self.install(node, self.Env["previous-version"], start, "--force --nodeps")
def __call__(self, node):
'''Perform the 'Rolling Upgrade' test. '''
self.incr("calls")
for node in self.Env["nodes"]:
if not self.upgrade(node):
return self.failure("Couldn't upgrade %s" % node)
self.CM.cluster_stable()
return self.success()
def is_applicable(self):
if not self.is_applicable_common():
return None
if not "rpm-dir" in list(self.Env.keys()):
return None
if not "current-version" in list(self.Env.keys()):
return None
if not "previous-version" in list(self.Env.keys()):
return None
return 1
# Register RollingUpgradeTest as a good test to run
AllTestClasses.append(RollingUpgradeTest)
class BSC_AddResource(CTSTest):
'''Add a resource to the cluster'''
def __init__(self, cm):
CTSTest.__init__(self, cm)
self.name = "AddResource"
self.resource_offset = 0
self.cib_cmd = """cibadmin -C -o %s -X '%s' """
def __call__(self, node):
self.incr("calls")
self.resource_offset = self.resource_offset + 1
r_id = "bsc-rsc-%s-%d" % (node, self.resource_offset)
start_pat = "pacemaker-controld.*%s_start_0.*confirmed.*ok"
patterns = []
patterns.append(start_pat % r_id)
watch = self.create_watch(patterns, self.Env["DeadTime"])
watch.setwatch()
ip = self.NextIP()
if not self.make_ip_resource(node, r_id, "ocf", "IPaddr", ip):
return self.failure("Make resource %s failed" % r_id)
failed = 0
watch_result = watch.lookforall()
if watch.unmatched:
for regex in watch.unmatched:
self.logger.log ("Warn: Pattern not found: %s" % (regex))
failed = 1
if failed:
return self.failure("Resource pattern(s) not found")
if not self.CM.cluster_stable(self.Env["DeadTime"]):
return self.failure("Unstable cluster")
return self.success()
def NextIP(self):
    ip = self.Env["IPBase"]
    if ":" in ip:
        # IPv6: increment the final hextet
        (prefix, sep, suffix) = ip.rpartition(":")
        suffix = str(hex(int(suffix, 16)+1)).lstrip("0x")
    else:
        # IPv4: increment the final octet
        (prefix, sep, suffix) = ip.rpartition(".")
        suffix = str(int(suffix)+1)
    ip = prefix + sep + suffix
    self.Env["IPBase"] = ip
    return ip.strip()
def make_ip_resource(self, node, id, rclass, type, ip):
self.logger.log("Creating %s:%s:%s (%s) on %s" % (rclass,type,id,ip,node))
rsc_xml="""
""" % (id, rclass, type, id, id, ip)
node_constraint = """
""" % (id, id, id, id, node)
rc = 0
(rc, lines) = self.rsh(node, self.cib_cmd % ("constraints", node_constraint), None)
if rc != 0:
self.logger.log("Constraint creation failed: %d" % rc)
return None
(rc, lines) = self.rsh(node, self.cib_cmd % ("resources", rsc_xml), None)
if rc != 0:
self.logger.log("Resource creation failed: %d" % rc)
return None
return 1
def is_applicable(self):
if self.Env["DoBSC"]:
return 1
return None
AllTestClasses.append(BSC_AddResource)
class SimulStopLite(CTSTest):
'''Stop any active nodes ~ simultaneously'''
def __init__(self, cm):
CTSTest.__init__(self,cm)
self.name = "SimulStopLite"
def __call__(self, dummy):
'''Perform the 'SimulStopLite' setup work. '''
self.incr("calls")
self.debug("Setup: " + self.name)
# We ignore the "node" parameter...
watchpats = [ ]
for node in self.Env["nodes"]:
if self.CM.ShouldBeStatus[node] == "up":
self.incr("WasStarted")
watchpats.append(self.templates["Pat:We_stopped"] % node)
if len(watchpats) == 0:
return self.success()
# Stop all the nodes - at about the same time...
watch = self.create_watch(watchpats, self.Env["DeadTime"]+10)
watch.setwatch()
self.set_timer()
for node in self.Env["nodes"]:
if self.CM.ShouldBeStatus[node] == "up":
self.CM.StopaCMnoBlock(node)
if watch.lookforall():
# Make sure they're completely down with no residual processes
for node in self.Env["nodes"]:
self.rsh(node, self.templates["StopCmd"])
return self.success()
did_fail = 0
up_nodes = []
for node in self.Env["nodes"]:
if self.CM.StataCM(node) == 1:
did_fail = 1
up_nodes.append(node)
if did_fail:
return self.failure("Active nodes exist: " + repr(up_nodes))
self.logger.log("Warn: All nodes stopped but CTS didn't detect: "
+ repr(watch.unmatched))
return self.failure("Missing log message: "+repr(watch.unmatched))
def is_applicable(self):
'''SimulStopLite is a setup test and never applicable'''
return 0
class SimulStartLite(CTSTest):
'''Start any stopped nodes ~ simultaneously'''
def __init__(self, cm):
CTSTest.__init__(self,cm)
self.name = "SimulStartLite"
def __call__(self, dummy):
'''Perform the 'SimulStartLite' setup work. '''
self.incr("calls")
self.debug("Setup: " + self.name)
# We ignore the "node" parameter...
node_list = []
for node in self.Env["nodes"]:
if self.CM.ShouldBeStatus[node] == "down":
self.incr("WasStopped")
node_list.append(node)
self.set_timer()
while len(node_list) > 0:
# Repeat until all nodes come up
watchpats = [ ]
uppat = self.templates["Pat:NonDC_started"]
if self.CM.upcount() == 0:
uppat = self.templates["Pat:Local_started"]
watchpats.append(self.templates["Pat:DC_IDLE"])
for node in node_list:
watchpats.append(uppat % node)
watchpats.append(self.templates["Pat:InfraUp"] % node)
watchpats.append(self.templates["Pat:PacemakerUp"] % node)
# Start all the nodes - at about the same time...
watch = self.create_watch(watchpats, self.Env["DeadTime"]+10)
watch.setwatch()
stonith = self.CM.prepare_fencing_watcher(self.name)
for node in node_list:
self.CM.StartaCMnoBlock(node)
watch.lookforall()
node_list = self.CM.fencing_cleanup(self.name, stonith)
if node_list == None:
return self.failure("Cluster did not stabilize")
# Remove node_list messages from watch.unmatched
for node in node_list:
self.logger.debug("Dealing with stonith operations for %s" % repr(node_list))
if watch.unmatched:
try:
watch.unmatched.remove(uppat % node)
except:
self.debug("Already matched: %s" % (uppat % node))
try:
watch.unmatched.remove(self.templates["Pat:InfraUp"] % node)
except:
self.debug("Already matched: %s" % (self.templates["Pat:InfraUp"] % node))
try:
watch.unmatched.remove(self.templates["Pat:PacemakerUp"] % node)
except:
self.debug("Already matched: %s" % (self.templates["Pat:PacemakerUp"] % node))
if watch.unmatched:
for regex in watch.unmatched:
self.logger.log ("Warn: Startup pattern not found: %s" %(regex))
if not self.CM.cluster_stable():
return self.failure("Cluster did not stabilize")
did_fail = 0
unstable = []
for node in self.Env["nodes"]:
if self.CM.StataCM(node) == 0:
did_fail = 1
unstable.append(node)
if did_fail:
return self.failure("Unstarted nodes exist: " + repr(unstable))
unstable = []
for node in self.Env["nodes"]:
if not self.CM.node_stable(node):
did_fail = 1
unstable.append(node)
if did_fail:
return self.failure("Unstable cluster nodes exist: " + repr(unstable))
return self.success()
def is_applicable(self):
'''SimulStartLite is a setup test and never applicable'''
return 0
def TestList(cm, audits):
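# Instantiate every registered test class and return the ones applicable
# to this environment, with the audit list attached to each.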
result = []
for testclass in AllTestClasses:
bound_test = testclass(cm)
if bound_test.is_applicable():
bound_test.Audits = audits
result.append(bound_test)
return result
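# Illustrative, hypothetical usage by a test scheduler (names are examples,
# not part of this module):
#   tests = TestList(cm, audits)
#   for test in tests:
#       test(cm.Env["nodes"][0])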
class RemoteLXC(CTSTest):
def __init__(self, cm):
CTSTest.__init__(self,cm)
self.name = "RemoteLXC"
self.start = StartTest(cm)
self.startall = SimulStartLite(cm)
self.num_containers = 2
self.is_container = 1
self.is_docker_unsafe = 1
self.failed = 0
self.fail_string = ""
def start_lxc_simple(self, node):
# restore any artifacts laying around from a previous test.
self.rsh(node, "/usr/share/pacemaker/tests/cts/lxc_autogen.sh -s -R &>/dev/null")
# generate the containers, put them in the config, add some resources to them
pats = [ ]
watch = self.create_watch(pats, 120)
watch.setwatch()
pats.append(self.templates["Pat:RscOpOK"] % ("start", "lxc1"))
pats.append(self.templates["Pat:RscOpOK"] % ("start", "lxc2"))
pats.append(self.templates["Pat:RscOpOK"] % ("start", "lxc-ms"))
pats.append(self.templates["Pat:RscOpOK"] % ("promote", "lxc-ms"))
self.rsh(node, "/usr/share/pacemaker/tests/cts/lxc_autogen.sh -g -a -m -s -c %d &>/dev/null" % self.num_containers)
self.set_timer("remoteSimpleInit")
watch.lookforall()
self.log_timer("remoteSimpleInit")
if watch.unmatched:
self.fail_string = "Unmatched patterns: %s" % (repr(watch.unmatched))
self.failed = 1
def cleanup_lxc_simple(self, node):
pats = [ ]
# if the test failed, attempt to clean up the cib and libvirt environment
# as best as possible
if self.failed == 1:
# restore libvirt and cib
self.rsh(node, "/usr/share/pacemaker/tests/cts/lxc_autogen.sh -s -R &>/dev/null")
return
watch = self.create_watch(pats, 120)
watch.setwatch()
pats.append(self.templates["Pat:RscOpOK"] % ("stop", "container1"))
pats.append(self.templates["Pat:RscOpOK"] % ("stop", "container2"))
self.rsh(node, "/usr/share/pacemaker/tests/cts/lxc_autogen.sh -p &>/dev/null")
self.set_timer("remoteSimpleCleanup")
watch.lookforall()
self.log_timer("remoteSimpleCleanup")
if watch.unmatched:
self.fail_string = "Unmatched patterns: %s" % (repr(watch.unmatched))
self.failed = 1
# cleanup libvirt
self.rsh(node, "/usr/share/pacemaker/tests/cts/lxc_autogen.sh -s -R &>/dev/null")
def __call__(self, node):
'''Perform the 'RemoteLXC' test. '''
self.incr("calls")
ret = self.startall(None)
if not ret:
return self.failure("Setup failed, start all nodes failed.")
rc = self.rsh(node, "/usr/share/pacemaker/tests/cts/lxc_autogen.sh -v &>/dev/null")
if rc == 1:
self.log("Environment test for lxc support failed.")
return self.skipped()
self.start_lxc_simple(node)
self.cleanup_lxc_simple(node)
self.debug("Waiting for the cluster to recover")
self.CM.cluster_stable()
if self.failed == 1:
return self.failure(self.fail_string)
return self.success()
def errorstoignore(self):
'''Return list of errors which should be ignored'''
return [
r"Updating failcount for ping",
r"schedulerd.*: Recover (ping|lxc-ms|container)\s*\(.*\)",
# The orphaned lxc-ms resource causes an expected transition error
# that is a result of the scheduler not having knowledge that the
# promotable resource used to be a clone. As a result, it looks like that
# resource is running in multiple locations when it shouldn't... But in
# this instance we know why this error is occurring and that it is expected.
r"Calculated [Tt]ransition .*pe-error",
r"Resource lxc-ms .* is active on 2 nodes attempting recovery",
r"Unknown operation: fail",
r"VirtualDomain.*ERROR: Unable to determine emulator",
]
AllTestClasses.append(RemoteLXC)
class RemoteDriver(CTSTest):
def __init__(self, cm):
CTSTest.__init__(self,cm)
self.name = self.__class__.__name__
self.is_docker_unsafe = 1
self.start = StartTest(cm)
self.startall = SimulStartLite(cm)
self.stop = StopTest(cm)
self.remote_rsc = "remote-rsc"
self.cib_cmd = """cibadmin -C -o %s -X '%s' """
self.reset()
def reset(self):
self.pcmk_started = 0
self.failed = False
self.fail_string = ""
self.remote_node_added = 0
self.remote_rsc_added = 0
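# Randomly decide whether this run also exercises the connection
# resource's reconnect_interval option.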
self.remote_use_reconnect_interval = self.Env.RandomGen.choice([True,False])
def fail(self, msg):
""" Mark test as failed. """
self.failed = True
# Always log the failure.
self.logger.log(msg)
# Use first failure as test status, as it's likely to be most useful.
if not self.fail_string:
self.fail_string = msg
def get_othernode(self, node):
for othernode in self.Env["nodes"]:
if othernode == node:
# We don't want to use the CIB on the node we just shut down;
# find a cluster node that is not our soon-to-be remote node.
continue
else:
return othernode
def del_rsc(self, node, rsc):
othernode = self.get_othernode(node)
rc = self.rsh(othernode, "crm_resource -D -r %s -t primitive" % (rsc))
if rc != 0:
self.fail("Removal of resource '%s' failed" % rsc)
def add_rsc(self, node, rsc_xml):
othernode = self.get_othernode(node)
rc = self.rsh(othernode, self.cib_cmd % ("resources", rsc_xml))
if rc != 0:
self.fail("resource creation failed")
def add_primitive_rsc(self, node):
rsc_xml = """
""" % { "node": self.remote_rsc }
self.add_rsc(node, rsc_xml)
if not self.failed:
self.remote_rsc_added = 1
def add_connection_rsc(self, node):
rsc_xml = """
""" % { "node": self.remote_node, "server": node }
if self.remote_use_reconnect_interval:
# Set reconnect interval on resource
rsc_xml = rsc_xml + """
""" % (self.remote_node)
rsc_xml = rsc_xml + """
""" % { "node": self.remote_node }
self.add_rsc(node, rsc_xml)
if not self.failed:
self.remote_node_added = 1
def disable_services(self, node):
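# Record whether corosync/pacemaker were enabled so that
# restore_services() can re-enable them later.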
self.corosync_enabled = self.Env.service_is_enabled(node, "corosync")
if self.corosync_enabled:
self.Env.disable_service(node, "corosync")
self.pacemaker_enabled = self.Env.service_is_enabled(node, "pacemaker")
if self.pacemaker_enabled:
self.Env.disable_service(node, "pacemaker")
def restore_services(self, node):
if self.corosync_enabled:
self.Env.enable_service(node, "corosync")
if self.pacemaker_enabled:
self.Env.enable_service(node, "pacemaker")
def stop_pcmk_remote(self, node):
# Stop pacemaker_remote, retrying up to 10 times with a 6-second pause between attempts
for i in range(10):
rc = self.rsh(node, "service pacemaker_remote stop")
if rc != 0:
time.sleep(6)
else:
break
def start_pcmk_remote(self, node):
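# Try up to 10 times (with a 6-second pause between attempts) to start
# pacemaker_remote; record success in self.pcmk_started.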
for i in range(10):
rc = self.rsh(node, "service pacemaker_remote start")
if rc != 0:
time.sleep(6)
else:
self.pcmk_started = 1
break
def freeze_pcmk_remote(self, node):
""" Simulate a Pacemaker Remote daemon failure. """
# We freeze the process.
self.rsh(node, "killall -STOP pacemaker-remoted")
def resume_pcmk_remote(self, node):
# We resume the process.
self.rsh(node, "killall -CONT pacemaker-remoted")
def start_metal(self, node):
# Cluster nodes are reused as remote nodes in remote tests. If cluster
# services were enabled at boot and the remote node later got fenced, the
# rebooted node would rejoin as a cluster node instead of the expected
# remote node, and pacemaker_remote would be unable to start. The test
# could then no longer be orchestrated gracefully.
#
# Temporarily disable any enabled cluster services.
self.disable_services(node)
pcmk_started = 0
# make sure the resource doesn't already exist for some reason
self.rsh(node, "crm_resource -D -r %s -t primitive" % (self.remote_rsc))
self.rsh(node, "crm_resource -D -r %s -t primitive" % (self.remote_node))
if not self.stop(node):
self.fail("Failed to shutdown cluster node %s" % node)
return
self.start_pcmk_remote(node)
if self.pcmk_started == 0:
self.fail("Failed to start pacemaker_remote on node %s" % node)
return
# Convert node to baremetal now that it has shutdown the cluster stack
pats = [ ]
watch = self.create_watch(pats, 120)
watch.setwatch()
pats.append(self.templates["Pat:RscOpOK"] % ("start", self.remote_node))
pats.append(self.templates["Pat:DC_IDLE"])
self.add_connection_rsc(node)
self.set_timer("remoteMetalInit")
watch.lookforall()
self.log_timer("remoteMetalInit")
if watch.unmatched:
self.fail("Unmatched patterns: %s" % watch.unmatched)
def migrate_connection(self, node):
if self.failed:
return
pats = [ ]
pats.append(self.templates["Pat:RscOpOK"] % ("migrate_to", self.remote_node))
pats.append(self.templates["Pat:RscOpOK"] % ("migrate_from", self.remote_node))
pats.append(self.templates["Pat:DC_IDLE"])
watch = self.create_watch(pats, 120)
watch.setwatch()
(rc, lines) = self.rsh(node, "crm_resource -M -r %s" % (self.remote_node), None)
if rc != 0:
self.fail("failed to move remote node connection resource")
return
self.set_timer("remoteMetalMigrate")
watch.lookforall()
self.log_timer("remoteMetalMigrate")
if watch.unmatched:
self.fail("Unmatched patterns: %s" % watch.unmatched)
return
def fail_rsc(self, node):
if self.failed:
return
watchpats = [ ]
watchpats.append(self.templates["Pat:RscRemoteOpOK"] % ("stop", self.remote_rsc, self.remote_node))
watchpats.append(self.templates["Pat:RscRemoteOpOK"] % ("start", self.remote_rsc, self.remote_node))
watchpats.append(self.templates["Pat:DC_IDLE"])
watch = self.create_watch(watchpats, 120)
watch.setwatch()
self.debug("causing dummy rsc to fail.")
rc = self.rsh(node, "rm -f /var/run/resource-agents/Dummy*")
self.set_timer("remoteRscFail")
watch.lookforall()
self.log_timer("remoteRscFail")
if watch.unmatched:
self.fail("Unmatched patterns during rsc fail: %s" % watch.unmatched)
def fail_connection(self, node):
if self.failed:
return
watchpats = [ ]
watchpats.append(self.templates["Pat:FenceOpOK"] % self.remote_node)
watchpats.append(self.templates["Pat:NodeFenced"] % self.remote_node)
watch = self.create_watch(watchpats, 120)
watch.setwatch()
# freeze the pcmk remote daemon. this will result in fencing
self.debug("Freezing pacemaker_remote on active remote node")
self.freeze_pcmk_remote(node)
self.debug("Waiting for remote node to be fenced.")
self.set_timer("remoteMetalFence")
watch.lookforall()
self.log_timer("remoteMetalFence")
if watch.unmatched:
self.fail("Unmatched patterns: %s" % watch.unmatched)
return
self.debug("Waiting for the remote node to come back up")
self.CM.ns.WaitForNodeToComeUp(node, 120)
pats = [ ]
watch = self.create_watch(pats, 240)
watch.setwatch()
pats.append(self.templates["Pat:RscOpOK"] % ("start", self.remote_node))
if self.remote_rsc_added == 1:
pats.append(self.templates["Pat:RscRemoteOpOK"] % ("start", self.remote_rsc, self.remote_node))
# start the remote node again watch it integrate back into cluster.
self.start_pcmk_remote(node)
if self.pcmk_started == 0:
self.fail("Failed to start pacemaker_remote on node %s" % node)
return
self.debug("Waiting for remote node to rejoin cluster after being fenced.")
self.set_timer("remoteMetalRestart")
watch.lookforall()
self.log_timer("remoteMetalRestart")
if watch.unmatched:
self.fail("Unmatched patterns: %s" % watch.unmatched)
return
def add_dummy_rsc(self, node):
if self.failed:
return
# verify we can put a resource on the remote node
pats = [ ]
watch = self.create_watch(pats, 120)
watch.setwatch()
pats.append(self.templates["Pat:RscRemoteOpOK"] % ("start", self.remote_rsc, self.remote_node))
pats.append(self.templates["Pat:DC_IDLE"])
# Add a resource that must live on remote-node
self.add_primitive_rsc(node)
# force that rsc to prefer the remote node.
(rc, line) = self.CM.rsh(node, "crm_resource -M -r %s -N %s -f" % (self.remote_rsc, self.remote_node), None)
if rc != 0:
self.fail("Failed to place remote resource on remote node.")
return
self.set_timer("remoteMetalRsc")
watch.lookforall()
self.log_timer("remoteMetalRsc")
if watch.unmatched:
self.fail("Unmatched patterns: %s" % watch.unmatched)
def test_attributes(self, node):
if self.failed:
return
# This verifies permanent attributes can be set on a remote-node. It also
# verifies the remote-node can edit its own cib node section remotely.
(rc, line) = self.CM.rsh(node, "crm_attribute -l forever -n testattr -v testval -N %s" % (self.remote_node), None)
if rc != 0:
self.fail("Failed to set remote-node attribute. rc:%s output:%s" % (rc, line))
return
(rc, line) = self.CM.rsh(node, "crm_attribute -l forever -n testattr -q -N %s" % (self.remote_node), None)
if rc != 0:
self.fail("Failed to get remote-node attribute")
return
(rc, line) = self.CM.rsh(node, "crm_attribute -l forever -n testattr -D -N %s" % (self.remote_node), None)
if rc != 0:
self.fail("Failed to delete remote-node attribute")
return
def cleanup_metal(self, node):
self.restore_services(node)
if self.pcmk_started == 0:
return
pats = [ ]
watch = self.create_watch(pats, 120)
watch.setwatch()
if self.remote_rsc_added == 1:
pats.append(self.templates["Pat:RscOpOK"] % ("stop", self.remote_rsc))
if self.remote_node_added == 1:
pats.append(self.templates["Pat:RscOpOK"] % ("stop", self.remote_node))
self.set_timer("remoteMetalCleanup")
self.resume_pcmk_remote(node)
if self.remote_rsc_added == 1:
# Remove dummy resource added for remote node tests
self.debug("Cleaning up dummy rsc put on remote node")
self.rsh(self.get_othernode(node), "crm_resource -U -r %s" % self.remote_rsc)
self.del_rsc(node, self.remote_rsc)
if self.remote_node_added == 1:
# Remove remote node's connection resource
self.debug("Cleaning up remote node connection resource")
self.rsh(self.get_othernode(node), "crm_resource -U -r %s" % (self.remote_node))
self.del_rsc(node, self.remote_node)
watch.lookforall()
self.log_timer("remoteMetalCleanup")
if watch.unmatched:
self.fail("Unmatched patterns: %s" % watch.unmatched)
self.stop_pcmk_remote(node)
self.debug("Waiting for the cluster to recover")
self.CM.cluster_stable()
if self.remote_node_added == 1:
# Remove remote node itself
self.debug("Cleaning up node entry for remote node")
self.rsh(self.get_othernode(node), "crm_node --force --remove %s" % self.remote_node)
def setup_env(self, node):
self.remote_node = "remote-%s" % (node)
# we are assuming if all nodes have a key, that it is
# the right key... If any node doesn't have a remote
# key, we regenerate it everywhere.
if self.rsh.exists_on_all("/etc/pacemaker/authkey", self.Env["nodes"]):
return
# create key locally
(handle, keyfile) = tempfile.mkstemp(".cts")
os.close(handle)
subprocess.check_call(["dd", "if=/dev/urandom", "of=%s" % keyfile, "bs=4096", "count=1"],
stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
# sync key throughout the cluster
for node in self.Env["nodes"]:
self.rsh(node, "mkdir -p --mode=0750 /etc/pacemaker")
self.rsh.cp(keyfile, "root@%s:/etc/pacemaker/authkey" % node)
self.rsh(node, "chgrp haclient /etc/pacemaker /etc/pacemaker/authkey")
self.rsh(node, "chmod 0640 /etc/pacemaker/authkey")
os.unlink(keyfile)
def is_applicable(self):
if not self.is_applicable_common():
return False
for node in self.Env["nodes"]:
rc = self.rsh(node, "which pacemaker-remoted >/dev/null 2>&1")
if rc != 0:
return False
return True
def start_new_test(self, node):
self.incr("calls")
self.reset()
ret = self.startall(None)
if not ret:
return self.failure("setup failed: could not start all nodes")
self.setup_env(node)
self.start_metal(node)
self.add_dummy_rsc(node)
return True
def __call__(self, node):
return self.failure("This base class is not meant to be called directly.")
def errorstoignore(self):
'''Return list of errors which should be ignored'''
return [ r"""is running on remote.*which isn't allowed""",
r"""Connection terminated""",
r"""Could not send remote""",
]
# RemoteDriver is just a base class for other tests, so it is not added to AllTestClasses
class RemoteBasic(RemoteDriver):
def __call__(self, node):
'''Perform the 'RemoteBasic' test. '''
if not self.start_new_test(node):
return self.failure(self.fail_string)
self.test_attributes(node)
self.cleanup_metal(node)
self.debug("Waiting for the cluster to recover")
self.CM.cluster_stable()
if self.failed:
return self.failure(self.fail_string)
return self.success()
AllTestClasses.append(RemoteBasic)
class RemoteStonithd(RemoteDriver):
def __call__(self, node):
'''Perform the 'RemoteStonithd' test. '''
if not self.start_new_test(node):
return self.failure(self.fail_string)
self.fail_connection(node)
self.cleanup_metal(node)
self.debug("Waiting for the cluster to recover")
self.CM.cluster_stable()
if self.failed:
return self.failure(self.fail_string)
return self.success()
def is_applicable(self):
if not RemoteDriver.is_applicable(self):
return False
if "DoFencing" in list(self.Env.keys()):
return self.Env["DoFencing"]
return True
def errorstoignore(self):
ignore_pats = [
r"Lost connection to Pacemaker Remote node",
r"Software caused connection abort",
r"pacemaker-controld.*:\s+error.*: Operation remote-.*_monitor",
r"pacemaker-controld.*:\s+error.*: Result of monitor operation for remote-.*",
r"schedulerd.*:\s+Recover remote-.*\s*\(.*\)",
r"error: Result of monitor operation for .* on remote-.*: Internal communication failure",
]
ignore_pats.extend(RemoteDriver.errorstoignore(self))
return ignore_pats
AllTestClasses.append(RemoteStonithd)
class RemoteMigrate(RemoteDriver):
def __call__(self, node):
'''Perform the 'RemoteMigrate' test. '''
if not self.start_new_test(node):
return self.failure(self.fail_string)
self.migrate_connection(node)
self.cleanup_metal(node)
self.debug("Waiting for the cluster to recover")
self.CM.cluster_stable()
if self.failed:
return self.failure(self.fail_string)
return self.success()
def is_applicable(self):
if not RemoteDriver.is_applicable(self):
return 0
# This test requires at least three nodes: one to convert to a
# remote node, one to host the connection originally, and one
# to migrate the connection to.
if len(self.Env["nodes"]) < 3:
return 0
return 1
AllTestClasses.append(RemoteMigrate)
class RemoteRscFailure(RemoteDriver):
def __call__(self, node):
'''Perform the 'RemoteRscFailure' test. '''
if not self.start_new_test(node):
return self.failure(self.fail_string)
# This is an important step. We are migrating the connection
# before failing the resource. This verifies that the migration
# has properly maintained control over the remote-node.
self.migrate_connection(node)
self.fail_rsc(node)
self.cleanup_metal(node)
self.debug("Waiting for the cluster to recover")
self.CM.cluster_stable()
if self.failed:
return self.failure(self.fail_string)
return self.success()
def errorstoignore(self):
ignore_pats = [
r"schedulerd.*: Recover remote-rsc\s*\(.*\)",
r"Dummy.*: No process state file found",
]
ignore_pats.extend(RemoteDriver.errorstoignore(self))
return ignore_pats
def is_applicable(self):
if not RemoteDriver.is_applicable(self):
return 0
# This test requires at least three nodes: one to convert to a
# remote node, one to host the connection originally, and one
# to migrate the connection to.
if len(self.Env["nodes"]) < 3:
return 0
return 1
AllTestClasses.append(RemoteRscFailure)
# vim:ts=4:sw=4:et:
diff --git a/cts/lab/ClusterManager.py b/cts/lab/ClusterManager.py
index 8daffaebb1..002d45b5ec 100644
--- a/cts/lab/ClusterManager.py
+++ b/cts/lab/ClusterManager.py
@@ -1,1067 +1,1067 @@
""" ClusterManager class for Pacemaker's Cluster Test Suite (CTS)
"""
-__copyright__ = """Copyright 2000-2021 the Pacemaker project contributors.
+__copyright__ = """Copyright 2000-2022 the Pacemaker project contributors.
Certain portions by Huang Zhen are copyright 2004
International Business Machines. The version control history for this file
may have further details."""
__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
import os
import re
import time
from collections import UserDict
from cts.CTSvars import *
from cts.CIB import ConfigFactory
from cts.CTS import NodeStatus, Process
from cts.CTStests import AuditResource
from cts.logging import LogFactory
from cts.watcher import LogWatcher
from cts.remote import RemoteFactory
from cts.environment import EnvFactory
from cts.patterns import PatternSelector
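# Tracks which nodes already have the load-statistics helper script installed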
has_log_stats = {}
log_stats_bin = CTSvars.CRM_DAEMON_DIR + "/cts_log_stats.sh"
log_stats = """
#!%s
# Tool for generating system load reports while CTS runs
trap "" 1
f=$1; shift
action=$1; shift
base=`basename $0`
if [ ! -e $f ]; then
echo "Time, Load 1, Load 5, Load 15, Test Marker" > $f
fi
function killpid() {
if [ -e $f.pid ]; then
kill -9 `cat $f.pid`
rm -f $f.pid
fi
}
function status() {
if [ -e $f.pid ]; then
kill -0 `cat $f.pid`
return $?
else
return 1
fi
}
function start() {
# Is it already running?
if
status
then
return
fi
echo Active as $$
echo $$ > $f.pid
while [ 1 = 1 ]; do
uptime | sed s/up.*:/,/ | tr '\\n' ',' >> $f
#top -b -c -n1 | grep -e usr/libexec/pacemaker | grep -v -e grep -e python | head -n 1 | sed s@/usr/libexec/pacemaker/@@ | awk '{print " 0, "$9", "$10", "$12}' | tr '\\n' ',' >> $f
echo 0 >> $f
sleep 5
done
}
case $action in
start)
start
;;
start-bg|bg)
# Use c --ssh -- ./stats.sh file start-bg
nohup $0 $f start >/dev/null 2>&1 > $f
echo " $*" >> $f
start
;;
*)
echo "Unknown action: $action."
;;
esac
""" % (CTSvars.BASH_PATH)
class ClusterManager(UserDict):
'''The Cluster Manager class.
This is a subclass of the Python dictionary class
(because it contains lots of {name,value} pairs,
not because its behavior is all that similar to a
dictionary in other ways).
This is an abstract class which implements high-level
operations on the cluster and/or its cluster managers.
Actual cluster manager classes are subclassed from this type.
One of the things we do is track the state we think every node should
be in.
'''
def __InitialConditions(self):
#if os.geteuid() != 0:
# raise ValueError("Must Be Root!")
None
def _finalConditions(self):
for key in list(self.keys()):
if self[key] == None:
raise ValueError("Improper derivation: self[" + key + "] must be overridden by subclass.")
def __init__(self):
self.Env = EnvFactory().getInstance()
self.templates = PatternSelector(self.Env["Name"])
self.__InitialConditions()
self.logger = LogFactory()
self.TestLoggingLevel=0
self.data = {}
self.name = self.Env["Name"]
self.rsh = RemoteFactory().getInstance()
self.ShouldBeStatus={}
self.ns = NodeStatus(self.Env)
self.OurNode = os.uname()[1].lower()
self.__instance_errorstoignore = []
self.fastfail = 0
self.cib_installed = 0
self.config = None
self.cluster_monitor = 0
self.use_short_names = 1
if self.Env["DoBSC"]:
del self.templates["Pat:They_stopped"]
self._finalConditions()
self.check_transitions = 0
self.check_elections = 0
self.CIBsync = {}
self.CibFactory = ConfigFactory(self)
self.cib = self.CibFactory.createConfig(self.Env["Schema"])
def __getitem__(self, key):
if key == "Name":
return self.name
print("FIXME: Getting %s from %s" % (key, repr(self)))
if key in self.data:
return self.data[key]
return self.templates.get_patterns(self.Env["Name"], key)
def __setitem__(self, key, value):
print("FIXME: Setting %s=%s on %s" % (key, value, repr(self)))
self.data[key] = value
def key_for_node(self, node):
return node
def instance_errorstoignore_clear(self):
'''Allows the test scenario to reset instance errors to ignore on each iteration.'''
self.__instance_errorstoignore = []
def instance_errorstoignore(self):
'''Return list of errors which are 'normal' for a specific test instance'''
return self.__instance_errorstoignore
def log(self, args):
self.logger.log(args)
def debug(self, args):
self.logger.debug(args)
def upcount(self):
'''How many nodes are up?'''
count = 0
for node in self.Env["nodes"]:
if self.ShouldBeStatus[node] == "up":
count = count + 1
return count
def install_support(self, command="install"):
for node in self.Env["nodes"]:
self.rsh(node, CTSvars.CRM_DAEMON_DIR + "/cts-support " + command)
def prepare_fencing_watcher(self, name):
# If we don't have quorum now but get it as a result of starting this node,
# then a bunch of nodes might get fenced
upnode = None
if self.HasQuorum(None):
self.debug("Have quorum")
return None
if not self.templates["Pat:Fencing_start"]:
print("No start pattern")
return None
if not self.templates["Pat:Fencing_ok"]:
print("No ok pattern")
return None
stonith = None
stonithPats = []
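# Watch for fencing operations (start and OK) targeting any node
# currently expected to be down.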
for peer in self.Env["nodes"]:
if self.ShouldBeStatus[peer] != "up":
stonithPats.append(self.templates["Pat:Fencing_ok"] % peer)
stonithPats.append(self.templates["Pat:Fencing_start"] % peer)
stonith = LogWatcher(self.Env["LogFileName"], stonithPats, "StartupFencing", 0, hosts=self.Env["nodes"], kind=self.Env["LogWatcher"])
stonith.setwatch()
return stonith
def fencing_cleanup(self, node, stonith):
peer_list = []
peer_state = {}
self.debug("Looking for nodes that were fenced as a result of %s starting" % node)
# If we just started a node, we may now have quorum (and permission to fence)
if not stonith:
self.debug("Nothing to do")
return peer_list
q = self.HasQuorum(None)
if not q and len(self.Env["nodes"]) > 2:
# We didn't gain quorum - we shouldn't have shot anyone
self.debug("Quorum: %d Len: %d" % (q, len(self.Env["nodes"])))
return peer_list
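# Track each node's fencing progress: unknown -> in-progress -> complete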
for n in self.Env["nodes"]:
peer_state[n] = "unknown"
# Now see if any states need to be updated
self.debug("looking for: " + repr(stonith.regexes))
shot = stonith.look(0)
while shot:
line = repr(shot)
self.debug("Found: " + line)
del stonith.regexes[stonith.whichmatch]
# Extract node name (reset peer so an unmatched line is detected below)
peer = None
for n in self.Env["nodes"]:
if re.search(self.templates["Pat:Fencing_ok"] % n, shot):
peer = n
peer_state[peer] = "complete"
self.__instance_errorstoignore.append(self.templates["Pat:Fencing_ok"] % peer)
elif peer_state[n] != "complete" and re.search(self.templates["Pat:Fencing_start"] % n, shot):
# TODO: Correctly detect multiple fencing operations for the same host
peer = n
peer_state[peer] = "in-progress"
self.__instance_errorstoignore.append(self.templates["Pat:Fencing_start"] % peer)
if not peer:
self.logger.log("ERROR: Unknown stonith match: %s" % line)
elif not peer in peer_list:
self.debug("Found peer: " + peer)
peer_list.append(peer)
# Get the next one
shot = stonith.look(60)
for peer in peer_list:
self.debug(" Peer %s was fenced as a result of %s starting: %s" % (peer, node, peer_state[peer]))
if self.Env["at-boot"]:
self.ShouldBeStatus[peer] = "up"
else:
self.ShouldBeStatus[peer] = "down"
if peer_state[peer] == "in-progress":
# Wait for any in-progress operations to complete
shot = stonith.look(60)
while len(stonith.regexes) and shot:
line = repr(shot)
self.debug("Found: " + line)
del stonith.regexes[stonith.whichmatch]
shot = stonith.look(60)
# Now make sure the node is alive too
self.ns.WaitForNodeToComeUp(peer, self.Env["DeadTime"])
# Poll until it comes up
if self.Env["at-boot"]:
if not self.StataCM(peer):
time.sleep(self.Env["StartTime"])
if not self.StataCM(peer):
self.logger.log("ERROR: Peer %s failed to restart after being fenced" % peer)
return None
return peer_list
def StartaCM(self, node, verbose=False):
'''Start up the cluster manager on a given node'''
if verbose: self.logger.log("Starting %s on node %s" % (self.templates["Name"], node))
else: self.debug("Starting %s on node %s" % (self.templates["Name"], node))
ret = 1
if not node in self.ShouldBeStatus:
self.ShouldBeStatus[node] = "down"
if self.ShouldBeStatus[node] != "down":
return 1
patterns = []
# Technically we should always be able to notice ourselves starting
patterns.append(self.templates["Pat:Local_started"] % node)
if self.upcount() == 0:
patterns.append(self.templates["Pat:DC_started"] % node)
else:
patterns.append(self.templates["Pat:NonDC_started"] % node)
watch = LogWatcher(
self.Env["LogFileName"], patterns, "StartaCM", self.Env["StartTime"]+10, hosts=self.Env["nodes"], kind=self.Env["LogWatcher"])
self.install_config(node)
self.ShouldBeStatus[node] = "any"
if self.StataCM(node) and self.cluster_stable(self.Env["DeadTime"]):
self.logger.log ("%s was already started" % (node))
return 1
stonith = self.prepare_fencing_watcher(node)
watch.setwatch()
if self.rsh(node, self.templates["StartCmd"]) != 0:
self.logger.log ("Warn: Start command failed on node %s" % (node))
self.fencing_cleanup(node, stonith)
return None
self.ShouldBeStatus[node] = "up"
watch_result = watch.lookforall()
if watch.unmatched:
for regex in watch.unmatched:
self.logger.log ("Warn: Startup pattern not found: %s" % (regex))
if watch_result and self.cluster_stable(self.Env["DeadTime"]):
#self.debug("Found match: "+ repr(watch_result))
self.fencing_cleanup(node, stonith)
return 1
elif self.StataCM(node) and self.cluster_stable(self.Env["DeadTime"]):
self.fencing_cleanup(node, stonith)
return 1
self.logger.log ("Warn: Start failed for node %s" % (node))
return None
def StartaCMnoBlock(self, node, verbose=False):
'''Start up the cluster manager on a given node without blocking'''
if verbose: self.logger.log("Starting %s on node %s" % (self["Name"], node))
else: self.debug("Starting %s on node %s" % (self["Name"], node))
self.install_config(node)
self.rsh(node, self.templates["StartCmd"], synchronous=0)
self.ShouldBeStatus[node] = "up"
return 1
def StopaCM(self, node, verbose=False, force=False):
'''Stop the cluster manager on a given node'''
if verbose: self.logger.log("Stopping %s on node %s" % (self["Name"], node))
else: self.debug("Stopping %s on node %s" % (self["Name"], node))
if self.ShouldBeStatus[node] != "up" and force == False:
return 1
if self.rsh(node, self.templates["StopCmd"]) == 0:
# Make sure we can continue even if corosync leaks
# fdata-* is the old name
#self.rsh(node, "rm -rf /dev/shm/qb-* /dev/shm/fdata-*")
self.ShouldBeStatus[node] = "down"
self.cluster_stable(self.Env["DeadTime"])
return 1
else:
self.logger.log ("ERROR: Could not stop %s on node %s" % (self["Name"], node))
return None
def StopaCMnoBlock(self, node):
'''Stop the cluster manager on a given node without blocking'''
self.debug("Stopping %s on node %s" % (self["Name"], node))
self.rsh(node, self.templates["StopCmd"], synchronous=0)
self.ShouldBeStatus[node] = "down"
return 1
def RereadCM(self, node):
'''Force the cluster manager on a given node to reread its config
This may be a no-op on certain cluster managers.
'''
rc=self.rsh(node, self.templates["RereadCmd"])
if rc == 0:
return 1
else:
self.logger.log ("Could not force %s on node %s to reread its config"
% (self["Name"], node))
return None
def startall(self, nodelist=None, verbose=False, quick=False):
'''Start the cluster manager on every node in the cluster.
We can do it on a subset of the cluster if nodelist is not None.
'''
map = {}
if not nodelist:
nodelist = self.Env["nodes"]
for node in nodelist:
if self.ShouldBeStatus[node] == "down":
self.ns.WaitForAllNodesToComeUp(nodelist, 300)
if not quick:
# This is used for "basic sanity checks", so only start one node ...
if not self.StartaCM(node, verbose=verbose):
return 0
return 1
# Approximation of SimulStartList for --boot
watchpats = [ ]
watchpats.append(self.templates["Pat:DC_IDLE"])
for node in nodelist:
watchpats.append(self.templates["Pat:InfraUp"] % node)
watchpats.append(self.templates["Pat:PacemakerUp"] % node)
watchpats.append(self.templates["Pat:Local_started"] % node)
watchpats.append(self.templates["Pat:They_up"] % (nodelist[0], node))
# Start all the nodes - at about the same time...
watch = LogWatcher(self.Env["LogFileName"], watchpats, "fast-start", self.Env["DeadTime"]+10, hosts=self.Env["nodes"], kind=self.Env["LogWatcher"])
watch.setwatch()
if not self.StartaCM(nodelist[0], verbose=verbose):
return 0
for node in nodelist:
self.StartaCMnoBlock(node, verbose=verbose)
watch.lookforall()
if watch.unmatched:
for regex in watch.unmatched:
self.logger.log ("Warn: Startup pattern not found: %s" % (regex))
if not self.cluster_stable():
self.logger.log("Cluster did not stabilize")
return 0
return 1
def stopall(self, nodelist=None, verbose=False, force=False):
'''Stop the cluster managers on every node in the cluster.
We can do it on a subset of the cluster if nodelist is not None.
'''
ret = 1
map = {}
if not nodelist:
nodelist = self.Env["nodes"]
for node in self.Env["nodes"]:
if self.ShouldBeStatus[node] == "up" or force == True:
if not self.StopaCM(node, verbose=verbose, force=force):
ret = 0
return ret
def rereadall(self, nodelist=None):
'''Force the cluster managers on every node in the cluster
to reread their config files. We can do it on a subset of the
cluster if nodelist is not None.
'''
map = {}
if not nodelist:
nodelist = self.Env["nodes"]
for node in self.Env["nodes"]:
if self.ShouldBeStatus[node] == "up":
self.RereadCM(node)
def statall(self, nodelist=None):
'''Return the status of the cluster managers in the cluster.
We can do it on a subset of the cluster if nodelist is not None.
'''
result = {}
if not nodelist:
nodelist = self.Env["nodes"]
for node in nodelist:
if self.StataCM(node):
result[node] = "up"
else:
result[node] = "down"
return result
def isolate_node(self, target, nodes=None):
'''isolate the communication between the nodes'''
if not nodes:
nodes = self.Env["nodes"]
for node in nodes:
if node != target:
rc = self.rsh(target, self.templates["BreakCommCmd"] % self.key_for_node(node))
if rc != 0:
self.logger.log("Could not break the communication between %s and %s: %d" % (target, node, rc))
return None
else:
self.debug("Communication cut between %s and %s" % (target, node))
return 1
def unisolate_node(self, target, nodes=None):
'''fix the communication between the nodes'''
if not nodes:
nodes = self.Env["nodes"]
for node in nodes:
if node != target:
restored = 0
# Limit the amount of time we have asynchronous connectivity for
# Restore both sides as simultaneously as possible
self.rsh(target, self.templates["FixCommCmd"] % self.key_for_node(node), synchronous=0)
self.rsh(node, self.templates["FixCommCmd"] % self.key_for_node(target), synchronous=0)
self.debug("Communication restored between %s and %s" % (target, node))
def reducecomm_node(self,node):
'''reduce the communication between the nodes'''
rc = self.rsh(node, self.templates["ReduceCommCmd"]%(self.Env["XmitLoss"],self.Env["RecvLoss"]))
if rc == 0:
return 1
else:
self.logger.log("Could not reduce the communication between the nodes from node: %s" % node)
return None
def restorecomm_node(self,node):
'''restore the saved communication between the nodes'''
rc = 0
if float(self.Env["XmitLoss"]) != 0 or float(self.Env["RecvLoss"]) != 0 :
rc = self.rsh(node, self.templates["RestoreCommCmd"])
if rc == 0:
return 1
else:
self.logger.log("Could not restore the communication between the nodes from node: %s" % node)
return None
def oprofileStart(self, node=None):
if not node:
for n in self.Env["oprofile"]:
self.oprofileStart(n)
elif node in self.Env["oprofile"]:
self.debug("Enabling oprofile on %s" % node)
self.rsh(node, "opcontrol --init")
self.rsh(node, "opcontrol --setup --no-vmlinux --separate=lib --callgraph=20 --image=all")
self.rsh(node, "opcontrol --start")
self.rsh(node, "opcontrol --reset")
def oprofileSave(self, test, node=None):
if not node:
for n in self.Env["oprofile"]:
self.oprofileSave(test, n)
elif node in self.Env["oprofile"]:
self.rsh(node, "opcontrol --dump")
self.rsh(node, "opcontrol --save=cts.%d" % test)
# Read back with: opreport -l session:cts.0 image:/c*
if None:
self.rsh(node, "opcontrol --reset")
else:
self.oprofileStop(node)
self.oprofileStart(node)
def oprofileStop(self, node=None):
if not node:
for n in self.Env["oprofile"]:
self.oprofileStop(n)
elif node in self.Env["oprofile"]:
self.debug("Stopping oprofile on %s" % node)
self.rsh(node, "opcontrol --reset")
self.rsh(node, "opcontrol --shutdown 2>&1 > /dev/null")
def StatsExtract(self):
if not self.Env["stats"]:
return
for host in self.Env["nodes"]:
log_stats_file = "%s/cts-stats.csv" % CTSvars.CRM_DAEMON_DIR
if host in has_log_stats:
self.rsh(host, '''bash %s %s stop''' % (log_stats_bin, log_stats_file))
(rc, lines) = self.rsh(host, '''cat %s''' % log_stats_file, stdout=2)
self.rsh(host, '''bash %s %s delete''' % (log_stats_bin, log_stats_file))
fname = "cts-stats-%d-nodes-%s.csv" % (len(self.Env["nodes"]), host)
print("Extracted stats: %s" % fname)
fd = open(fname, "a")
fd.writelines(lines)
fd.close()
def StatsMark(self, testnum):
'''Mark the test number in the stats log'''
global has_log_stats
if not self.Env["stats"]:
return
for host in self.Env["nodes"]:
log_stats_file = "%s/cts-stats.csv" % CTSvars.CRM_DAEMON_DIR
if not host in has_log_stats:
global log_stats
global log_stats_bin
script=log_stats
#script = re.sub("\\\\", "\\\\", script)
script = re.sub('\"', '\\\"', script)
script = re.sub("'", "\'", script)
script = re.sub("`", "\`", script)
script = re.sub("\$", "\\\$", script)
self.debug("Installing %s on %s" % (log_stats_bin, host))
self.rsh(host, '''echo "%s" > %s''' % (script, log_stats_bin), silent=True)
self.rsh(host, '''bash %s %s delete''' % (log_stats_bin, log_stats_file))
has_log_stats[host] = 1
# Now mark it
self.rsh(host, '''bash %s %s mark %s''' % (log_stats_bin, log_stats_file, testnum), synchronous=0)
def errorstoignore(self):
# At some point implement a more elegant solution that
# also produces a report at the end
""" Return a list of known error messages that should be ignored """
return PatternSelector().get_patterns(self.name, "BadNewsIgnore")
def install_config(self, node):
if not self.ns.WaitForNodeToComeUp(node):
self.log("Node %s is not up." % node)
return None
if not node in self.CIBsync and self.Env["ClobberCIB"] == 1:
self.CIBsync[node] = 1
self.rsh(node, "rm -f "+CTSvars.CRM_CONFIG_DIR+"/cib*")
# Only install the CIB on the first node, all the other ones will pick it up from there
if self.cib_installed == 1:
return None
self.cib_installed = 1
if self.Env["CIBfilename"] == None:
self.log("Installing Generated CIB on node %s" % (node))
self.cib.install(node)
else:
self.log("Installing CIB (%s) on node %s" % (self.Env["CIBfilename"], node))
if 0 != self.rsh.cp(self.Env["CIBfilename"], "root@" + (self.templates["CIBfile"] % node)):
raise ValueError("Can not scp file to %s %d"%(node))
self.rsh(node, "chown "+CTSvars.CRM_DAEMON_USER+" "+CTSvars.CRM_CONFIG_DIR+"/cib.xml")
def prepare(self):
'''Finish the Initialization process. Prepare to test...'''
self.partitions_expected = 1
for node in self.Env["nodes"]:
self.ShouldBeStatus[node] = ""
if self.Env["experimental-tests"]:
self.unisolate_node(node)
self.StataCM(node)
def test_node_CM(self, node):
'''Report the status of the cluster manager on a given node'''
watchpats = [ ]
watchpats.append("Current ping state: (S_IDLE|S_NOT_DC)")
watchpats.append(self.templates["Pat:NonDC_started"] % node)
watchpats.append(self.templates["Pat:DC_started"] % node)
idle_watch = LogWatcher(self.Env["LogFileName"], watchpats, "ClusterIdle", hosts=[node], kind=self.Env["LogWatcher"])
idle_watch.setwatch()
out = self.rsh(node, self.templates["StatusCmd"]%node, 1)
self.debug("Node %s status: '%s'" %(node, out))
if not out or (out.find('ok') < 0):
if self.ShouldBeStatus[node] == "up":
self.log(
"Node status for %s is %s but we think it should be %s"
% (node, "down", self.ShouldBeStatus[node]))
self.ShouldBeStatus[node] = "down"
return 0
if self.ShouldBeStatus[node] == "down":
self.log(
"Node status for %s is %s but we think it should be %s: %s"
% (node, "up", self.ShouldBeStatus[node], out))
self.ShouldBeStatus[node] = "up"
# check the output first - because syslog-ng loses messages
if out.find('S_NOT_DC') != -1:
# Up and stable
return 2
if out.find('S_IDLE') != -1:
# Up and stable
return 2
# fall back to syslog-ng and wait
if not idle_watch.look():
# just up
self.debug("Warn: Node %s is unstable: %s" % (node, out))
return 1
# Up and stable
return 2
# Is the node up or is the node down
def StataCM(self, node):
'''Report the status of the cluster manager on a given node'''
if self.test_node_CM(node) > 0:
return 1
return None
# Being up and being stable is not the same question...
def node_stable(self, node):
'''Report the status of the cluster manager on a given node'''
if self.test_node_CM(node) == 2:
return 1
self.log("Warn: Node %s not stable" % (node))
return None
def partition_stable(self, nodes, timeout=None):
watchpats = [ ]
watchpats.append("Current ping state: S_IDLE")
watchpats.append(self.templates["Pat:DC_IDLE"])
self.debug("Waiting for cluster stability...")
if timeout == None:
timeout = self.Env["DeadTime"]
if len(nodes) < 3:
self.debug("Cluster is inactive")
return 1
idle_watch = LogWatcher(self.Env["LogFileName"], watchpats, "ClusterStable", timeout, hosts=nodes.split(), kind=self.Env["LogWatcher"])
idle_watch.setwatch()
for node in nodes.split():
# have each node dump its current state
self.rsh(node, self.templates["StatusCmd"] % node, 1)
ret = idle_watch.look()
while ret:
self.debug(ret)
for node in nodes.split():
if re.search(node, ret):
return 1
ret = idle_watch.look()
self.debug("Warn: Partition %s not IDLE after %ds" % (repr(nodes), timeout))
return None
def cluster_stable(self, timeout=None, double_check=False):
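# The cluster is considered stable when every partition reports an idle DC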
partitions = self.find_partitions()
for partition in partitions:
if not self.partition_stable(partition, timeout):
return None
if double_check:
# Make sure we are really stable and that all resources,
# including those that depend on transient node attributes,
# are started if they were going to be
time.sleep(5)
for partition in partitions:
if not self.partition_stable(partition, timeout):
return None
return 1
def is_node_dc(self, node, status_line=None):
rc = 0
if not status_line:
status_line = self.rsh(node, self.templates["StatusCmd"]%node, 1)
if not status_line:
rc = 0
elif status_line.find('S_IDLE') != -1:
rc = 1
elif status_line.find('S_INTEGRATION') != -1:
rc = 1
elif status_line.find('S_FINALIZE_JOIN') != -1:
rc = 1
elif status_line.find('S_POLICY_ENGINE') != -1:
rc = 1
elif status_line.find('S_TRANSITION_ENGINE') != -1:
rc = 1
return rc
def active_resources(self, node):
(rc, output) = self.rsh(node, """crm_resource -c""", None)
resources = []
for line in output:
if re.search("^Resource", line):
tmp = AuditResource(self, line)
if tmp.type == "primitive" and tmp.host == node:
resources.append(tmp.id)
return resources
def ResourceLocation(self, rid):
ResourceNodes = []
for node in self.Env["nodes"]:
if self.ShouldBeStatus[node] == "up":
cmd = self.templates["RscRunning"] % (rid)
(rc, lines) = self.rsh(node, cmd, None)
if rc == 127:
self.log("Command '%s' failed. Binary or pacemaker-cts package not installed?" % cmd)
for line in lines:
self.log("Output: "+line)
elif rc == 0:
ResourceNodes.append(node)
return ResourceNodes
def find_partitions(self):
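# Query each active node for its membership view and collect the
# distinct partitions reported.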
ccm_partitions = []
for node in self.Env["nodes"]:
if self.ShouldBeStatus[node] == "up":
partition = self.rsh(node, self.templates["PartitionCmd"], 1)
if not partition:
self.log("no partition details for %s" % node)
elif len(partition) > 2:
nodes = partition.split()
nodes.sort()
partition = ' '.join(nodes)
found = 0
for a_partition in ccm_partitions:
if partition == a_partition:
found = 1
if found == 0:
self.debug("Adding partition from %s: %s" % (node, partition))
ccm_partitions.append(partition)
else:
self.debug("Partition '%s' from %s is consistent with existing entries" % (partition, node))
else:
self.log("bad partition details for %s" % node)
else:
self.debug("Node %s is down... skipping" % node)
self.debug("Found partitions: %s" % repr(ccm_partitions) )
return ccm_partitions
def HasQuorum(self, node_list):
# If we are auditing a partition, then one side will
# have quorum and the other not.
# So the caller needs to tell us which we are checking
# If no value for node_list is specified... assume all nodes
if not node_list:
node_list = self.Env["nodes"]
for node in node_list:
if self.ShouldBeStatus[node] == "up":
quorum = self.rsh(node, self.templates["QuorumCmd"], 1)
if quorum.find("1") != -1:
return 1
elif quorum.find("0") != -1:
return 0
else:
self.debug("WARN: Unexpected quorum test result from " + node + ":" + quorum)
return 0
def Components(self):
complist = []
common_ignore = [
"Pending action:",
"(ERROR|error): crm_log_message_adv:",
"(ERROR|error): MSG: No message to dump",
"pending LRM operations at shutdown",
"Lost connection to the CIB manager",
"Connection to the CIB terminated...",
"Sending message to the CIB manager FAILED",
"Action A_RECOVER .* not supported",
"(ERROR|error): stonithd_op_result_ready: not signed on",
"pingd.*(ERROR|error): send_update: Could not send update",
"send_ipc_message: IPC Channel to .* is not connected",
"unconfirmed_actions: Waiting on .* unconfirmed actions",
"cib_native_msgready: Message pending on command channel",
r": Performing A_EXIT_1 - forcefully exiting ",
r"Resource .* was active at shutdown. You may ignore this error if it is unmanaged.",
]
stonith_ignore = [
r"Updating failcount for child_DoFencing",
r"error.*: Fencer connection failed \(will retry\)",
"pacemaker-execd.*(ERROR|error): stonithd_receive_ops_result failed.",
]
stonith_ignore.extend(common_ignore)
ccm = Process(self, "ccm", triggersreboot=self.fastfail, pats = [
"State transition .* S_RECOVERY",
"pacemaker-controld.*Action A_RECOVER .* not supported",
r"pacemaker-controld.*: Input I_TERMINATE .*from do_recover",
r"pacemaker-controld.*: Could not recover from internal error",
"pacemaker-controld.*I_ERROR.*crmd_cib_connection_destroy",
# these status numbers are likely wrong now
r"pacemaker-controld.*exited with status 2",
r"attrd.*exited with status 1",
r"cib.*exited with status 2",
# Not if it was fenced
# "A new node joined the cluster",
# "WARN: determine_online_status: Node .* is unclean",
-# "Scheduling Node .* for STONITH",
+# "Scheduling node .* for fencing",
# "Executing .* fencing operation",
# "tengine_stonith_callback: .*result=0",
# "Processing I_NODE_JOIN:.* cause=C_HA_MESSAGE",
# "State transition S_.* -> S_INTEGRATION.*input=I_NODE_JOIN",
"State transition S_STARTING -> S_PENDING",
], badnews_ignore = common_ignore)
based = Process(self, "pacemaker-based", triggersreboot=self.fastfail, pats = [
"State transition .* S_RECOVERY",
"Lost connection to the CIB manager",
"Connection to the CIB manager terminated",
r"pacemaker-controld.*: Input I_TERMINATE .*from do_recover",
"pacemaker-controld.*I_ERROR.*crmd_cib_connection_destroy",
r"pacemaker-controld.*: Could not recover from internal error",
# these status numbers are likely wrong now
r"pacemaker-controld.*exited with status 2",
r"attrd.*exited with status 1",
], badnews_ignore = common_ignore)
execd = Process(self, "pacemaker-execd", triggersreboot=self.fastfail, pats = [
"State transition .* S_RECOVERY",
"LRM Connection failed",
"pacemaker-controld.*I_ERROR.*lrm_connection_destroy",
"State transition S_STARTING -> S_PENDING",
r"pacemaker-controld.*: Input I_TERMINATE .*from do_recover",
r"pacemaker-controld.*: Could not recover from internal error",
# this status number is likely wrong now
r"pacemaker-controld.*exited with status 2",
], badnews_ignore = common_ignore)
controld = Process(self, "pacemaker-controld", triggersreboot=self.fastfail,
pats = [
# "WARN: determine_online_status: Node .* is unclean",
-# "Scheduling Node .* for STONITH",
+# "Scheduling node .* for fencing",
# "Executing .* fencing operation",
# "tengine_stonith_callback: .*result=0",
"State transition .* S_IDLE",
"State transition S_STARTING -> S_PENDING",
], badnews_ignore = common_ignore)
schedulerd = Process(self, "pacemaker-schedulerd", triggersreboot=self.fastfail, pats = [
"State transition .* S_RECOVERY",
r"pacemaker-controld.*: Input I_TERMINATE .*from do_recover",
r"pacemaker-controld.*: Could not recover from internal error",
r"pacemaker-controld.*CRIT.*: Connection to the scheduler failed",
"pacemaker-controld.*I_ERROR.*save_cib_contents",
# this status number is likely wrong now
r"pacemaker-controld.*exited with status 2",
], badnews_ignore = common_ignore, dc_only=1)
if self.Env["DoFencing"] == 1 :
complist.append(Process(self, "stoniths", triggersreboot=self.fastfail, dc_pats = [
r"pacemaker-controld.*CRIT.*: Fencing daemon connection failed",
"Attempting connection to fencing daemon",
], badnews_ignore = stonith_ignore))
if self.fastfail == 0:
ccm.pats.extend([
# these status numbers are likely wrong now
r"attrd.*exited with status 1",
r"pacemaker-(based|controld).*exited with status 2",
])
based.pats.extend([
# these status numbers are likely wrong now
r"attrd.*exited with status 1",
r"pacemaker-controld.*exited with status 2",
])
execd.pats.extend([
# these status numbers are likely wrong now
r"pacemaker-controld.*exited with status 2",
])
complist.append(ccm)
complist.append(based)
complist.append(execd)
complist.append(controld)
complist.append(schedulerd)
return complist
def StandbyStatus(self, node):
out=self.rsh(node, self.templates["StandbyQueryCmd"] % node, 1)
if not out:
return "off"
out = out[:-1]
self.debug("Standby result: "+out)
return out
# status == "on" : Enter Standby mode
# status == "off": Enter Active mode
def SetStandbyMode(self, node, status):
current_status = self.StandbyStatus(node)
cmd = self.templates["StandbyCmd"] % (node, status)
ret = self.rsh(node, cmd)
return True
def AddDummyRsc(self, node, rid):
rsc_xml = """ '
'""" % (rid, rid)
constraint_xml = """ '
'
""" % (rid, node, node, rid)
self.rsh(node, self.templates['CibAddXml'] % (rsc_xml))
self.rsh(node, self.templates['CibAddXml'] % (constraint_xml))
def RemoveDummyRsc(self, node, rid):
constraint = "\"//rsc_location[@rsc='%s']\"" % (rid)
rsc = "\"//primitive[@id='%s']\"" % (rid)
self.rsh(node, self.templates['CibDelXpath'] % constraint)
self.rsh(node, self.templates['CibDelXpath'] % rsc)
diff --git a/cts/lab/patterns.py b/cts/lab/patterns.py
index 38d6e1065a..90cac735d8 100644
--- a/cts/lab/patterns.py
+++ b/cts/lab/patterns.py
@@ -1,415 +1,415 @@
""" Pattern-holding classes for Pacemaker's Cluster Test Suite (CTS)
"""
__copyright__ = "Copyright 2008-2022 the Pacemaker project contributors"
__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
import sys, os
from cts.CTSvars import *
patternvariants = {}
class BasePatterns(object):
def __init__(self, name):
self.name = name
patternvariants[name] = self
self.ignore = [
"avoid confusing Valgrind",
# Logging bug in some versions of libvirtd
r"libvirtd.*: internal error: Failed to parse PCI config address",
# pcs can log this when node is fenced, but fencing is OK in some
# tests (and we will catch it in pacemaker logs when not OK)
r"pcs.daemon:No response from: .* request: get_configs, error:",
]
self.BadNews = []
self.components = {}
self.commands = {
"StatusCmd" : "crmadmin -t 60 -S %s 2>/dev/null",
"CibQuery" : "cibadmin -Ql",
"CibAddXml" : "cibadmin --modify -c --xml-text %s",
"CibDelXpath" : "cibadmin --delete --xpath %s",
# 300,000 == 5 minutes
"RscRunning" : CTSvars.CRM_DAEMON_DIR + "/cts-exec-helper -R -r %s",
"CIBfile" : "%s:"+CTSvars.CRM_CONFIG_DIR+"/cib.xml",
"TmpDir" : "/tmp",
"BreakCommCmd" : "iptables -A INPUT -s %s -j DROP >/dev/null 2>&1",
"FixCommCmd" : "iptables -D INPUT -s %s -j DROP >/dev/null 2>&1",
# tc qdisc add dev lo root handle 1: cbq avpkt 1000 bandwidth 1000mbit
# tc class add dev lo parent 1: classid 1:1 cbq rate "$RATE"kbps allot 17000 prio 5 bounded isolated
# tc filter add dev lo parent 1: protocol ip prio 16 u32 match ip dst 127.0.0.1 match ip sport $PORT 0xFFFF flowid 1:1
# tc qdisc add dev lo parent 1: netem delay "$LATENCY"msec "$(($LATENCY/4))"msec 10% 2> /dev/null > /dev/null
"ReduceCommCmd" : "",
"RestoreCommCmd" : "tc qdisc del dev lo root",
"MaintenanceModeOn" : "cibadmin --modify -c --xml-text ''",
"MaintenanceModeOff" : "cibadmin --delete --xpath \"//nvpair[@name='maintenance-mode']\"",
"StandbyCmd" : "crm_attribute -Vq -U %s -n standby -l forever -v %s 2>/dev/null",
"StandbyQueryCmd" : "crm_attribute -qG -U %s -n standby -l forever -d off 2>/dev/null",
}
self.search = {
"Pat:DC_IDLE" : "pacemaker-controld.*State transition.*-> S_IDLE",
# This won't work if we have multiple partitions
"Pat:Local_started" : "%s\W.*controller successfully started",
"Pat:NonDC_started" : r"%s\W.*State transition.*-> S_NOT_DC",
"Pat:DC_started" : r"%s\W.*State transition.*-> S_IDLE",
"Pat:We_stopped" : "%s\W.*OVERRIDE THIS PATTERN",
"Pat:They_stopped" : "%s\W.*LOST:.* %s ",
"Pat:They_dead" : "node %s.*: is dead",
"Pat:They_up" : "%s %s\W.*OVERRIDE THIS PATTERN",
"Pat:TransitionComplete" : "Transition status: Complete: complete",
"Pat:Fencing_start" : r"Requesting peer fencing .* targeting %s",
"Pat:Fencing_ok" : r"pacemaker-fenced.*:\s*Operation .* targeting %s by .* for .*@.*: OK",
"Pat:Fencing_recover" : r"pacemaker-schedulerd.*: Recover %s",
"Pat:Fencing_active" : r"stonith resource .* is active on 2 nodes (attempting recovery)",
"Pat:Fencing_probe" : r"pacemaker-controld.* Result of probe operation for %s on .*: Error",
"Pat:RscOpOK" : r"pacemaker-controld.*:\s+Result of %s operation for %s.*: (0 \()?ok",
"Pat:RscOpFail" : r"pacemaker-schedulerd.*:.*Unexpected result .* recorded for %s of %s ",
"Pat:CloneOpFail" : r"pacemaker-schedulerd.*:.*Unexpected result .* recorded for %s of (%s|%s) ",
"Pat:RscRemoteOpOK" : r"pacemaker-controld.*:\s+Result of %s operation for %s on %s: (0 \()?ok",
"Pat:NodeFenced" : r"pacemaker-controld.*:\s* Peer %s was terminated \(.*\) by .* on behalf of .*: OK",
"Pat:FenceOpOK" : "Operation .* targeting %s using .* returned 0",
}
def get_component(self, key):
if key in self.components:
return self.components[key]
print("Unknown component '%s' for %s" % (key, self.name))
return []
def get_patterns(self, key):
if key == "BadNews":
return self.BadNews
elif key == "BadNewsIgnore":
return self.ignore
elif key == "Commands":
return self.commands
elif key == "Search":
return self.search
elif key == "Components":
return self.components
def __getitem__(self, key):
if key == "Name":
return self.name
elif key in self.commands:
return self.commands[key]
elif key in self.search:
return self.search[key]
else:
print("Unknown template '%s' for %s" % (key, self.name))
return None
class crm_corosync(BasePatterns):
'''
Patterns for Corosync version 2 cluster manager class
'''
def __init__(self, name):
BasePatterns.__init__(self, name)
self.commands.update({
"StartCmd" : "service corosync start && service pacemaker start",
"StopCmd" : "service pacemaker stop; [ ! -e /usr/sbin/pacemaker-remoted ] || service pacemaker_remote stop; service corosync stop",
"EpochCmd" : "crm_node -e",
"QuorumCmd" : "crm_node -q",
"PartitionCmd" : "crm_node -p",
})
self.search.update({
# Close enough ... "Corosync Cluster Engine exiting normally" isn't
# printed reliably.
"Pat:We_stopped" : "%s\W.*Unloading all Corosync service engines",
"Pat:They_stopped" : "%s\W.*pacemaker-controld.*Node %s(\[|\s).*state is now lost",
"Pat:They_dead" : "pacemaker-controld.*Node %s(\[|\s).*state is now lost",
"Pat:They_up" : "\W%s\W.*pacemaker-controld.*Node %s state is now member",
"Pat:ChildExit" : r"\[[0-9]+\] exited with status [0-9]+ \(",
# "with signal 9" == pcmk_child_exit(), "$" == check_active_before_startup_processes()
"Pat:ChildKilled" : r"%s\W.*pacemakerd.*%s\[[0-9]+\] terminated( with signal 9|$)",
"Pat:ChildRespawn" : "%s\W.*pacemakerd.*Respawning %s subdaemon after unexpected exit",
"Pat:InfraUp" : "%s\W.*corosync.*Initializing transport",
"Pat:PacemakerUp" : "%s\W.*pacemakerd.*Starting Pacemaker",
})
self.ignore = self.ignore + [
r"crm_mon:",
r"crmadmin:",
r"update_trace_data",
r"async_notify:.*strange, client not found",
r"Parse error: Ignoring unknown option .*nodename",
r"error.*: Operation 'reboot' .* using FencingFail returned ",
r"getinfo response error: 1$",
r"sbd.* error: inquisitor_child: DEBUG MODE IS ACTIVE",
r"sbd.* pcmk:\s*error:.*Connection to cib_ro.* (failed|closed)",
]
self.BadNews = [
r"[^(]error:",
r"crit:",
r"ERROR:",
r"CRIT:",
r"Shutting down...NOW",
r"Timer I_TERMINATE just popped",
r"input=I_ERROR",
r"input=I_FAIL",
r"input=I_INTEGRATED cause=C_TIMER_POPPED",
r"input=I_FINALIZED cause=C_TIMER_POPPED",
r"input=I_ERROR",
r"(pacemakerd|pacemaker-execd|pacemaker-controld):.*, exiting",
r"schedulerd.*Attempting recovery of resource",
r"is taking more than 2x its timeout",
r"Confirm not received from",
r"Welcome reply not received from",
r"Attempting to schedule .* after a stop",
r"Resource .* was active at shutdown",
r"duplicate entries for call_id",
r"Search terminated:",
r":global_timer_callback",
r"Faking parameter digest creation",
r"Parameters to .* action changed:",
r"Parameters to .* changed",
r"pacemakerd.*\[[0-9]+\] terminated( with signal| as IPC server|$)",
r"pacemaker-schedulerd.*Recover .*\(.* -\> .*\)",
r"rsyslogd.* imuxsock lost .* messages from pid .* due to rate-limiting",
r"Peer is not part of our cluster",
r"We appear to be in an election loop",
r"Unknown node -> we will not deliver message",
r"(Blackbox dump requested|Problem detected)",
r"pacemakerd.*Could not connect to Cluster Configuration Database API",
r"Receiving messages from a node we think is dead",
r"share the same cluster nodeid",
r"share the same name",
#r"crm_ipc_send:.*Request .* failed",
#r"crm_ipc_send:.*Sending to .* is disabled until pending reply is received",
# Not inherently bad, but worth tracking
#r"No need to invoke the TE",
#r"ping.*: DEBUG: Updated connected = 0",
#r"Digest mis-match:",
r"pacemaker-controld:.*Transition failed: terminated",
r"Local CIB .* differs from .*:",
r"warn.*:\s*Continuing but .* will NOT be used",
r"warn.*:\s*Cluster configuration file .* is corrupt",
#r"Executing .* fencing operation",
r"Election storm",
r"stalled the FSA with pending inputs",
]
self.components["common-ignore"] = [
r"Pending action:",
r"resource( was|s were) active at shutdown",
r"pending LRM operations at shutdown",
r"Lost connection to the CIB manager",
r"pacemaker-controld.*:\s*Action A_RECOVER .* not supported",
r"pacemaker-controld.*:\s*Performing A_EXIT_1 - forcefully exiting ",
r".*:\s*Requesting fencing \([^)]+\) of node ",
r"(Blackbox dump requested|Problem detected)",
]
self.components["corosync-ignore"] = [
r"Could not connect to Corosync CFG: CS_ERR_LIBRARY",
r"error:.*Connection to the CPG API failed: Library error",
r"\[[0-9]+\] exited with status [0-9]+ \(",
r"\[[0-9]+\] terminated with signal 15",
r"pacemaker-based.*error:.*Corosync connection lost",
r"pacemaker-fenced.*error:.*Corosync connection terminated",
r"pacemaker-controld.*State transition .* S_RECOVERY",
r"pacemaker-controld.*error:.*Input (I_ERROR|I_TERMINATE ) .*received in state",
r"pacemaker-controld.*error:.*Could not recover from internal error",
r"error:.*Connection to cib_(shm|rw).* (failed|closed)",
r"error:.*cib_(shm|rw) IPC provider disconnected while waiting",
r"error:.*Connection to (fencer|stonith-ng).* (closed|failed|lost)",
r"crit: Fencing daemon connection failed",
# This is overbroad, but we don't have a way to say that only
# certain transition errors are acceptable (if the fencer respawns,
# fence devices may appear multiply active). We have to rely on
# other causes of a transition error logging their own error
# message, which is the usual practice.
r"pacemaker-schedulerd.* Calculated transition .*/pe-error",
]
self.components["corosync"] = [
# We expect each daemon to lose its cluster connection.
# However, if the CIB manager loses its connection first,
# it's possible for another daemon to lose that connection and
# exit before losing the cluster connection.
r"pacemakerd.*:\s*warning:.*Lost connection to cluster layer",
r"pacemaker-attrd.*:\s*(crit|error):.*Lost connection to (cluster layer|the CIB manager)",
r"pacemaker-based.*:\s*(crit|error):.*Lost connection to cluster layer",
r"pacemaker-controld.*:\s*(crit|error):.*Lost connection to (cluster layer|the CIB manager)",
r"pacemaker-fenced.*:\s*(crit|error):.*Lost connection to (cluster layer|the CIB manager)",
- r"schedulerd.*Scheduling Node .* for STONITH",
+ r"schedulerd.*Scheduling node .* for fencing",
r"pacemaker-controld.*:\s*Peer .* was terminated \(.*\) by .* on behalf of .*:\s*OK",
]
self.components["pacemaker-based"] = [
r"pacemakerd.* pacemaker-attrd\[[0-9]+\] exited with status 102",
r"pacemakerd.* pacemaker-controld\[[0-9]+\] exited with status 1",
r"pacemakerd.* Respawning pacemaker-attrd subdaemon after unexpected exit",
r"pacemakerd.* Respawning pacemaker-based subdaemon after unexpected exit",
r"pacemakerd.* Respawning pacemaker-controld subdaemon after unexpected exit",
r"pacemakerd.* Respawning pacemaker-fenced subdaemon after unexpected exit",
r"pacemaker-.* Connection to cib_.* (failed|closed)",
r"pacemaker-attrd.*:.*Lost connection to the CIB manager",
r"pacemaker-controld.*:.*Lost connection to the CIB manager",
r"pacemaker-controld.*I_ERROR.*crmd_cib_connection_destroy",
r"pacemaker-controld.* State transition .* S_RECOVERY",
r"pacemaker-controld.*: Input I_TERMINATE .*from do_recover",
r"pacemaker-controld.*Could not recover from internal error",
]
self.components["pacemaker-based-ignore"] = [
r"pacemaker-execd.*Connection to (fencer|stonith-ng).* (closed|failed|lost)",
# This is overbroad, but we don't have a way to say that only
# certain transition errors are acceptable (if the fencer respawns,
# fence devices may appear multiply active). We have to rely on
# other causes of a transition error logging their own error
# message, which is the usual practice.
r"pacemaker-schedulerd.* Calculated transition .*/pe-error",
]
self.components["pacemaker-execd"] = [
r"pacemaker-controld.*Connection to executor failed",
r"pacemaker-controld.*I_ERROR.*lrm_connection_destroy",
r"pacemaker-controld.*State transition .* S_RECOVERY",
r"pacemaker-controld.*: Input I_TERMINATE .*from do_recover",
r"pacemaker-controld.*Could not recover from internal error",
r"pacemakerd.*pacemaker-controld\[[0-9]+\] exited with status 1",
r"pacemakerd.* Respawning pacemaker-execd subdaemon after unexpected exit",
r"pacemakerd.* Respawning pacemaker-controld subdaemon after unexpected exit",
]
self.components["pacemaker-execd-ignore"] = [
r"pacemaker-(attrd|controld).*Connection to lrmd.* (failed|closed)",
r"pacemaker-(attrd|controld).*Could not execute alert",
]
self.components["pacemaker-controld"] = [
# "WARN: determine_online_status: Node .* is unclean",
-# "Scheduling Node .* for STONITH",
+# "Scheduling node .* for fencing",
# Only if the node wasn't the DC: "State transition S_IDLE",
"State transition .* -> S_IDLE",
]
self.components["pacemaker-controld-ignore"] = []
self.components["pacemaker-attrd"] = []
self.components["pacemaker-attrd-ignore"] = []
self.components["pacemaker-schedulerd"] = [
"State transition .* S_RECOVERY",
r"pacemakerd.* Respawning pacemaker-controld subdaemon after unexpected exit",
r"pacemaker-controld\[[0-9]+\] exited with status 1 \(",
r"Connection to the scheduler failed",
"pacemaker-controld.*I_ERROR.*save_cib_contents",
r"pacemaker-controld.*: Input I_TERMINATE .*from do_recover",
"pacemaker-controld.*Could not recover from internal error",
]
self.components["pacemaker-schedulerd-ignore"] = [
r"Connection to pengine.* (failed|closed)",
]
self.components["pacemaker-fenced"] = [
r"error:.*Connection to (fencer|stonith-ng).* (closed|failed|lost)",
r"Fencing daemon connection failed",
r"pacemaker-controld.*Fencer successfully connected",
]
self.components["pacemaker-fenced-ignore"] = [
r"error:.*Connection to (fencer|stonith-ng).* (closed|failed|lost)",
r"crit:.*Fencing daemon connection failed",
r"error:.*Fencer connection failed \(will retry\)",
r"Connection to (fencer|stonith-ng) failed, finalizing .* pending operations",
r"pacemaker-controld.*:\s+Result of .* operation for Fencing.*Error",
# This is overbroad, but we don't have a way to say that only
# certain transition errors are acceptable (if the fencer respawns,
# fence devices may appear multiply active). We have to rely on
# other causes of a transition error logging their own error
# message, which is the usual practice.
r"pacemaker-schedulerd.* Calculated transition .*/pe-error",
]
self.components["pacemaker-fenced-ignore"].extend(self.components["common-ignore"])
class crm_corosync_docker(crm_corosync):
'''
Patterns for Corosync version 2 cluster manager class
'''
def __init__(self, name):
crm_corosync.__init__(self, name)
self.commands.update({
"StartCmd" : "pcmk_start",
"StopCmd" : "pcmk_stop",
})
class PatternSelector(object):
def __init__(self, name=None):
self.name = name
self.base = BasePatterns("crm-base")
if not name:
crm_corosync("crm-corosync")
elif name == "crm-corosync":
crm_corosync(name)
elif name == "crm-corosync-docker":
crm_corosync_docker(name)
def get_variant(self, variant):
if variant in patternvariants:
return patternvariants[variant]
print("defaulting to crm-base for %s" % variant)
return self.base
def get_patterns(self, variant, kind):
return self.get_variant(variant).get_patterns(kind)
def get_template(self, variant, key):
v = self.get_variant(variant)
return v[key]
def get_component(self, variant, kind):
return self.get_variant(variant).get_component(kind)
def __getitem__(self, key):
return self.get_template(self.name, key)
# python cts/CTSpatt.py -k crm-corosync -t StartCmd
if __name__ == '__main__':
pdir=os.path.dirname(sys.path[0])
sys.path.insert(0, pdir) # So that things work from the source directory
kind=None
template=None
skipthis=None
args=sys.argv[1:]
for i in range(0, len(args)):
if skipthis:
skipthis=None
continue
elif args[i] == "-k" or args[i] == "--kind":
skipthis=1
kind = args[i+1]
elif args[i] == "-t" or args[i] == "--template":
skipthis=1
template = args[i+1]
else:
print("Illegal argument " + args[i])
print(PatternSelector(kind)[template])
diff --git a/doc/sphinx/Pacemaker_Development/components.rst b/doc/sphinx/Pacemaker_Development/components.rst
index 42b8a5eecf..a0b904becf 100644
--- a/doc/sphinx/Pacemaker_Development/components.rst
+++ b/doc/sphinx/Pacemaker_Development/components.rst
@@ -1,383 +1,377 @@
Coding Particular Pacemaker Components
--------------------------------------
The Pacemaker code can be intricate and difficult to follow. This chapter has
some high-level descriptions of how individual components work.
.. index::
single: fencer
single: pacemaker-fenced
Fencer
######
``pacemaker-fenced`` is the Pacemaker daemon that handles fencing requests. In
the broadest terms, fencing works like this:
#. The initiator (an external program such as ``stonith_admin``, or the cluster
itself via the controller) asks the local fencer, "Hey, could you please
fence this node?"
#. The local fencer asks all the fencers in the cluster (including itself),
"Hey, what fencing devices do you have access to that can fence this node?"
#. Each fencer in the cluster replies with a list of available devices that
it knows about.
#. Once the original fencer gets all the replies, it asks the most
appropriate fencer peer to actually carry out the fencing. It may send
out more than one such request if the target node must be fenced with
multiple devices.
#. The chosen fencer(s) call the appropriate fencing resource agent(s) to
do the fencing, then reply to the original fencer with the result.
#. The original fencer broadcasts the result to all fencers.
#. Each fencer sends the result to each of its local clients (including, at
some point, the initiator).
A more detailed description follows.
.. index::
single: libstonithd
Initiating a fencing request
____________________________
A fencing request can be initiated by the cluster or externally, using the
libstonithd API.
* The cluster always initiates fencing via
``daemons/controld/controld_fencing.c:te_fence_node()`` (which calls the
``fence()`` API method). This occurs when a transition graph synapse contains
a ``CRM_OP_FENCE`` XML operation.
* The main external clients are ``stonith_admin`` and ``cts-fence-helper``.
The ``DLM`` project also uses Pacemaker for fencing.
Highlights of the fencing API:
* ``stonith_api_new()`` creates and returns a new ``stonith_t`` object, whose
``cmds`` member has methods for connect, disconnect, fence, etc.
* the ``fence()`` method creates and sends a ``STONITH_OP_FENCE`` XML request with
the desired action and target node. Callers do not have to choose or even
have any knowledge about particular fencing devices.
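As a rough illustration of the client side of this API, the sketch below
strings those calls together. It is not production code: the function name,
client name, and timeouts are arbitrary, and error handling is minimal.

.. code-block:: c

   #include <errno.h>
   #include <crm/crm.h>          // pcmk_ok
   #include <crm/stonith-ng.h>   // stonith_t, stonith_api_new(), etc.

   int
   fence_one_node(const char *target)
   {
       stonith_t *st = stonith_api_new();
       int rc = -ENOMEM;

       if (st != NULL) {
           rc = st->cmds->connect(st, "fence-sketch", NULL);
           if (rc == pcmk_ok) {
               /* The action could also be "off" or "on"; the timeout is in
                * seconds, and a tolerance of 0 means "do not reuse a recent
                * equivalent result".
                */
               rc = st->cmds->fence(st, st_opt_sync_call, target, "reboot",
                                    120, 0);
               st->cmds->disconnect(st);
           }
           stonith_api_delete(st);
       }
       return rc;
   }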
Fencing queries
_______________
The function calls for a fencing request go something like this:
The local fencer receives the client's request via an IPC or messaging
layer callback, which calls
* ``stonith_command()``, which (for requests) calls
* ``handle_request()``, which (for ``STONITH_OP_FENCE`` from a client) calls
* ``initiate_remote_stonith_op()``, which creates a ``STONITH_OP_QUERY`` XML
request with the target, desired action, timeout, etc. then broadcasts
the operation to the cluster group (i.e. all fencer instances) and
starts a timer. The query is broadcast because (1) location constraints
might prevent the local node from accessing the stonith device directly,
and (2) even if the local node does have direct access, another node
might be preferred to carry out the fencing.
Each fencer receives the original fencer's ``STONITH_OP_QUERY`` broadcast
request via IPC or messaging layer callback, which calls:
* ``stonith_command()``, which (for requests) calls
* ``handle_request()``, which (for ``STONITH_OP_QUERY`` from a peer) calls
* ``stonith_query()``, which calls
* ``get_capable_devices()`` with ``stonith_query_capable_device_cb()`` to add
device information to an XML reply and send it. (A message is
considered a reply if it contains ``T_STONITH_REPLY``, which is only
set by fencer peers, not clients.)
The original fencer receives all peers' ``STONITH_OP_QUERY`` replies via IPC
or messaging layer callback, which calls:
* ``stonith_command()``, which (for replies) calls
* ``handle_reply()`` which (for ``STONITH_OP_QUERY``) calls
* ``process_remote_stonith_query()``, which allocates a new query result
structure, parses device information into it, and adds it to the
operation object. It increments the number of replies received for this
operation, and compares it against the expected number of replies (i.e.
the number of active peers), and if this is the last expected reply,
calls
* ``request_peer_fencing()``, which calculates the timeout and sends
``STONITH_OP_FENCE`` request(s) to carry out the fencing. If the target
node has a fencing "topology" (which allows specifications such as
"this node can be fenced either with device A, or devices B and C in
combination"), it will choose the device(s), and send out as many
requests as needed. If it chooses a device, it will choose the peer; a
peer is preferred if it has "verified" access to the desired device,
meaning that it has the device "running" on it and thus has a monitor
operation ensuring reachability.
Fencing operations
__________________
Each ``STONITH_OP_FENCE`` request goes something like this:
The chosen peer fencer receives the ``STONITH_OP_FENCE`` request via IPC or
messaging layer callback, which calls:
* ``stonith_command()``, which (for requests) calls
* ``handle_request()``, which (for ``STONITH_OP_FENCE`` from a peer) calls
* ``stonith_fence()``, which calls
* ``schedule_stonith_command()`` (using supplied device if
``F_STONITH_DEVICE`` was set, otherwise the highest-priority capable
device obtained via ``get_capable_devices()`` with
``stonith_fence_get_devices_cb()``), which adds the operation to the
device's pending operations list and triggers processing.
The chosen peer fencer's mainloop is triggered and calls
* ``stonith_device_dispatch()``, which calls
* ``stonith_device_execute()``, which pops off the next item from the device's
pending operations list. If acting as the (internally implemented) watchdog
agent, it panics the node, otherwise it calls
* ``stonith_action_create()`` and ``stonith_action_execute_async()`` to
call the fencing agent.
The chosen peer fencer's mainloop is triggered again once the fencing agent
returns, and calls
* ``stonith_action_async_done()`` which adds the results to an action object
then calls its
* done callback (``st_child_done()``), which calls ``schedule_stonith_command()``
for a new device if there are further required actions to execute or if the
original action failed, then builds and sends an XML reply to the original
fencer (via ``send_async_reply()``), then checks whether any
pending actions are the same as the one just executed and merges them if so.
Fencing replies
_______________
The original fencer receives the ``STONITH_OP_FENCE`` reply via IPC or
messaging layer callback, which calls:
* ``stonith_command()``, which (for replies) calls
* ``handle_reply()``, which calls
* ``fenced_process_fencing_reply()``, which calls either
``request_peer_fencing()`` (to retry a failed operation, or try the next
device in a topology if appropriate, which issues a new
``STONITH_OP_FENCE`` request, proceeding as before) or
``finalize_op()`` (if the operation is definitively failed or
successful).
* ``finalize_op()`` broadcasts the result to all peers.
Finally, all peers receive the broadcast result and call
* ``finalize_op()``, which sends the result to all local clients.
.. index::
single: fence history
Fencing History
_______________
The fencer keeps a running history of all fencing operations. The bulk of the
relevant code is in ``fenced_history.c`` and ensures the history is synchronized
across all nodes even if a node leaves and rejoins the cluster.
In libstonithd, this information is represented by ``stonith_history_t`` and is
queryable by the ``stonith_api_operations_t:history()`` method. ``crm_mon`` and
``stonith_admin`` use this API to display the history.
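The sketch below is one way a client might read that history through the API
just mentioned; it assumes an already-connected ``stonith_t`` object and only
prints completed operations.

.. code-block:: c

   #include <stdio.h>
   #include <crm/stonith-ng.h>

   void
   show_fencing_history(stonith_t *st)
   {
       stonith_history_t *history = NULL;

       // A NULL target means "all nodes"; the timeout is in seconds
       st->cmds->history(st, st_opt_sync_call, NULL, &history, 120);

       for (stonith_history_t *hp = history; hp != NULL; hp = hp->next) {
           if (hp->state == st_done) {
               printf("%s of %s completed at %lld\n",
                      hp->action, hp->target, (long long) hp->completed);
           }
       }
       stonith_history_free(history);
   }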
.. index::
single: scheduler
single: pacemaker-schedulerd
single: libpe_status
single: libpe_rules
single: libpacemaker
Scheduler
#########
``pacemaker-schedulerd`` is the Pacemaker daemon that runs the Pacemaker
scheduler for the controller, but "the scheduler" in general refers to related
library code in ``libpe_status`` and ``libpe_rules`` (``lib/pengine/*.c``), and
some of ``libpacemaker`` (``lib/pacemaker/pcmk_sched_*.c``).
The purpose of the scheduler is to take a CIB as input and generate a
transition graph (list of actions that need to be taken) as output.
The controller invokes the scheduler by contacting the scheduler daemon via
local IPC. Tools such as ``crm_simulate``, ``crm_mon``, and ``crm_resource``
can also invoke the scheduler, but do so by calling the library functions
directly. This allows them to run using a ``CIB_file`` without the cluster
needing to be active.
The main entry point for the scheduler code is
-``lib/pacemaker/pcmk_sched_messages.c:pcmk__schedule_actions()``. It sets
-defaults and calls a series of functions for each "stage" of the scheduling.
-(Some of the functions are named like ``stageN()`` but the code has evolved
-over time to where the numbers no longer make sense. A project is in progress
-to reorganize and rename them.)
-
-* ``stage0()`` "unpacks" most of the CIB XML into data structures, and
- determines the current cluster status. It also creates implicit location
- constraints for the node health feature.
-* ``stage2()`` applies factors that make resources prefer certain nodes (such
- as shutdown locks, location constraints, and stickiness).
-* ``pcmk__create_internal_constraints()`` creates internal constraints (such as
+``lib/pacemaker/pcmk_sched_allocate.c:pcmk__schedule_actions()``. It sets
+defaults and calls a series of functions for the scheduling. Some key steps:
+
+* ``unpack_cib()`` parses most of the CIB XML into data structures, and
+ determines the current cluster status.
+* ``apply_node_criteria()`` applies factors that make resources prefer certain
+ nodes, such as shutdown locks, location constraints, and stickiness.
+* ``pcmk__create_internal_constraints()`` creates internal constraints, such as
the implicit ordering for group members, or start actions being implicitly
- ordered before promote actions).
-* ``stage4()`` "checks actions", which means processing resource history
- entries in the CIB status section. This is used to decide whether certain
+ ordered before promote actions.
+* ``pcmk__handle_rsc_config_changes()`` processes resource history entries in
+ the CIB status section. This is used to decide whether certain
actions need to be done, such as deleting orphan resources, forcing a restart
when a resource definition changes, etc.
-* ``stage5()`` allocates resources to nodes and creates actions (which might or
- might not end up in the final graph).
-* ``stage6()`` creates implicit ordering constraints for resources running
- across remote connections, and schedules fencing actions and shutdowns.
-* ``stage7()`` "updates actions", which means applying ordering constraints in
- order to modify action attributes such as optional or required.
+* ``allocate_resources()`` assigns resources to nodes.
+* ``schedule_resource_actions()`` schedules resource-specific actions (which
+ might or might not end up in the final graph).
+* ``pcmk__apply_orderings()`` processes ordering constraints in order to modify
+ action attributes such as optional or required.
* ``pcmk__create_graph()`` creates the transition graph.
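For orientation, the sketch below shows roughly how a tool built within the
Pacemaker tree can drive this entry point against a ``CIB_file``-style input.
``schedule_from_file()`` is a made-up name, the internal include path is an
assumption, and error handling is omitted.

.. code-block:: c

   #include <crm/common/xml.h>        // filename2xml()
   #include <crm/pengine/status.h>    // pe_new_working_set()
   #include <pcmki/pcmki_scheduler.h> // pcmk__schedule_actions()

   void
   schedule_from_file(const char *cib_file)
   {
       xmlNode *cib = filename2xml(cib_file);
       pe_working_set_t *data_set = pe_new_working_set();

       /* Unpacks the CIB and runs the steps described above; the resulting
        * transition graph XML is left in data_set->graph.
        */
       pcmk__schedule_actions(cib, pe_flag_no_compat, data_set);

       pe_free_working_set(data_set); // the working set took ownership of the CIB
   }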
Challenges
__________
Working with the scheduler is difficult. Challenges include:
* It is far too much code to keep more than a small portion in your head at one
time.
* Small changes can have large (and unexpected) effects. This is why we have a
large number of regression tests (``cts/cts-scheduler``), which should be run
after making code changes.
* It produces an insane amount of log messages at debug and trace levels.
You can put resource ID(s) in the ``PCMK_trace_tags`` environment variable to
enable trace-level messages only when related to specific resources.
* Different parts of the main ``pe_working_set_t`` structure are finalized at
different points in the scheduling process, so you have to keep in mind
whether information you're using at one point of the code can possibly change
later. For example, data unpacked from the CIB can safely be used anytime
- after ``stage0(),`` but actions may become optional or required anytime
+ after ``unpack_cib()``, but actions may become optional or required anytime
before ``pcmk__create_graph()``. There's no easy way to deal with this.
* Many names of struct members, functions, etc., are suboptimal, but are part
of the public API and cannot be changed until an API backward compatibility
break.
.. index::
single: pe_working_set_t
Cluster Working Set
___________________
The main data object for the scheduler is ``pe_working_set_t``, which contains
all information needed about nodes, resources, constraints, etc., both as the
raw CIB XML and parsed into more usable data structures, plus the resulting
transition graph XML. The variable name is usually ``data_set``.
.. index::
single: pe_resource_t
Resources
_________
``pe_resource_t`` is the data object representing cluster resources. A resource
has a variant: primitive (a.k.a. native), group, clone, or bundle.
The resource object has members for two sets of methods,
``resource_object_functions_t`` from the ``libpe_status`` public API, and
``resource_alloc_functions_t`` whose implementation is internal to
``libpacemaker``. The actual functions vary by variant.
The object functions have basic capabilities such as unpacking the resource
XML, and determining the current or planned location of the resource.
The allocation functions have more obscure capabilities needed for scheduling,
such as processing location and ordering constraints. For example,
-``stage3()``, which creates internal constraints, simply calls the
-``internal_constraints()`` method for each top-level resource in the working
-set.
+``pcmk__create_internal_constraints()`` simply calls the
+``internal_constraints()`` method for each top-level resource in the cluster.
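In other words, the pattern is a simple dispatch over the resource list. A
simplified sketch (not the library's actual code, and only meaningful inside
``libpacemaker``, where the allocation method table is defined):

.. code-block:: c

   static void
   create_internal_constraints_sketch(pe_working_set_t *data_set)
   {
       for (GList *iter = data_set->resources; iter != NULL; iter = iter->next) {
           pe_resource_t *rsc = (pe_resource_t *) iter->data;

           // each variant (primitive, group, clone, bundle) supplies its own
           // internal_constraints() implementation
           rsc->cmds->internal_constraints(rsc, data_set);
       }
   }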
.. index::
single: pe_node_t
Nodes
_____
Allocation of resources to nodes is done by choosing the node with the highest
score for a given resource. The scheduler does a bunch of processing to
generate the scores, then the actual allocation is straightforward.
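Stripped of ties, utilization, and every other factor the real code weighs,
the core idea looks like this hypothetical helper:

.. code-block:: c

   #include <glib.h>
   #include <crm/pengine/status.h>

   // Pick the allowed node with the highest weight (score) for a resource
   static pe_node_t *
   highest_scored_node(pe_resource_t *rsc)
   {
       GHashTableIter iter;
       pe_node_t *node = NULL;
       pe_node_t *best = NULL;

       g_hash_table_iter_init(&iter, rsc->allowed_nodes);
       while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &node)) {
           if ((best == NULL) || (node->weight > best->weight)) {
               best = node;
           }
       }
       return best;
   }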
Node lists are frequently used. For example, ``pe_working_set_t`` has a
``nodes`` member which is a list of all nodes in the cluster, and
``pe_resource_t`` has a ``running_on`` member which is a list of all nodes on
which the resource is (or might be) active. These are lists of ``pe_node_t``
objects.
The ``pe_node_t`` object contains a ``struct pe_node_shared_s *details`` member
with all node information that is independent of resource allocation (the node
name, etc.).
The working set's ``nodes`` member contains the original of this information.
All other node lists contain copies of ``pe_node_t`` where only the ``details``
member points to the originals in the working set's ``nodes`` list. In this
way, the other members of ``pe_node_t`` (such as ``weight``, which is the node
score) may vary by node list, while the common details are shared.
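A hedged illustration of that shared-details pattern (not the library's actual
copy function, just its shape):

.. code-block:: c

   #include <stdlib.h>
   #include <crm/pengine/status.h>

   static pe_node_t *
   copy_node_for_list(const pe_node_t *original)
   {
       pe_node_t *copy = calloc(1, sizeof(pe_node_t));

       copy->details = original->details;  // shared canonical node information
       copy->weight = original->weight;    // per-list score; may diverge later
       copy->rsc_discover_mode = original->rsc_discover_mode;
       return copy;
   }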
.. index::
single: pe_action_t
single: pe_action_flags
Actions
_______
``pe_action_t`` is the data object representing actions that might need to be
taken. These could be resource actions, cluster-wide actions such as fencing a
node, or "pseudo-actions" which are abstractions used as convenient points for
ordering other actions against.
It has a ``flags`` member which is a bitmask of ``enum pe_action_flags``. The
most important of these are ``pe_action_runnable`` (if not set, the action is
"blocked" and cannot be added to the transition graph) and
``pe_action_optional`` (actions with this set will not be added to the
transition graph; actions often start out as optional, and may become required
later).
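A hedged sketch of how those two flags decide graph membership (this mirrors
the description above rather than the graph producer's exact code):

.. code-block:: c

   #include <stdbool.h>
   #include <crm/common/util.h>     // pcmk_is_set()
   #include <crm/pengine/status.h>

   static bool
   action_would_be_graphed(const pe_action_t *action)
   {
       if (pcmk_is_set(action->flags, pe_action_optional)) {
           return false;  // optional actions are left out of the graph
       }
       if (!pcmk_is_set(action->flags, pe_action_runnable)) {
           return false;  // "blocked": required but cannot run
       }
       return true;
   }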
.. index::
single: pe__ordering_t
single: pe_ordering
Orderings
_________
Ordering constraints are simple in concept, but they are one of the most
important, powerful, and difficult to follow aspects of the scheduler code.
``pe__ordering_t`` is the data object representing an ordering, better thought
of as a relationship between two actions, since the relation can be more
complex than just "this one runs after that one".
For an ordering "A then B", the code generally refers to A as "first" or
"before", and B as "then" or "after".
Much of the power comes from ``enum pe_ordering``, which are flags that
determine how an ordering behaves. There are many obscure flags with big
effects. A few examples:
* ``pe_order_none`` means the ordering is disabled and will be ignored. It's 0,
meaning no flags set, so it must be compared with equality rather than
``pcmk_is_set()``.
* ``pe_order_optional`` means the ordering does not make either action
required, so it only applies if they both become required for other reasons.
* ``pe_order_implies_first`` means that if action B becomes required for any
reason, then action A will become required as well.
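A hedged sketch of inspecting those flags, assuming the ``pe__ordering_t``
``type`` member (as declared in the internal scheduler headers) holds the
``enum pe_ordering`` bitmask:

.. code-block:: c

   static void
   describe_ordering(const pe__ordering_t *order)
   {
       if (order->type == pe_order_none) {
           // no flags at all, so equality (not pcmk_is_set()) is required
           return;
       }
       if (pcmk_is_set(order->type, pe_order_optional)) {
           // applies only if both actions become required for other reasons
       }
       if (pcmk_is_set(order->type, pe_order_implies_first)) {
           // if the 'then' action becomes required, so does the 'first' one
       }
   }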
diff --git a/include/pcmki/pcmki_scheduler.h b/include/pcmki/pcmki_scheduler.h
index 39b99cbad5..58cbe9544b 100644
--- a/include/pcmki/pcmki_scheduler.h
+++ b/include/pcmki/pcmki_scheduler.h
@@ -1,98 +1,95 @@
/*
* Copyright 2014-2022 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#ifndef PCMK__PCMKI_PCMKI_SCHEDULER__H
# define PCMK__PCMKI_PCMKI_SCHEDULER__H
typedef struct rsc_ticket_s rsc_ticket_t;
# include
# include
# include
# include
# include
# include
# include
enum pe_weights {
pe_weights_none = 0x0,
pe_weights_init = 0x1,
pe_weights_forward = 0x4,
pe_weights_positive = 0x8,
pe_weights_rollback = 0x10,
};
typedef struct {
const char *id;
const char *node_attribute;
pe_resource_t *dependent; // The resource being colocated
pe_resource_t *primary; // The resource the dependent is colocated with
int dependent_role; // Colocation applies only if dependent has this role
int primary_role; // Colocation applies only if primary has this role
int score;
bool influence; // Whether dependent influences active primary placement
} pcmk__colocation_t;
enum loss_ticket_policy_e {
loss_ticket_stop,
loss_ticket_demote,
loss_ticket_fence,
loss_ticket_freeze
};
struct rsc_ticket_s {
const char *id;
pe_resource_t *rsc_lh;
pe_ticket_t *ticket;
enum loss_ticket_policy_e loss_policy;
int role_lh;
};
-extern gboolean stage5(pe_working_set_t * data_set);
-extern gboolean stage6(pe_working_set_t * data_set);
-
void pcmk__unpack_constraints(pe_working_set_t *data_set);
extern void add_maintenance_update(pe_working_set_t *data_set);
void pcmk__schedule_actions(xmlNode *cib, unsigned long long flags,
pe_working_set_t *data_set);
extern const char *transition_idle_timeout;
/*!
* \internal
* \brief Check whether colocation's left-hand preferences should be considered
*
* \param[in] colocation Colocation constraint
* \param[in] rsc Right-hand instance (normally this will be
* colocation->primary, which is how a NULL value is treated,
* but for clones or bundles with multiple instances
* this can be a particular instance)
*
* \return true if colocation influence should be effective, otherwise false
*/
static inline bool
pcmk__colocation_has_influence(const pcmk__colocation_t *colocation,
const pe_resource_t *rsc)
{
if (rsc == NULL) {
rsc = colocation->primary;
}
/* The left hand of a colocation influences the right hand's location
* if the influence option is true, or the right hand is not yet active.
*/
return colocation->influence || (rsc->running_on == NULL);
}
#endif
diff --git a/lib/pacemaker/libpacemaker_private.h b/lib/pacemaker/libpacemaker_private.h
index a46aa34291..e39e84d87b 100644
--- a/lib/pacemaker/libpacemaker_private.h
+++ b/lib/pacemaker/libpacemaker_private.h
@@ -1,390 +1,397 @@
/*
* Copyright 2021-2022 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#ifndef PCMK__LIBPACEMAKER_PRIVATE__H
# define PCMK__LIBPACEMAKER_PRIVATE__H
/* This header is for the sole use of libpacemaker, so that functions can be
* declared with G_GNUC_INTERNAL for efficiency.
*/
#include // pe_action_t, pe_node_t, pe_working_set_t
// Actions (pcmk_sched_actions.c)
G_GNUC_INTERNAL
void pcmk__update_action_for_orderings(pe_action_t *action,
pe_working_set_t *data_set);
G_GNUC_INTERNAL
void pcmk__log_action(const char *pre_text, pe_action_t *action, bool details);
G_GNUC_INTERNAL
pe_action_t *pcmk__new_rsc_pseudo_action(pe_resource_t *rsc, const char *task,
bool optional, bool runnable);
G_GNUC_INTERNAL
pe_action_t *pcmk__new_cancel_action(pe_resource_t *rsc, const char *name,
guint interval_ms, pe_node_t *node);
G_GNUC_INTERNAL
pe_action_t *pcmk__new_shutdown_action(pe_node_t *node,
pe_working_set_t *data_set);
G_GNUC_INTERNAL
bool pcmk__action_locks_rsc_to_node(const pe_action_t *action);
G_GNUC_INTERNAL
void pcmk__deduplicate_action_inputs(pe_action_t *action);
G_GNUC_INTERNAL
void pcmk__output_actions(pe_working_set_t *data_set);
G_GNUC_INTERNAL
bool pcmk__check_action_config(pe_resource_t *rsc, pe_node_t *node,
xmlNode *xml_op);
G_GNUC_INTERNAL
void pcmk__handle_rsc_config_changes(pe_working_set_t *data_set);
// Producing transition graphs (pcmk_graph_producer.c)
G_GNUC_INTERNAL
bool pcmk__graph_has_loop(pe_action_t *init_action, pe_action_t *action,
pe_action_wrapper_t *input);
G_GNUC_INTERNAL
void pcmk__add_action_to_graph(pe_action_t *action, pe_working_set_t *data_set);
G_GNUC_INTERNAL
void pcmk__create_graph(pe_working_set_t *data_set);
// Fencing (pcmk_sched_fencing.c)
G_GNUC_INTERNAL
void pcmk__order_vs_fence(pe_action_t *stonith_op, pe_working_set_t *data_set);
G_GNUC_INTERNAL
void pcmk__order_vs_unfence(pe_resource_t *rsc, pe_node_t *node,
pe_action_t *action, enum pe_ordering order,
pe_working_set_t *data_set);
G_GNUC_INTERNAL
void pcmk__fence_guest(pe_node_t *node, pe_working_set_t *data_set);
G_GNUC_INTERNAL
bool pcmk__node_unfenced(pe_node_t *node);
G_GNUC_INTERNAL
bool pcmk__is_unfence_device(const pe_resource_t *rsc,
const pe_working_set_t *data_set);
// Injected scheduler inputs (pcmk_sched_injections.c)
void pcmk__inject_scheduler_input(pe_working_set_t *data_set, cib_t *cib,
pcmk_injections_t *injections);
// Constraints of any type (pcmk_sched_constraints.c)
G_GNUC_INTERNAL
pe_resource_t *pcmk__find_constraint_resource(GList *rsc_list, const char *id);
G_GNUC_INTERNAL
xmlNode *pcmk__expand_tags_in_sets(xmlNode *xml_obj,
pe_working_set_t *data_set);
G_GNUC_INTERNAL
bool pcmk__valid_resource_or_tag(pe_working_set_t *data_set, const char *id,
pe_resource_t **rsc, pe_tag_t **tag);
G_GNUC_INTERNAL
bool pcmk__tag_to_set(xmlNode *xml_obj, xmlNode **rsc_set, const char *attr,
bool convert_rsc, pe_working_set_t *data_set);
G_GNUC_INTERNAL
void pcmk__create_internal_constraints(pe_working_set_t *data_set);
// Location constraints
G_GNUC_INTERNAL
void pcmk__unpack_location(xmlNode *xml_obj, pe_working_set_t *data_set);
G_GNUC_INTERNAL
pe__location_t *pcmk__new_location(const char *id, pe_resource_t *rsc,
int node_weight, const char *discover_mode,
pe_node_t *foo_node,
pe_working_set_t *data_set);
G_GNUC_INTERNAL
void pcmk__apply_locations(pe_working_set_t *data_set);
G_GNUC_INTERNAL
void pcmk__apply_location(pe__location_t *constraint, pe_resource_t *rsc);
// Colocation constraints
enum pcmk__coloc_affects {
pcmk__coloc_affects_nothing = 0,
pcmk__coloc_affects_location,
pcmk__coloc_affects_role,
};
G_GNUC_INTERNAL
enum pcmk__coloc_affects pcmk__colocation_affects(pe_resource_t *dependent,
pe_resource_t *primary,
pcmk__colocation_t *constraint,
bool preview);
G_GNUC_INTERNAL
void pcmk__apply_coloc_to_weights(pe_resource_t *dependent,
pe_resource_t *primary,
pcmk__colocation_t *constraint);
G_GNUC_INTERNAL
void pcmk__apply_coloc_to_priority(pe_resource_t *dependent,
pe_resource_t *primary,
pcmk__colocation_t *constraint);
G_GNUC_INTERNAL
void pcmk__unpack_colocation(xmlNode *xml_obj, pe_working_set_t *data_set);
G_GNUC_INTERNAL
void pcmk__new_colocation(const char *id, const char *node_attr, int score,
pe_resource_t *dependent, pe_resource_t *primary,
const char *dependent_role, const char *primary_role,
bool influence, pe_working_set_t *data_set);
G_GNUC_INTERNAL
void pcmk__block_colocated_starts(pe_action_t *action,
pe_working_set_t *data_set);
+
+// Ordering constraints (pcmk_sched_ordering.c)
+
G_GNUC_INTERNAL
void pcmk__new_ordering(pe_resource_t *lh_rsc, char *lh_task,
pe_action_t *lh_action, pe_resource_t *rh_rsc,
char *rh_task, pe_action_t *rh_action,
enum pe_ordering type, pe_working_set_t *data_set);
G_GNUC_INTERNAL
void pcmk__unpack_ordering(xmlNode *xml_obj, pe_working_set_t *data_set);
G_GNUC_INTERNAL
void pcmk__disable_invalid_orderings(pe_working_set_t *data_set);
G_GNUC_INTERNAL
void pcmk__order_stops_before_shutdown(pe_node_t *node,
pe_action_t *shutdown_op,
pe_working_set_t *data_set);
G_GNUC_INTERNAL
void pcmk__apply_orderings(pe_working_set_t *data_set);
+G_GNUC_INTERNAL
+void pcmk__order_after_each(pe_action_t *after, GList *list);
+
+
/*!
* \internal
* \brief Create a new ordering between two resource actions
*
* \param[in] lh_rsc Resource for 'first' action
* \param[in] rh_rsc Resource for 'then' action
* \param[in] lh_task Action key for 'first' action
* \param[in] rh_task Action key for 'then' action
* \param[in] flags Bitmask of enum pe_ordering flags
* \param[in] data_set Cluster working set to add ordering to
*/
#define pcmk__order_resource_actions(lh_rsc, lh_task, rh_rsc, rh_task, \
flags, data_set) \
pcmk__new_ordering((lh_rsc), pcmk__op_key((lh_rsc)->id, (lh_task), 0), \
NULL, \
(rh_rsc), pcmk__op_key((rh_rsc)->id, (rh_task), 0), \
NULL, (flags), (data_set))
#define pcmk__order_starts(rsc1, rsc2, type, data_set) \
pcmk__order_resource_actions((rsc1), CRMD_ACTION_START, \
(rsc2), CRMD_ACTION_START, (type), (data_set))
#define pcmk__order_stops(rsc1, rsc2, type, data_set) \
pcmk__order_resource_actions((rsc1), CRMD_ACTION_STOP, \
(rsc2), CRMD_ACTION_STOP, (type), (data_set))
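/* Example (hypothetical resources, for illustration only): order rsc1's start
 * before rsc2's start without making either action required:
 *
 *     pcmk__order_starts(rsc1, rsc2, pe_order_optional, data_set);
 */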
G_GNUC_INTERNAL
void pcmk__unpack_rsc_ticket(xmlNode *xml_obj, pe_working_set_t *data_set);
G_GNUC_INTERNAL
bool pcmk__is_failed_remote_node(pe_node_t *node);
G_GNUC_INTERNAL
void pcmk__order_remote_connection_actions(pe_working_set_t *data_set);
G_GNUC_INTERNAL
bool pcmk__rsc_corresponds_to_guest(pe_resource_t *rsc, pe_node_t *node);
G_GNUC_INTERNAL
pe_node_t *pcmk__connection_host_for_action(pe_action_t *action);
G_GNUC_INTERNAL
void pcmk__substitute_remote_addr(pe_resource_t *rsc, GHashTable *params,
pe_working_set_t *data_set);
G_GNUC_INTERNAL
void pcmk__add_bundle_meta_to_xml(xmlNode *args_xml, pe_action_t *action);
// Groups (pcmk_sched_group.c)
G_GNUC_INTERNAL
GList *pcmk__group_colocated_resources(pe_resource_t *rsc,
pe_resource_t *orig_rsc,
GList *colocated_rscs);
// Bundles (pcmk_sched_bundle.c)
G_GNUC_INTERNAL
void pcmk__output_bundle_actions(pe_resource_t *rsc);
// Injections (pcmk_injections.c)
G_GNUC_INTERNAL
xmlNode *pcmk__inject_node(cib_t *cib_conn, const char *node, const char *uuid);
G_GNUC_INTERNAL
xmlNode *pcmk__inject_node_state_change(cib_t *cib_conn, const char *node,
bool up);
G_GNUC_INTERNAL
xmlNode *pcmk__inject_resource_history(pcmk__output_t *out, xmlNode *cib_node,
const char *resource,
const char *lrm_name,
const char *rclass,
const char *rtype,
const char *rprovider);
G_GNUC_INTERNAL
void pcmk__inject_failcount(pcmk__output_t *out, xmlNode *cib_node,
const char *resource, const char *task,
guint interval_ms, int rc);
G_GNUC_INTERNAL
xmlNode *pcmk__inject_action_result(xmlNode *cib_resource,
lrmd_event_data_t *op, int target_rc);
// Nodes (pcmk_sched_nodes.c)
G_GNUC_INTERNAL
bool pcmk__node_available(const pe_node_t *node);
G_GNUC_INTERNAL
bool pcmk__any_node_available(GHashTable *nodes);
G_GNUC_INTERNAL
GHashTable *pcmk__copy_node_table(GHashTable *nodes);
G_GNUC_INTERNAL
GList *pcmk__sort_nodes(GList *nodes, pe_node_t *active_node,
pe_working_set_t *data_set);
G_GNUC_INTERNAL
void pcmk__apply_node_health(pe_working_set_t *data_set);
// Clone notifications (pcmk_sched_notif.c)
G_GNUC_INTERNAL
void pcmk__create_notifications(pe_resource_t *rsc, notify_data_t *n_data);
G_GNUC_INTERNAL
notify_data_t *pcmk__clone_notif_pseudo_ops(pe_resource_t *rsc,
const char *task,
pe_action_t *action,
pe_action_t *complete);
G_GNUC_INTERNAL
void pcmk__free_notification_data(notify_data_t *n_data);
G_GNUC_INTERNAL
void pcmk__order_notifs_after_fencing(pe_action_t *action, pe_resource_t *rsc,
pe_action_t *stonith_op);
// Functions applying to more than one variant (pcmk_sched_resource.c)
G_GNUC_INTERNAL
void pcmk__set_allocation_methods(pe_working_set_t *data_set);
G_GNUC_INTERNAL
bool pcmk__rsc_agent_changed(pe_resource_t *rsc, pe_node_t *node,
const xmlNode *rsc_entry, bool active_on_node);
G_GNUC_INTERNAL
GList *pcmk__rscs_matching_id(const char *id, pe_working_set_t *data_set);
G_GNUC_INTERNAL
GList *pcmk__colocated_resources(pe_resource_t *rsc, pe_resource_t *orig_rsc,
GList *colocated_rscs);
G_GNUC_INTERNAL
void pcmk__output_resource_actions(pe_resource_t *rsc);
G_GNUC_INTERNAL
bool pcmk__assign_primitive(pe_resource_t *rsc, pe_node_t *chosen, bool force);
G_GNUC_INTERNAL
bool pcmk__assign_resource(pe_resource_t *rsc, pe_node_t *node, bool force);
G_GNUC_INTERNAL
void pcmk__unassign_resource(pe_resource_t *rsc);
G_GNUC_INTERNAL
bool pcmk__threshold_reached(pe_resource_t *rsc, pe_node_t *node,
pe_resource_t **failed);
G_GNUC_INTERNAL
void pcmk__sort_resources(pe_working_set_t *data_set);
// Functions related to probes (pcmk_sched_probes.c)
G_GNUC_INTERNAL
void pcmk__order_probes(pe_working_set_t *data_set);
G_GNUC_INTERNAL
void pcmk__schedule_probes(pe_working_set_t *data_set);
// Functions related to node utilization (pcmk_sched_utilization.c)
G_GNUC_INTERNAL
int pcmk__compare_node_capacities(const pe_node_t *node1,
const pe_node_t *node2);
G_GNUC_INTERNAL
void pcmk__consume_node_capacity(GHashTable *current_utilization,
pe_resource_t *rsc);
G_GNUC_INTERNAL
void pcmk__release_node_capacity(GHashTable *current_utilization,
pe_resource_t *rsc);
G_GNUC_INTERNAL
void pcmk__ban_insufficient_capacity(pe_resource_t *rsc, pe_node_t **prefer,
pe_working_set_t *data_set);
G_GNUC_INTERNAL
void pcmk__create_utilization_constraints(pe_resource_t *rsc,
GList *allowed_nodes);
G_GNUC_INTERNAL
void pcmk__show_node_capacities(const char *desc, pe_working_set_t *data_set);
#endif // PCMK__LIBPACEMAKER_PRIVATE__H
diff --git a/lib/pacemaker/pcmk_sched_allocate.c b/lib/pacemaker/pcmk_sched_allocate.c
index 275edb6a30..7f3ae4c6a1 100644
--- a/lib/pacemaker/pcmk_sched_allocate.c
+++ b/lib/pacemaker/pcmk_sched_allocate.c
@@ -1,752 +1,795 @@
/*
* Copyright 2004-2022 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include "libpacemaker_private.h"
CRM_TRACE_INIT_DATA(pacemaker);
/*!
* \internal
* \brief Do deferred action checks after allocation
*
* When unpacking the resource history, the scheduler checks for resource
* configurations that have changed since an action was run. However, at that
* time, bundles using the REMOTE_CONTAINER_HACK don't have their final
* parameter information, so instead they add a deferred check to a list. This
* function processes one entry in that list.
*
* \param[in] rsc Resource that action history is for
* \param[in] node Node that action history is for
* \param[in] rsc_op Action history entry
* \param[in] check Type of deferred check to do
* \param[in] data_set Working set for cluster
*/
static void
check_params(pe_resource_t *rsc, pe_node_t *node, xmlNode *rsc_op,
enum pe_check_parameters check, pe_working_set_t *data_set)
{
const char *reason = NULL;
op_digest_cache_t *digest_data = NULL;
switch (check) {
case pe_check_active:
if (pcmk__check_action_config(rsc, node, rsc_op)
&& pe_get_failcount(node, rsc, NULL, pe_fc_effective, NULL,
data_set)) {
reason = "action definition changed";
}
break;
case pe_check_last_failure:
digest_data = rsc_action_digest_cmp(rsc, rsc_op, node, data_set);
switch (digest_data->rc) {
case RSC_DIGEST_UNKNOWN:
crm_trace("Resource %s history entry %s on %s has "
"no digest to compare",
rsc->id, ID(rsc_op), node->details->id);
break;
case RSC_DIGEST_MATCH:
break;
default:
reason = "resource parameters have changed";
break;
}
break;
}
if (reason != NULL) {
pe__clear_failcount(rsc, node, reason, data_set);
}
}
/*!
* \internal
* \brief Check whether a resource has failcount clearing scheduled on a node
*
* \param[in] node Node to check
* \param[in] rsc Resource to check
*
* \return true if \p rsc has failcount clearing scheduled on \p node,
* otherwise false
*/
static bool
failcount_clear_action_exists(pe_node_t *node, pe_resource_t *rsc)
{
GList *list = pe__resource_actions(rsc, node, CRM_OP_CLEAR_FAILCOUNT, TRUE);
if (list != NULL) {
g_list_free(list);
return true;
}
return false;
}
/*!
* \internal
* \brief Ban a resource from a node if it reached its failure threshold there
*
* \param[in] rsc Resource to check failure threshold for
* \param[in] node Node to check \p rsc on
*/
static void
check_failure_threshold(pe_resource_t *rsc, pe_node_t *node)
{
// If this is a collective resource, apply recursively to children instead
if (rsc->children != NULL) {
g_list_foreach(rsc->children, (GFunc) check_failure_threshold,
node);
return;
} else if (failcount_clear_action_exists(node, rsc)) {
/* Don't force the resource away from this node due to a failcount
* that's going to be cleared.
*
* @TODO Failcount clearing can be scheduled in
* pcmk__handle_rsc_config_changes() via process_rsc_history(), or in
- * stage5() via check_params(). This runs well before then, so it cannot
- * detect those, meaning we might check the migration threshold when we
- * shouldn't. Worst case, we stop or move the resource, then move it
- * back in the next transition.
+ * schedule_resource_actions() via check_params(). This runs well before
+ * then, so it cannot detect those, meaning we might check the migration
+ * threshold when we shouldn't. Worst case, we stop or move the
+ * resource, then move it back in the next transition.
*/
return;
} else {
pe_resource_t *failed = NULL;
if (pcmk__threshold_reached(rsc, node, &failed)) {
resource_location(failed, node, -INFINITY, "__fail_limit__",
rsc->cluster);
}
}
}
/*!
* \internal
* \brief If resource has exclusive discovery, ban node if not allowed
*
* Location constraints have a resource-discovery option that allows users to
* specify where probes are done for the affected resource. If this is set to
* exclusive, probes will only be done on nodes listed in exclusive constraints.
* This function bans the resource from the node if the node is not listed.
*
* \param[in] rsc Resource to check
* \param[in] node Node to check \p rsc on
*/
static void
apply_exclusive_discovery(pe_resource_t *rsc, pe_node_t *node)
{
if (rsc->exclusive_discover || uber_parent(rsc)->exclusive_discover) {
pe_node_t *match = NULL;
// If this is a collective resource, apply recursively to children
g_list_foreach(rsc->children, (GFunc) apply_exclusive_discovery, node);
match = g_hash_table_lookup(rsc->allowed_nodes, node->details->id);
if ((match != NULL)
&& (match->rsc_discover_mode != pe_discover_exclusive)) {
match->weight = -INFINITY;
}
}
}
/*!
* \internal
* \brief Apply stickiness to a resource if appropriate
*
* \param[in] rsc Resource to check for stickiness
* \param[in] data_set Cluster working set
*/
static void
apply_stickiness(pe_resource_t *rsc, pe_working_set_t *data_set)
{
pe_node_t *node = NULL;
// If this is a collective resource, apply recursively to children instead
if (rsc->children != NULL) {
g_list_foreach(rsc->children, (GFunc) apply_stickiness, data_set);
return;
}
/* A resource is sticky if it is managed, has stickiness configured, and is
* active on a single node.
*/
if (!pcmk_is_set(rsc->flags, pe_rsc_managed)
|| (rsc->stickiness < 1) || !pcmk__list_of_1(rsc->running_on)) {
return;
}
node = rsc->running_on->data;
/* In a symmetric cluster, stickiness can always be used. In an
* asymmetric cluster, we have to check whether the resource is still
* allowed on the node, so we don't keep the resource somewhere it is no
* longer explicitly enabled.
*/
if (!pcmk_is_set(rsc->cluster->flags, pe_flag_symmetric_cluster)
&& (pe_hash_table_lookup(rsc->allowed_nodes,
node->details->id) == NULL)) {
pe_rsc_debug(rsc,
"Ignoring %s stickiness because the cluster is "
"asymmetric and node %s is not explicitly allowed",
rsc->id, node->details->uname);
return;
}
pe_rsc_debug(rsc, "Resource %s has %d stickiness on node %s",
rsc->id, rsc->stickiness, node->details->uname);
resource_location(rsc, node, rsc->stickiness, "stickiness",
rsc->cluster);
}
/*!
* \internal
* \brief Apply shutdown locks for all resources as appropriate
*
* \param[in] data_set Cluster working set
*/
static void
apply_shutdown_locks(pe_working_set_t *data_set)
{
if (!pcmk_is_set(data_set->flags, pe_flag_shutdown_lock)) {
return;
}
for (GList *iter = data_set->resources; iter != NULL; iter = iter->next) {
pe_resource_t *rsc = (pe_resource_t *) iter->data;
rsc->cmds->shutdown_lock(rsc);
}
}
/*!
* \internal
* \brief Calculate the number of available nodes in the cluster
*
* \param[in] data_set Cluster working set
*/
static void
count_available_nodes(pe_working_set_t *data_set)
{
if (pcmk_is_set(data_set->flags, pe_flag_no_compat)) {
return;
}
// @COMPAT for API backward compatibility only (cluster does not use value)
for (GList *iter = data_set->nodes; iter != NULL; iter = iter->next) {
pe_node_t *node = (pe_node_t *) iter->data;
if ((node != NULL) && (node->weight >= 0) && node->details->online
&& (node->details->type != node_ping)) {
data_set->max_valid_nodes++;
}
}
crm_trace("Online node count: %d", data_set->max_valid_nodes);
}
/*!
* \internal
* \brief Apply node-specific scheduling criteria
*
* After the CIB has been unpacked, process node-specific scheduling criteria
* including shutdown locks, location constraints, resource stickiness,
* migration thresholds, and exclusive resource discovery.
*/
static void
apply_node_criteria(pe_working_set_t *data_set)
{
crm_trace("Applying node-specific scheduling criteria");
apply_shutdown_locks(data_set);
count_available_nodes(data_set);
pcmk__apply_locations(data_set);
g_list_foreach(data_set->resources, (GFunc) apply_stickiness, data_set);
for (GList *node_iter = data_set->nodes; node_iter != NULL;
node_iter = node_iter->next) {
for (GList *rsc_iter = data_set->resources; rsc_iter != NULL;
rsc_iter = rsc_iter->next) {
pe_node_t *node = (pe_node_t *) node_iter->data;
pe_resource_t *rsc = (pe_resource_t *) rsc_iter->data;
check_failure_threshold(rsc, node);
apply_exclusive_discovery(rsc, node);
}
}
}
/*!
* \internal
* \brief Allocate resources to nodes
*
* \param[in] data_set Cluster working set
*/
static void
allocate_resources(pe_working_set_t *data_set)
{
GList *iter = NULL;
crm_trace("Allocating resources to nodes");
if (!pcmk__str_eq(data_set->placement_strategy, "default", pcmk__str_casei)) {
pcmk__sort_resources(data_set);
}
pcmk__show_node_capacities("Original", data_set);
if (pcmk_is_set(data_set->flags, pe_flag_have_remote_nodes)) {
/* Allocate remote connection resources first (which will also allocate
* any colocation dependencies). If the connection is migrating, always
* prefer the partial migration target.
*/
for (iter = data_set->resources; iter != NULL; iter = iter->next) {
pe_resource_t *rsc = (pe_resource_t *) iter->data;
if (rsc->is_remote_node) {
pe_rsc_trace(rsc, "Allocating remote connection resource '%s'",
rsc->id);
rsc->cmds->allocate(rsc, rsc->partial_migration_target,
data_set);
}
}
}
/* now do the rest of the resources */
for (iter = data_set->resources; iter != NULL; iter = iter->next) {
pe_resource_t *rsc = (pe_resource_t *) iter->data;
if (!rsc->is_remote_node) {
pe_rsc_trace(rsc, "Allocating %s resource '%s'",
crm_element_name(rsc->xml), rsc->id);
rsc->cmds->allocate(rsc, NULL, data_set);
}
}
pcmk__show_node_capacities("Remaining", data_set);
}
/*!
* \internal
* \brief Schedule fail count clearing on online nodes if resource is orphaned
*
* \param[in] rsc Resource to check
* \param[in] data_set Cluster working set
*/
static void
clear_failcounts_if_orphaned(pe_resource_t *rsc, pe_working_set_t *data_set)
{
if (!pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
return;
}
crm_trace("Clear fail counts for orphaned resource %s", rsc->id);
/* There's no need to recurse into rsc->children because those
* should just be unallocated clone instances.
*/
for (GList *iter = data_set->nodes; iter != NULL; iter = iter->next) {
pe_node_t *node = (pe_node_t *) iter->data;
pe_action_t *clear_op = NULL;
if (!node->details->online) {
continue;
}
if (pe_get_failcount(node, rsc, NULL, pe_fc_effective, NULL,
data_set) == 0) {
continue;
}
clear_op = pe__clear_failcount(rsc, node, "it is orphaned", data_set);
/* We can't use order_action_then_stop() here because its
* pe_order_preserve breaks things
*/
pcmk__new_ordering(clear_op->rsc, NULL, clear_op, rsc, stop_key(rsc),
NULL, pe_order_optional, data_set);
}
}
-gboolean
-stage5(pe_working_set_t * data_set)
+/*!
+ * \internal
+ * \brief Schedule any resource actions needed
+ *
+ * \param[in] data_set Cluster working set
+ */
+static void
+schedule_resource_actions(pe_working_set_t *data_set)
{
- GList *gIter = NULL;
-
// Process deferred action checks
pe__foreach_param_check(data_set, check_params);
pe__free_param_checks(data_set);
if (pcmk_is_set(data_set->flags, pe_flag_startup_probes)) {
- crm_trace("Calculating needed probes");
+ crm_trace("Scheduling probes");
pcmk__schedule_probes(data_set);
}
if (pcmk_is_set(data_set->flags, pe_flag_stop_rsc_orphans)) {
g_list_foreach(data_set->resources,
(GFunc) clear_failcounts_if_orphaned, data_set);
}
- crm_trace("Creating actions");
-
- for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) {
- pe_resource_t *rsc = (pe_resource_t *) gIter->data;
+ crm_trace("Scheduling resource actions");
+ for (GList *iter = data_set->resources; iter != NULL; iter = iter->next) {
+ pe_resource_t *rsc = (pe_resource_t *) iter->data;
rsc->cmds->create_actions(rsc, data_set);
}
-
- crm_trace("Creating done");
- return TRUE;
}
-static gboolean
-is_managed(const pe_resource_t * rsc)
+/*!
+ * \internal
+ * \brief Check whether a resource or any of its descendants are managed
+ *
+ * \param[in] rsc Resource to check
+ *
+ * \return true if resource or any descendant is managed, otherwise false
+ */
+static bool
+is_managed(const pe_resource_t *rsc)
{
- GList *gIter = rsc->children;
-
if (pcmk_is_set(rsc->flags, pe_rsc_managed)) {
- return TRUE;
+ return true;
}
+ for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
+ if (is_managed((pe_resource_t *) iter->data)) {
+ return true;
+ }
+ }
+ return false;
+}
- for (; gIter != NULL; gIter = gIter->next) {
- pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
-
- if (is_managed(child_rsc)) {
- return TRUE;
+/*!
+ * \internal
+ * \brief Check whether any resources in the cluster are managed
+ *
+ * \param[in] data_set Cluster working set
+ *
+ * \return true if any resource is managed, otherwise false
+ */
+static bool
+any_managed_resources(pe_working_set_t *data_set)
+{
+ for (GList *iter = data_set->resources; iter != NULL; iter = iter->next) {
+ if (is_managed((pe_resource_t *) iter->data)) {
+ return true;
}
}
+ return false;
+}
- return FALSE;
+/*!
+ * \internal
+ * \brief Check whether a node requires fencing
+ *
+ * \param[in] node Node to check
+ * \param[in] have_managed Whether any resource in cluster is managed
+ * \param[in] data_set Cluster working set
+ *
+ * \return true if \p node should be fenced, otherwise false
+ */
+static bool
+needs_fencing(pe_node_t *node, bool have_managed, pe_working_set_t *data_set)
+{
+ return have_managed && node->details->unclean
+ && pe_can_fence(data_set, node);
}
-static gboolean
-any_managed_resources(pe_working_set_t * data_set)
+/*!
+ * \internal
+ * \brief Check whether a node requires shutdown
+ *
+ * \param[in] node Node to check
+ *
+ * \return true if \p node should be shut down, otherwise false
+ */
+static bool
+needs_shutdown(pe_node_t *node)
{
+ if (pe__is_guest_or_remote_node(node)) {
+ /* Do not send shutdown actions for Pacemaker Remote nodes.
+ * @TODO We might come up with a good use for this in the future.
+ */
+ return false;
+ }
+ return node->details->online && node->details->shutdown;
+}
- GList *gIter = data_set->resources;
+/*!
+ * \internal
+ * \brief Track and order non-DC fencing
+ *
+ * \param[in] list List of existing non-DC fencing actions
+ * \param[in] action Fencing action to prepend to \p list
+ * \param[in] data_set Cluster working set
+ *
+ * \return (Possibly new) head of \p list
+ */
+static GList *
+add_nondc_fencing(GList *list, pe_action_t *action, pe_working_set_t *data_set)
+{
+ if (!pcmk_is_set(data_set->flags, pe_flag_concurrent_fencing)
+ && (list != NULL)) {
+ /* Concurrent fencing is disabled, so order each non-DC
+ * fencing in a chain. If there is any DC fencing or
+ * shutdown, it will be ordered after the last action in the
+ * chain later.
+ */
+ order_actions((pe_action_t *) list->data, action, pe_order_optional);
+ }
+ return g_list_prepend(list, action);
+}
- for (; gIter != NULL; gIter = gIter->next) {
- pe_resource_t *rsc = (pe_resource_t *) gIter->data;
+/*!
+ * \internal
+ * \brief Schedule a node for fencing
+ *
+ * \param[in] node Node that requires fencing
+ * \param[in] data_set Cluster working set
+ *
+ * \return Fencing action for \p node
+ */
+static pe_action_t *
+schedule_fencing(pe_node_t *node, pe_working_set_t *data_set)
+{
+ pe_action_t *fencing = pe_fence_op(node, NULL, FALSE, "node is unclean",
+ FALSE, data_set);
- if (is_managed(rsc)) {
- return TRUE;
- }
- }
- return FALSE;
+ pe_warn("Scheduling node %s for fencing", node->details->uname);
+ pcmk__order_vs_fence(fencing, data_set);
+ return fencing;
}
-/*
- * Create dependencies for stonith and shutdown operations
+/*!
+ * \internal
+ * \brief Create and order node fencing and shutdown actions
+ *
+ * \param[in] data_set Cluster working set
*/
-gboolean
-stage6(pe_working_set_t * data_set)
+static void
+schedule_fencing_and_shutdowns(pe_working_set_t *data_set)
{
pe_action_t *dc_down = NULL;
- pe_action_t *stonith_op = NULL;
- gboolean integrity_lost = FALSE;
- gboolean need_stonith = TRUE;
- GList *gIter;
- GList *stonith_ops = NULL;
+ bool integrity_lost = false;
+ bool have_managed = any_managed_resources(data_set);
+ GList *fencing_ops = NULL;
GList *shutdown_ops = NULL;
- /* Remote ordering constraints need to happen prior to calculating fencing
- * because it is one more place we can mark nodes as needing fencing.
- */
- pcmk__order_remote_connection_actions(data_set);
-
- crm_trace("Processing fencing and shutdown cases");
- if (any_managed_resources(data_set) == FALSE) {
- crm_notice("Delaying fencing operations until there are resources to manage");
- need_stonith = FALSE;
+ crm_trace("Scheduling fencing and shutdowns as needed");
+ if (!have_managed) {
+ crm_notice("No fencing will be done until there are resources to manage");
}
- /* Check each node for stonith/shutdown */
- for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
- pe_node_t *node = (pe_node_t *) gIter->data;
+ // Check each node for whether it needs fencing or shutdown
+ for (GList *iter = data_set->nodes; iter != NULL; iter = iter->next) {
+ pe_node_t *node = (pe_node_t *) iter->data;
+ pe_action_t *fencing = NULL;
/* Guest nodes are "fenced" by recovering their container resource,
* so handle them separately.
*/
if (pe__is_guest_node(node)) {
- if (node->details->remote_requires_reset && need_stonith
+ if (node->details->remote_requires_reset && have_managed
&& pe_can_fence(data_set, node)) {
pcmk__fence_guest(node, data_set);
}
continue;
}
- stonith_op = NULL;
-
- if (node->details->unclean
- && need_stonith && pe_can_fence(data_set, node)) {
-
- stonith_op = pe_fence_op(node, NULL, FALSE, "node is unclean", FALSE, data_set);
- pe_warn("Scheduling Node %s for STONITH", node->details->uname);
-
- pcmk__order_vs_fence(stonith_op, data_set);
+ if (needs_fencing(node, have_managed, data_set)) {
+ fencing = schedule_fencing(node, data_set);
+ // Track DC and non-DC fence actions separately
if (node->details->is_dc) {
- // Remember if the DC is being fenced
- dc_down = stonith_op;
-
+ dc_down = fencing;
} else {
-
- if (!pcmk_is_set(data_set->flags, pe_flag_concurrent_fencing)
- && (stonith_ops != NULL)) {
- /* Concurrent fencing is disabled, so order each non-DC
- * fencing in a chain. If there is any DC fencing or
- * shutdown, it will be ordered after the last action in the
- * chain later.
- */
- order_actions((pe_action_t *) stonith_ops->data,
- stonith_op, pe_order_optional);
- }
-
- // Remember all non-DC fencing actions in a separate list
- stonith_ops = g_list_prepend(stonith_ops, stonith_op);
+ fencing_ops = add_nondc_fencing(fencing_ops, fencing, data_set);
}
- } else if (node->details->online && node->details->shutdown &&
- /* TODO define what a shutdown op means for a remote node.
- * For now we do not send shutdown operations for remote nodes, but
- * if we can come up with a good use for this in the future, we will. */
- pe__is_guest_or_remote_node(node) == FALSE) {
-
+ } else if (needs_shutdown(node)) {
pe_action_t *down_op = pcmk__new_shutdown_action(node, data_set);
+ // Track DC and non-DC shutdown actions separately
if (node->details->is_dc) {
- // Remember if the DC is being shut down
dc_down = down_op;
} else {
- // Remember non-DC shutdowns for later ordering
shutdown_ops = g_list_prepend(shutdown_ops, down_op);
}
}
- if (node->details->unclean && stonith_op == NULL) {
- integrity_lost = TRUE;
- pe_warn("Node %s is unclean!", node->details->uname);
+ if ((fencing == NULL) && node->details->unclean) {
+ integrity_lost = true;
+ pe_warn("Node %s is unclean but cannot be fenced",
+ node->details->uname);
}
}
if (integrity_lost) {
if (!pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) {
- pe_warn("YOUR RESOURCES ARE NOW LIKELY COMPROMISED");
- pe_err("ENABLE STONITH TO KEEP YOUR RESOURCES SAFE");
+ pe_warn("Resource functionality and data integrity cannot be "
+ "guaranteed (configure, enable, and test fencing to "
+ "correct this)");
} else if (!pcmk_is_set(data_set->flags, pe_flag_have_quorum)) {
- crm_notice("Cannot fence unclean nodes until quorum is"
- " attained (or no-quorum-policy is set to ignore)");
+ crm_notice("Unclean nodes will not be fenced until quorum is "
+ "attained or no-quorum-policy is set to ignore");
}
}
if (dc_down != NULL) {
/* Order any non-DC shutdowns before any DC shutdown, to avoid repeated
* DC elections. However, we don't want to order non-DC shutdowns before
* a DC *fencing*, because even though we don't want a node that's
* shutting down to become DC, the DC fencing could be ordered before a
* clone stop that's also ordered before the shutdowns, thus leading to
* a graph loop.
*/
- if (pcmk__str_eq(dc_down->task, CRM_OP_SHUTDOWN, pcmk__str_casei)) {
- for (gIter = shutdown_ops; gIter != NULL; gIter = gIter->next) {
- pe_action_t *node_stop = (pe_action_t *) gIter->data;
-
- crm_debug("Ordering shutdown on %s before %s on DC %s",
- node_stop->node->details->uname,
- dc_down->task, dc_down->node->details->uname);
-
- order_actions(node_stop, dc_down, pe_order_optional);
- }
+ if (pcmk__str_eq(dc_down->task, CRM_OP_SHUTDOWN, pcmk__str_none)) {
+ pcmk__order_after_each(dc_down, shutdown_ops);
}
// Order any non-DC fencing before any DC fencing or shutdown
if (pcmk_is_set(data_set->flags, pe_flag_concurrent_fencing)) {
/* With concurrent fencing, order each non-DC fencing action
* separately before any DC fencing or shutdown.
*/
- for (gIter = stonith_ops; gIter != NULL; gIter = gIter->next) {
- order_actions((pe_action_t *) gIter->data, dc_down,
- pe_order_optional);
- }
- } else if (stonith_ops) {
+ pcmk__order_after_each(dc_down, fencing_ops);
+ } else if (fencing_ops != NULL) {
/* Without concurrent fencing, the non-DC fencing actions are
* already ordered relative to each other, so we just need to order
* the DC fencing after the last action in the chain (which is the
* first item in the list).
*/
- order_actions((pe_action_t *) stonith_ops->data, dc_down,
+ order_actions((pe_action_t *) fencing_ops->data, dc_down,
pe_order_optional);
}
}
- g_list_free(stonith_ops);
+ g_list_free(fencing_ops);
g_list_free(shutdown_ops);
- return TRUE;
}
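/* For illustration (hypothetical node names): with concurrent fencing disabled,
 * three unclean non-DC nodes n1, n2, and n3 (checked in that order), and the DC
 * also going down, the orderings created above form the chain
 *
 *   fence n1 -> fence n2 -> fence n3 -> DC fencing or shutdown
 *
 * with each non-DC shutdown additionally ordered before a DC shutdown (but not
 * before DC fencing, to avoid graph loops).
 */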
static void
log_resource_details(pe_working_set_t *data_set)
{
pcmk__output_t *out = data_set->priv;
GList *all = NULL;
/* We need a list of nodes that we are allowed to output information for.
* This is necessary because out->message for all the resource-related
* messages expects such a list, due to the `crm_mon --node=` feature. Here,
* we just make it a list of all the nodes.
*/
all = g_list_prepend(all, (gpointer) "*");
for (GList *item = data_set->resources; item != NULL; item = item->next) {
pe_resource_t *rsc = (pe_resource_t *) item->data;
// Log all resources except inactive orphans
if (!pcmk_is_set(rsc->flags, pe_rsc_orphan)
|| (rsc->role != RSC_ROLE_STOPPED)) {
out->message(out, crm_map_element_name(rsc->xml), 0, rsc, all, all);
}
}
g_list_free(all);
}
static void
log_all_actions(pe_working_set_t *data_set)
{
/* This only ever outputs to the log, so ignore whatever output object was
* previously set and just log instead.
*/
pcmk__output_t *prev_out = data_set->priv;
pcmk__output_t *out = pcmk__new_logger();
if (out == NULL) {
return;
}
pcmk__output_set_log_level(out, LOG_NOTICE);
data_set->priv = out;
out->begin_list(out, NULL, NULL, "Actions");
pcmk__output_actions(data_set);
out->end_list(out);
out->finish(out, CRM_EX_OK, true, NULL);
pcmk__output_free(out);
data_set->priv = prev_out;
}
/*!
* \internal
* \brief Log all required but unrunnable actions at trace level
*
* \param[in] data_set Cluster working set
*/
static void
log_unrunnable_actions(pe_working_set_t *data_set)
{
const uint64_t flags = pe_action_optional|pe_action_runnable|pe_action_pseudo;
crm_trace("Required but unrunnable actions:");
for (GList *iter = data_set->actions; iter != NULL; iter = iter->next) {
pe_action_t *action = (pe_action_t *) iter->data;
if (!pcmk_any_flags_set(action->flags, flags)) {
pcmk__log_action("\t", action, true);
}
}
}
/*!
* \internal
* \brief Unpack the CIB for scheduling
*
* \param[in] cib CIB XML to unpack (may be NULL if previously unpacked)
* \param[in] flags Working set flags to set in addition to defaults
* \param[in] data_set Cluster working set
*/
static void
unpack_cib(xmlNode *cib, unsigned long long flags, pe_working_set_t *data_set)
{
if (pcmk_is_set(data_set->flags, pe_flag_have_status)) {
crm_trace("Reusing previously calculated cluster status");
pe__set_working_set_flags(data_set, flags);
return;
}
CRM_ASSERT(cib != NULL);
crm_trace("Calculating cluster status");
/* This will zero the entire struct without freeing anything first, so
* callers should never call pcmk__schedule_actions() with a populated data
* set unless pe_flag_have_status is set (i.e. cluster_status() was
* previously called, whether directly or via pcmk__schedule_actions()).
*/
set_working_set_defaults(data_set);
pe__set_working_set_flags(data_set, flags);
data_set->input = cib;
cluster_status(data_set); // Sets pe_flag_have_status
}
/*!
* \internal
* \brief Run the scheduler for a given CIB
*
* \param[in] cib CIB XML to use as scheduler input
* \param[in] flags Working set flags to set in addition to defaults
* \param[in,out] data_set Cluster working set
*/
void
pcmk__schedule_actions(xmlNode *cib, unsigned long long flags,
pe_working_set_t *data_set)
{
unpack_cib(cib, flags, data_set);
pcmk__set_allocation_methods(data_set);
pcmk__apply_node_health(data_set);
pcmk__unpack_constraints(data_set);
if (pcmk_is_set(data_set->flags, pe_flag_check_config)) {
return;
}
if (!pcmk_is_set(data_set->flags, pe_flag_quick_location) &&
pcmk__is_daemon) {
log_resource_details(data_set);
}
apply_node_criteria(data_set);
if (pcmk_is_set(data_set->flags, pe_flag_quick_location)) {
return;
}
pcmk__create_internal_constraints(data_set);
pcmk__handle_rsc_config_changes(data_set);
-
allocate_resources(data_set);
+ schedule_resource_actions(data_set);
- stage5(data_set);
-
- crm_trace("Processing fencing and shutdown cases");
- stage6(data_set);
+ /* Remote ordering constraints need to happen prior to calculating fencing
+ * because it is one more place we can mark nodes as needing fencing.
+ */
+ pcmk__order_remote_connection_actions(data_set);
+ schedule_fencing_and_shutdowns(data_set);
pcmk__apply_orderings(data_set);
log_all_actions(data_set);
-
- crm_trace("Create transition graph");
pcmk__create_graph(data_set);
+
if (get_crm_log_level() == LOG_TRACE) {
log_unrunnable_actions(data_set);
}
}
diff --git a/lib/pacemaker/pcmk_sched_ordering.c b/lib/pacemaker/pcmk_sched_ordering.c
index be22111487..16ceb5aece 100644
--- a/lib/pacemaker/pcmk_sched_ordering.c
+++ b/lib/pacemaker/pcmk_sched_ordering.c
@@ -1,1542 +1,1565 @@
/*
- * Copyright 2004-2021 the Pacemaker project contributors
+ * Copyright 2004-2022 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#include <stdbool.h>
#include <glib.h>
#include <crm/crm.h>
#include <pacemaker-internal.h>
#include "libpacemaker_private.h"
enum pe_order_kind {
pe_order_kind_optional,
pe_order_kind_mandatory,
pe_order_kind_serialize,
};
enum ordering_symmetry {
ordering_asymmetric, // the only relation in an asymmetric ordering
ordering_symmetric, // the normal relation in a symmetric ordering
ordering_symmetric_inverse, // the inverse relation in a symmetric ordering
};
#define EXPAND_CONSTRAINT_IDREF(__set, __rsc, __name) do { \
__rsc = pcmk__find_constraint_resource(data_set->resources, __name); \
if (__rsc == NULL) { \
pcmk__config_err("%s: No resource found for %s", __set, __name); \
return pcmk_rc_schema_validation; \
} \
} while (0)
static const char *
invert_action(const char *action)
{
if (pcmk__str_eq(action, RSC_START, pcmk__str_casei)) {
return RSC_STOP;
} else if (pcmk__str_eq(action, RSC_STOP, pcmk__str_casei)) {
return RSC_START;
} else if (pcmk__str_eq(action, RSC_PROMOTE, pcmk__str_casei)) {
return RSC_DEMOTE;
} else if (pcmk__str_eq(action, RSC_DEMOTE, pcmk__str_casei)) {
return RSC_PROMOTE;
} else if (pcmk__str_eq(action, RSC_PROMOTED, pcmk__str_casei)) {
return RSC_DEMOTED;
} else if (pcmk__str_eq(action, RSC_DEMOTED, pcmk__str_casei)) {
return RSC_PROMOTED;
} else if (pcmk__str_eq(action, RSC_STARTED, pcmk__str_casei)) {
return RSC_STOPPED;
} else if (pcmk__str_eq(action, RSC_STOPPED, pcmk__str_casei)) {
return RSC_STARTED;
}
crm_warn("Unknown action '%s' specified in order constraint", action);
return NULL;
}
static enum pe_order_kind
get_ordering_type(xmlNode *xml_obj)
{
enum pe_order_kind kind_e = pe_order_kind_mandatory;
const char *kind = crm_element_value(xml_obj, XML_ORDER_ATTR_KIND);
if (kind == NULL) {
const char *score = crm_element_value(xml_obj, XML_RULE_ATTR_SCORE);
kind_e = pe_order_kind_mandatory;
if (score) {
// @COMPAT deprecated informally since 1.0.7, formally since 2.0.1
int score_i = char2score(score);
if (score_i == 0) {
kind_e = pe_order_kind_optional;
}
pe_warn_once(pe_wo_order_score,
"Support for 'score' in rsc_order is deprecated "
"and will be removed in a future release "
"(use 'kind' instead)");
}
} else if (pcmk__str_eq(kind, "Mandatory", pcmk__str_casei)) {
kind_e = pe_order_kind_mandatory;
} else if (pcmk__str_eq(kind, "Optional", pcmk__str_casei)) {
kind_e = pe_order_kind_optional;
} else if (pcmk__str_eq(kind, "Serialize", pcmk__str_casei)) {
kind_e = pe_order_kind_serialize;
} else {
pcmk__config_err("Resetting '" XML_ORDER_ATTR_KIND "' for constraint "
"'%s' to Mandatory because '%s' is not valid",
crm_str(ID(xml_obj)), kind);
}
return kind_e;
}
/*!
* \internal
* \brief Get ordering symmetry from XML
*
* \param[in] xml_obj Ordering XML
* \param[in] parent_kind Default ordering kind
* \param[in] parent_symmetrical_s Parent element's symmetrical setting, if any
*
* \retval ordering_symmetric Ordering is symmetric
* \retval ordering_asymmetric Ordering is asymmetric
*/
static enum ordering_symmetry
get_ordering_symmetry(xmlNode *xml_obj, enum pe_order_kind parent_kind,
const char *parent_symmetrical_s)
{
int rc = pcmk_rc_ok;
bool symmetric = false;
enum pe_order_kind kind = parent_kind; // Default to parent's kind
// Check ordering XML for explicit kind
if ((crm_element_value(xml_obj, XML_ORDER_ATTR_KIND) != NULL)
|| (crm_element_value(xml_obj, XML_RULE_ATTR_SCORE) != NULL)) {
kind = get_ordering_type(xml_obj);
}
// Check ordering XML (and parent) for explicit symmetrical setting
rc = pcmk__xe_get_bool_attr(xml_obj, XML_CONS_ATTR_SYMMETRICAL, &symmetric);
if (rc != pcmk_rc_ok && parent_symmetrical_s != NULL) {
symmetric = crm_is_true(parent_symmetrical_s);
rc = pcmk_rc_ok;
}
if (rc == pcmk_rc_ok) {
if (symmetric) {
if (kind == pe_order_kind_serialize) {
pcmk__config_warn("Ignoring " XML_CONS_ATTR_SYMMETRICAL
" for '%s' because not valid with "
XML_ORDER_ATTR_KIND " of 'Serialize'",
ID(xml_obj));
} else {
return ordering_symmetric;
}
}
return ordering_asymmetric;
}
// Use default symmetry
if (kind == pe_order_kind_serialize) {
return ordering_asymmetric;
}
return ordering_symmetric;
}
/*!
* \internal
* \brief Get ordering flags appropriate to ordering kind
*
* \param[in] kind Ordering kind
* \param[in] first Action name for 'first' action
* \param[in] symmetry This ordering's symmetry role
*
* \return Minimal ordering flags appropriate to \p kind
*/
static enum pe_ordering
ordering_flags_for_kind(enum pe_order_kind kind, const char *first,
enum ordering_symmetry symmetry)
{
enum pe_ordering flags = pe_order_none; // so we trace-log all flags set
pe__set_order_flags(flags, pe_order_optional);
switch (kind) {
case pe_order_kind_optional:
break;
case pe_order_kind_serialize:
pe__set_order_flags(flags, pe_order_serialize_only);
break;
case pe_order_kind_mandatory:
switch (symmetry) {
case ordering_asymmetric:
pe__set_order_flags(flags, pe_order_asymmetrical);
break;
case ordering_symmetric:
pe__set_order_flags(flags, pe_order_implies_then);
if (pcmk__strcase_any_of(first, RSC_START, RSC_PROMOTE,
NULL)) {
pe__set_order_flags(flags, pe_order_runnable_left);
}
break;
case ordering_symmetric_inverse:
pe__set_order_flags(flags, pe_order_implies_first);
break;
}
break;
}
return flags;
}
/*!
* \internal
* \brief Find resource corresponding to ID specified in ordering
*
* \param[in] xml Ordering XML
* \param[in] resource_attr XML attribute name for resource ID
* \param[in] instance_attr XML attribute name for instance number
* \param[in] data_set Cluster working set
*
 * \return Resource corresponding to the ID specified in \p xml, or NULL if none
*/
static pe_resource_t *
get_ordering_resource(xmlNode *xml, const char *resource_attr,
const char *instance_attr, pe_working_set_t *data_set)
{
pe_resource_t *rsc = NULL;
const char *rsc_id = crm_element_value(xml, resource_attr);
const char *instance_id = crm_element_value(xml, instance_attr);
if (rsc_id == NULL) {
pcmk__config_err("Ignoring constraint '%s' without %s",
ID(xml), resource_attr);
return NULL;
}
rsc = pcmk__find_constraint_resource(data_set->resources, rsc_id);
if (rsc == NULL) {
pcmk__config_err("Ignoring constraint '%s' because resource '%s' "
"does not exist", ID(xml), rsc_id);
return NULL;
}
if (instance_id != NULL) {
if (!pe_rsc_is_clone(rsc)) {
pcmk__config_err("Ignoring constraint '%s' because resource '%s' "
"is not a clone but instance '%s' was requested",
ID(xml), rsc_id, instance_id);
return NULL;
}
rsc = find_clone_instance(rsc, instance_id, data_set);
if (rsc == NULL) {
pcmk__config_err("Ignoring constraint '%s' because resource '%s' "
"does not have an instance '%s'",
"'%s'", ID(xml), rsc_id, instance_id);
return NULL;
}
}
return rsc;
}
/*!
* \internal
* \brief Determine minimum number of 'first' instances required in ordering
*
* \param[in] rsc 'First' resource in ordering
* \param[in] xml Ordering XML
*
* \return Minimum 'first' instances required (or 0 if not applicable)
*/
static int
get_minimum_first_instances(pe_resource_t *rsc, xmlNode *xml)
{
const char *clone_min = NULL;
bool require_all = false;
if (!pe_rsc_is_clone(rsc)) {
return 0;
}
clone_min = g_hash_table_lookup(rsc->meta,
XML_RSC_ATTR_INCARNATION_MIN);
if (clone_min != NULL) {
int clone_min_int = 0;
pcmk__scan_min_int(clone_min, &clone_min_int, 0);
return clone_min_int;
}
/* @COMPAT 1.1.13:
     * require-all=false is a deprecated equivalent of clone-min=1
*/
if (pcmk__xe_get_bool_attr(xml, "require-all", &require_all) != ENODATA) {
pe_warn_once(pe_wo_require_all,
"Support for require-all in ordering constraints "
"is deprecated and will be removed in a future release"
" (use clone-min clone meta-attribute instead)");
if (!require_all) {
return 1;
}
}
return 0;
}
/*!
* \internal
* \brief Create orderings for a constraint with clone-min > 0
*
* \param[in] id Ordering ID
* \param[in] rsc_first 'First' resource in ordering (a clone)
* \param[in] action_first 'First' action in ordering
* \param[in] rsc_then 'Then' resource in ordering
* \param[in] action_then 'Then' action in ordering
* \param[in] flags Ordering flags
* \param[in] clone_min Minimum required instances of 'first'
* \param[in] data_set Cluster working set
*/
static void
clone_min_ordering(const char *id,
pe_resource_t *rsc_first, const char *action_first,
pe_resource_t *rsc_then, const char *action_then,
enum pe_ordering flags, int clone_min,
pe_working_set_t *data_set)
{
// Create a pseudo-action for when the minimum instances are active
char *task = crm_strdup_printf(CRM_OP_RELAXED_CLONE ":%s", id);
pe_action_t *clone_min_met = get_pseudo_op(task, data_set);
free(task);
    /* Require at least the minimum number of instance actions to be runnable
     * before the pseudo-action itself can be considered runnable.
*/
clone_min_met->required_runnable_before = clone_min;
pe__set_action_flags(clone_min_met, pe_action_requires_any);
// Order the actions for each clone instance before the pseudo-action
for (GList *rIter = rsc_first->children; rIter != NULL;
rIter = rIter->next) {
pe_resource_t *child = rIter->data;
pcmk__new_ordering(child, pcmk__op_key(child->id, action_first, 0),
NULL, NULL, NULL, clone_min_met,
pe_order_one_or_more|pe_order_implies_then_printed,
data_set);
}
// Order "then" action after the pseudo-action (if runnable)
pcmk__new_ordering(NULL, NULL, clone_min_met, rsc_then,
pcmk__op_key(rsc_then->id, action_then, 0),
NULL, flags|pe_order_runnable_left, data_set);
}
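/* For illustration (hypothetical IDs): a three-instance clone "web-clone" with
 * clone-min=2, ordered before starting "app", produces
 *
 *   web-clone:0 start --\
 *   web-clone:1 start ----> clone-min pseudo-action ----> app start
 *   web-clone:2 start --/
 *
 * where the pseudo-action becomes runnable once at least two of the instance
 * starts are runnable (required_runnable_before == 2).
 */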
/*!
* \internal
* \brief Update ordering flags for restart-type=restart
*
* \param[in] rsc 'Then' resource in ordering
* \param[in] kind Ordering kind
* \param[in] flag Ordering flag to set (when applicable)
 * \param[in,out] flags Ordering flag set to update
*
* \compat The restart-type resource meta-attribute is deprecated. Eventually,
* it will be removed, and pe_restart_ignore will be the only behavior,
* at which time this can just be removed entirely.
*/
#define handle_restart_type(rsc, kind, flag, flags) do { \
if (((kind) == pe_order_kind_optional) \
&& ((rsc)->restart_type == pe_restart_restart)) { \
pe__set_order_flags((flags), (flag)); \
} \
} while (0)
/*!
* \internal
* \brief Create new ordering for inverse of symmetric constraint
*
* \param[in] id Ordering ID (for logging only)
* \param[in] kind Ordering kind
 * \param[in] rsc_first 'First' resource in ordering
* \param[in] action_first 'First' action in ordering
* \param[in] rsc_then 'Then' resource in ordering
* \param[in] action_then 'Then' action in ordering
* \param[in] data_set Cluster working set
*/
static void
inverse_ordering(const char *id, enum pe_order_kind kind,
pe_resource_t *rsc_first, const char *action_first,
pe_resource_t *rsc_then, const char *action_then,
pe_working_set_t *data_set)
{
action_then = invert_action(action_then);
action_first = invert_action(action_first);
if ((action_then == NULL) || (action_first == NULL)) {
pcmk__config_warn("Cannot invert constraint '%s' "
"(please specify inverse manually)", id);
} else {
enum pe_ordering flags = ordering_flags_for_kind(kind, action_first,
ordering_symmetric_inverse);
handle_restart_type(rsc_then, kind, pe_order_implies_first, flags);
pcmk__order_resource_actions(rsc_then, action_then, rsc_first,
action_first, flags, data_set);
}
}
static void
unpack_simple_rsc_order(xmlNode *xml_obj, pe_working_set_t *data_set)
{
pe_resource_t *rsc_then = NULL;
pe_resource_t *rsc_first = NULL;
int min_required_before = 0;
enum pe_order_kind kind = pe_order_kind_mandatory;
enum pe_ordering cons_weight = pe_order_none;
enum ordering_symmetry symmetry;
const char *action_then = NULL;
const char *action_first = NULL;
const char *id = NULL;
CRM_CHECK(xml_obj != NULL, return);
id = crm_element_value(xml_obj, XML_ATTR_ID);
if (id == NULL) {
pcmk__config_err("Ignoring <%s> constraint without " XML_ATTR_ID,
crm_element_name(xml_obj));
return;
}
rsc_first = get_ordering_resource(xml_obj, XML_ORDER_ATTR_FIRST,
XML_ORDER_ATTR_FIRST_INSTANCE,
data_set);
if (rsc_first == NULL) {
return;
}
rsc_then = get_ordering_resource(xml_obj, XML_ORDER_ATTR_THEN,
XML_ORDER_ATTR_THEN_INSTANCE,
data_set);
if (rsc_then == NULL) {
return;
}
action_first = crm_element_value(xml_obj, XML_ORDER_ATTR_FIRST_ACTION);
if (action_first == NULL) {
action_first = RSC_START;
}
action_then = crm_element_value(xml_obj, XML_ORDER_ATTR_THEN_ACTION);
if (action_then == NULL) {
action_then = action_first;
}
kind = get_ordering_type(xml_obj);
symmetry = get_ordering_symmetry(xml_obj, kind, NULL);
cons_weight = ordering_flags_for_kind(kind, action_first, symmetry);
handle_restart_type(rsc_then, kind, pe_order_implies_then, cons_weight);
/* If there is a minimum number of instances that must be runnable before
* the 'then' action is runnable, we use a pseudo-action for convenience:
* minimum number of clone instances have runnable actions ->
     * pseudo-action is runnable -> dependent ('then') action is runnable.
*/
min_required_before = get_minimum_first_instances(rsc_first, xml_obj);
if (min_required_before > 0) {
clone_min_ordering(id, rsc_first, action_first, rsc_then, action_then,
cons_weight, min_required_before, data_set);
} else {
pcmk__order_resource_actions(rsc_first, action_first, rsc_then,
action_then, cons_weight, data_set);
}
if (symmetry == ordering_symmetric) {
inverse_ordering(id, kind, rsc_first, action_first,
rsc_then, action_then, data_set);
}
}
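/* A minimal constraint handled here might look like (illustrative IDs only):
 *
 *   <rsc_order id="db-then-app" first="db" first-action="start"
 *              then="app" then-action="start" kind="Mandatory"/>
 *
 * which creates "start db then start app" plus, because such an ordering is
 * symmetric by default, the inverse "stop app then stop db".
 */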
static char *
task_from_action_or_key(pe_action_t *action, const char *key)
{
char *res = NULL;
if (action != NULL) {
res = strdup(action->task);
} else if (key != NULL) {
parse_op_key(key, NULL, &res, NULL);
}
return res;
}
/*!
* \internal
* \brief Apply start/stop orderings to migrations
*
* Orderings involving start, stop, demote, and promote actions must be honored
* during a migration as well, so duplicate any such ordering for the
* corresponding migration actions.
*
* \param[in] order Ordering constraint to check
* \param[in] data_set Cluster working set
*/
static void
handle_migration_ordering(pe__ordering_t *order, pe_working_set_t *data_set)
{
char *lh_task = NULL;
char *rh_task = NULL;
bool rh_migratable;
bool lh_migratable;
// Only orderings between two different resources are relevant
if ((order->lh_rsc == NULL) || (order->rh_rsc == NULL)
|| (order->lh_rsc == order->rh_rsc)) {
return;
}
// Constraints between a parent resource and its children are not relevant
if (is_parent(order->lh_rsc, order->rh_rsc)
|| is_parent(order->rh_rsc, order->lh_rsc)) {
return;
}
// Only orderings involving at least one migratable resource are relevant
lh_migratable = pcmk_is_set(order->lh_rsc->flags, pe_rsc_allow_migrate);
rh_migratable = pcmk_is_set(order->rh_rsc->flags, pe_rsc_allow_migrate);
if (!lh_migratable && !rh_migratable) {
return;
}
// Check which actions are involved
lh_task = task_from_action_or_key(order->lh_action, order->lh_action_task);
rh_task = task_from_action_or_key(order->rh_action, order->rh_action_task);
if ((lh_task == NULL) || (rh_task == NULL)) {
goto cleanup_order;
}
if (pcmk__str_eq(lh_task, RSC_START, pcmk__str_casei)
&& pcmk__str_eq(rh_task, RSC_START, pcmk__str_casei)) {
int flags = pe_order_optional;
if (lh_migratable && rh_migratable) {
/* A start then B start
* -> A migrate_from then B migrate_to */
pcmk__new_ordering(order->lh_rsc,
pcmk__op_key(order->lh_rsc->id, RSC_MIGRATED, 0),
NULL, order->rh_rsc,
pcmk__op_key(order->rh_rsc->id, RSC_MIGRATE, 0),
NULL, flags, data_set);
}
if (rh_migratable) {
if (lh_migratable) {
pe__set_order_flags(flags, pe_order_apply_first_non_migratable);
}
/* A start then B start
* -> A start then B migrate_to (if start is not part of a
* migration)
*/
pcmk__new_ordering(order->lh_rsc,
pcmk__op_key(order->lh_rsc->id, RSC_START, 0),
NULL, order->rh_rsc,
pcmk__op_key(order->rh_rsc->id, RSC_MIGRATE, 0),
NULL, flags, data_set);
}
} else if (rh_migratable && pcmk__str_eq(lh_task, RSC_STOP, pcmk__str_casei)
&& pcmk__str_eq(rh_task, RSC_STOP, pcmk__str_casei)) {
int flags = pe_order_optional;
if (lh_migratable) {
pe__set_order_flags(flags, pe_order_apply_first_non_migratable);
}
/* For an ordering "stop A then stop B", if A is moving via restart, and
* B is migrating, enforce that B's migrate_to occurs after A's stop.
*/
pcmk__new_ordering(order->lh_rsc,
pcmk__op_key(order->lh_rsc->id, RSC_STOP, 0), NULL,
order->rh_rsc,
pcmk__op_key(order->rh_rsc->id, RSC_MIGRATE, 0),
NULL, flags, data_set);
// Also order B's migrate_from after A's stop during partial migrations
if (order->rh_rsc->partial_migration_target) {
pcmk__new_ordering(order->lh_rsc,
pcmk__op_key(order->lh_rsc->id, RSC_STOP, 0),
NULL, order->rh_rsc,
pcmk__op_key(order->rh_rsc->id, RSC_MIGRATED, 0),
NULL, flags, data_set);
}
} else if (pcmk__str_eq(lh_task, RSC_PROMOTE, pcmk__str_casei)
&& pcmk__str_eq(rh_task, RSC_START, pcmk__str_casei)) {
int flags = pe_order_optional;
if (rh_migratable) {
/* A promote then B start
* -> A promote then B migrate_to */
pcmk__new_ordering(order->lh_rsc,
pcmk__op_key(order->lh_rsc->id, RSC_PROMOTE, 0),
NULL, order->rh_rsc,
pcmk__op_key(order->rh_rsc->id, RSC_MIGRATE, 0),
NULL, flags, data_set);
}
} else if (pcmk__str_eq(lh_task, RSC_DEMOTE, pcmk__str_casei)
&& pcmk__str_eq(rh_task, RSC_STOP, pcmk__str_casei)) {
int flags = pe_order_optional;
if (rh_migratable) {
/* A demote then B stop
* -> A demote then B migrate_to */
pcmk__new_ordering(order->lh_rsc,
pcmk__op_key(order->lh_rsc->id, RSC_DEMOTE, 0),
NULL, order->rh_rsc,
pcmk__op_key(order->rh_rsc->id, RSC_MIGRATE, 0),
NULL, flags, data_set);
// Also order B migrate_from after A demote during partial migrations
if (order->rh_rsc->partial_migration_target) {
pcmk__new_ordering(order->lh_rsc,
pcmk__op_key(order->lh_rsc->id, RSC_DEMOTE, 0),
NULL, order->rh_rsc,
pcmk__op_key(order->rh_rsc->id, RSC_MIGRATED, 0),
NULL, flags, data_set);
}
}
}
cleanup_order:
free(lh_task);
free(rh_task);
}
/*!
* \internal
* \brief Create a new ordering between two actions
*
* \param[in] lh_rsc Resource for 'first' action (if NULL and
* \p lh_action is a resource action, that
* resource will be used)
* \param[in] lh_action_task Action key for 'first' action (if NULL and
* \p lh_action is not NULL, its UUID will be used)
* \param[in] lh_action 'first' action (if NULL, \p lh_rsc and
* \p lh_action_task must be set)
*
* \param[in] rh_rsc Resource for 'then' action (if NULL and
* \p rh_action is a resource action, that
* resource will be used)
* \param[in] rh_action_task Action key for 'then' action (if NULL and
* \p rh_action is not NULL, its UUID will be used)
* \param[in] rh_action 'then' action (if NULL, \p rh_rsc and
* \p rh_action_task must be set)
*
* \param[in] type Flag set of enum pe_ordering
* \param[in] data_set Cluster working set to add ordering to
*
* \note This function takes ownership of lh_action_task and rh_action_task,
 *       which the caller must not free.
*/
void
pcmk__new_ordering(pe_resource_t *lh_rsc, char *lh_action_task,
pe_action_t *lh_action, pe_resource_t *rh_rsc,
char *rh_action_task, pe_action_t *rh_action,
enum pe_ordering type, pe_working_set_t *data_set)
{
pe__ordering_t *order = NULL;
// One of action or resource must be specified for each side
CRM_CHECK(((lh_action != NULL) || (lh_rsc != NULL))
&& ((rh_action != NULL) || (rh_rsc != NULL)),
free(lh_action_task); free(rh_action_task); return);
if ((lh_rsc == NULL) && (lh_action != NULL)) {
lh_rsc = lh_action->rsc;
}
if ((rh_rsc == NULL) && (rh_action != NULL)) {
rh_rsc = rh_action->rsc;
}
order = calloc(1, sizeof(pe__ordering_t));
CRM_ASSERT(order != NULL);
order->id = data_set->order_id++;
order->type = type;
order->lh_rsc = lh_rsc;
order->rh_rsc = rh_rsc;
order->lh_action = lh_action;
order->rh_action = rh_action;
order->lh_action_task = lh_action_task;
order->rh_action_task = rh_action_task;
if ((order->lh_action_task == NULL) && (lh_action != NULL)) {
order->lh_action_task = strdup(lh_action->uuid);
}
if ((order->rh_action_task == NULL) && (rh_action != NULL)) {
order->rh_action_task = strdup(rh_action->uuid);
}
if ((order->lh_rsc == NULL) && (lh_action != NULL)) {
order->lh_rsc = lh_action->rsc;
}
if ((order->rh_rsc == NULL) && (rh_action != NULL)) {
order->rh_rsc = rh_action->rsc;
}
pe_rsc_trace(lh_rsc, "Created ordering %d for %s then %s",
(data_set->order_id - 1),
((lh_action_task == NULL)? "?" : lh_action_task),
((rh_action_task == NULL)? "?" : rh_action_task));
data_set->ordering_constraints = g_list_prepend(data_set->ordering_constraints,
order);
handle_migration_ordering(order, data_set);
}
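/* Typical usage (hypothetical resources rscA and rscB), mirroring calls made
 * elsewhere in this file:
 *
 *   pcmk__new_ordering(rscA, pcmk__op_key(rscA->id, RSC_STOP, 0), NULL,
 *                      rscB, pcmk__op_key(rscB->id, RSC_START, 0), NULL,
 *                      pe_order_optional, data_set);
 *
 * The generated keys are consumed by the new ordering and must not be freed
 * by the caller.
 */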
/*!
* \brief Unpack a set in an ordering constraint
*
* \param[in] set Set XML to unpack
* \param[in] parent_kind rsc_order XML "kind" attribute
* \param[in] parent_symmetrical_s rsc_order XML "symmetrical" attribute
* \param[in] data_set Cluster working set
*
* \return Standard Pacemaker return code
*/
static int
unpack_order_set(xmlNode *set, enum pe_order_kind parent_kind,
const char *parent_symmetrical_s, pe_working_set_t *data_set)
{
xmlNode *xml_rsc = NULL;
GList *set_iter = NULL;
GList *resources = NULL;
pe_resource_t *last = NULL;
pe_resource_t *resource = NULL;
int local_kind = parent_kind;
bool sequential = false;
enum pe_ordering flags = pe_order_optional;
enum ordering_symmetry symmetry;
char *key = NULL;
const char *id = ID(set);
const char *action = crm_element_value(set, "action");
const char *sequential_s = crm_element_value(set, "sequential");
const char *kind_s = crm_element_value(set, XML_ORDER_ATTR_KIND);
if (action == NULL) {
action = RSC_START;
}
if (kind_s) {
local_kind = get_ordering_type(set);
}
if (sequential_s == NULL) {
sequential_s = "1";
}
sequential = crm_is_true(sequential_s);
symmetry = get_ordering_symmetry(set, parent_kind, parent_symmetrical_s);
flags = ordering_flags_for_kind(local_kind, action, symmetry);
for (xml_rsc = first_named_child(set, XML_TAG_RESOURCE_REF);
xml_rsc != NULL; xml_rsc = crm_next_same_xml(xml_rsc)) {
EXPAND_CONSTRAINT_IDREF(id, resource, ID(xml_rsc));
resources = g_list_append(resources, resource);
}
if (pcmk__list_of_1(resources)) {
crm_trace("Single set: %s", id);
goto done;
}
set_iter = resources;
while (set_iter != NULL) {
resource = (pe_resource_t *) set_iter->data;
set_iter = set_iter->next;
key = pcmk__op_key(resource->id, action, 0);
if (local_kind == pe_order_kind_serialize) {
/* Serialize before everything that comes after */
for (GList *gIter = set_iter; gIter != NULL; gIter = gIter->next) {
pe_resource_t *then_rsc = (pe_resource_t *) gIter->data;
char *then_key = pcmk__op_key(then_rsc->id, action, 0);
pcmk__new_ordering(resource, strdup(key), NULL, then_rsc,
then_key, NULL, flags, data_set);
}
} else if (sequential) {
if (last != NULL) {
pcmk__order_resource_actions(last, action, resource, action,
flags, data_set);
}
last = resource;
}
free(key);
}
if (symmetry == ordering_asymmetric) {
goto done;
}
last = NULL;
action = invert_action(action);
flags = ordering_flags_for_kind(local_kind, action,
ordering_symmetric_inverse);
set_iter = resources;
while (set_iter != NULL) {
resource = (pe_resource_t *) set_iter->data;
set_iter = set_iter->next;
if (sequential) {
if (last != NULL) {
pcmk__order_resource_actions(resource, action, last, action,
flags, data_set);
}
last = resource;
}
}
done:
g_list_free(resources);
return pcmk_rc_ok;
}
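/* A set handled here might look like (illustrative IDs only):
 *
 *   <resource_set id="set-1" sequential="true" action="start">
 *     <resource_ref id="rscA"/>
 *     <resource_ref id="rscB"/>
 *   </resource_set>
 *
 * which orders "start rscA then start rscB" (plus the inverse stop ordering
 * when the containing constraint is symmetric).
 */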
/*!
* \brief Order two resource sets relative to each other
*
* \param[in] id Ordering ID (for logging)
* \param[in] set1 First listed set
* \param[in] set2 Second listed set
* \param[in] kind Ordering kind
* \param[in] data_set Cluster working set
* \param[in] symmetry Which ordering symmetry applies to this relation
*
* \return Standard Pacemaker return code
*/
static int
order_rsc_sets(const char *id, xmlNode *set1, xmlNode *set2,
enum pe_order_kind kind, pe_working_set_t *data_set,
enum ordering_symmetry symmetry)
{
xmlNode *xml_rsc = NULL;
xmlNode *xml_rsc_2 = NULL;
pe_resource_t *rsc_1 = NULL;
pe_resource_t *rsc_2 = NULL;
const char *action_1 = crm_element_value(set1, "action");
const char *action_2 = crm_element_value(set2, "action");
enum pe_ordering flags = pe_order_none;
bool require_all = true;
pcmk__xe_get_bool_attr(set1, "require-all", &require_all);
if (action_1 == NULL) {
action_1 = RSC_START;
}
if (action_2 == NULL) {
action_2 = RSC_START;
}
if (symmetry == ordering_symmetric_inverse) {
action_1 = invert_action(action_1);
action_2 = invert_action(action_2);
}
if (pcmk__str_eq(RSC_STOP, action_1, pcmk__str_casei)
|| pcmk__str_eq(RSC_DEMOTE, action_1, pcmk__str_casei)) {
        /* Assuming: A -> (B || C) -> D
         * The one-or-more logic only applies during the start/promote phase.
         * During shutdown, neither B nor C can shut down until D is down, so
         * simply turn require_all back on.
*/
require_all = true;
}
// @TODO is action_2 correct here?
flags = ordering_flags_for_kind(kind, action_2, symmetry);
    /* If set1 is unordered (require-all=false), whether it is sequential or
     * not is irrelevant with regard to set2.
*/
if (!require_all) {
char *task = crm_strdup_printf(CRM_OP_RELAXED_SET ":%s", ID(set1));
pe_action_t *unordered_action = get_pseudo_op(task, data_set);
free(task);
pe__set_action_flags(unordered_action, pe_action_requires_any);
for (xml_rsc = first_named_child(set1, XML_TAG_RESOURCE_REF);
xml_rsc != NULL; xml_rsc = crm_next_same_xml(xml_rsc)) {
EXPAND_CONSTRAINT_IDREF(id, rsc_1, ID(xml_rsc));
/* Add an ordering constraint between every element in set1 and the
* pseudo action. If any action in set1 is runnable the pseudo
* action will be runnable.
*/
pcmk__new_ordering(rsc_1, pcmk__op_key(rsc_1->id, action_1, 0),
NULL, NULL, NULL, unordered_action,
pe_order_one_or_more|pe_order_implies_then_printed,
data_set);
}
for (xml_rsc_2 = first_named_child(set2, XML_TAG_RESOURCE_REF);
xml_rsc_2 != NULL; xml_rsc_2 = crm_next_same_xml(xml_rsc_2)) {
EXPAND_CONSTRAINT_IDREF(id, rsc_2, ID(xml_rsc_2));
/* Add an ordering constraint between the pseudo-action and every
* element in set2. If the pseudo-action is runnable, every action
* in set2 will be runnable.
*/
pcmk__new_ordering(NULL, NULL, unordered_action,
rsc_2, pcmk__op_key(rsc_2->id, action_2, 0),
NULL, flags|pe_order_runnable_left, data_set);
}
return pcmk_rc_ok;
}
if (pcmk__xe_attr_is_true(set1, "sequential")) {
if (symmetry == ordering_symmetric_inverse) {
// Get the first one
xml_rsc = first_named_child(set1, XML_TAG_RESOURCE_REF);
if (xml_rsc != NULL) {
EXPAND_CONSTRAINT_IDREF(id, rsc_1, ID(xml_rsc));
}
} else {
// Get the last one
const char *rid = NULL;
for (xml_rsc = first_named_child(set1, XML_TAG_RESOURCE_REF);
xml_rsc != NULL; xml_rsc = crm_next_same_xml(xml_rsc)) {
rid = ID(xml_rsc);
}
EXPAND_CONSTRAINT_IDREF(id, rsc_1, rid);
}
}
if (pcmk__xe_attr_is_true(set2, "sequential")) {
if (symmetry == ordering_symmetric_inverse) {
// Get the last one
const char *rid = NULL;
for (xml_rsc = first_named_child(set2, XML_TAG_RESOURCE_REF);
xml_rsc != NULL; xml_rsc = crm_next_same_xml(xml_rsc)) {
rid = ID(xml_rsc);
}
EXPAND_CONSTRAINT_IDREF(id, rsc_2, rid);
} else {
// Get the first one
xml_rsc = first_named_child(set2, XML_TAG_RESOURCE_REF);
if (xml_rsc != NULL) {
EXPAND_CONSTRAINT_IDREF(id, rsc_2, ID(xml_rsc));
}
}
}
if ((rsc_1 != NULL) && (rsc_2 != NULL)) {
pcmk__order_resource_actions(rsc_1, action_1, rsc_2, action_2, flags,
data_set);
} else if (rsc_1 != NULL) {
for (xml_rsc = first_named_child(set2, XML_TAG_RESOURCE_REF);
xml_rsc != NULL; xml_rsc = crm_next_same_xml(xml_rsc)) {
EXPAND_CONSTRAINT_IDREF(id, rsc_2, ID(xml_rsc));
pcmk__order_resource_actions(rsc_1, action_1, rsc_2, action_2,
flags, data_set);
}
} else if (rsc_2 != NULL) {
for (xml_rsc = first_named_child(set1, XML_TAG_RESOURCE_REF);
xml_rsc != NULL; xml_rsc = crm_next_same_xml(xml_rsc)) {
EXPAND_CONSTRAINT_IDREF(id, rsc_1, ID(xml_rsc));
pcmk__order_resource_actions(rsc_1, action_1, rsc_2, action_2,
flags, data_set);
}
} else {
for (xml_rsc = first_named_child(set1, XML_TAG_RESOURCE_REF);
xml_rsc != NULL; xml_rsc = crm_next_same_xml(xml_rsc)) {
EXPAND_CONSTRAINT_IDREF(id, rsc_1, ID(xml_rsc));
for (xmlNode *xml_rsc_2 = first_named_child(set2, XML_TAG_RESOURCE_REF);
xml_rsc_2 != NULL; xml_rsc_2 = crm_next_same_xml(xml_rsc_2)) {
EXPAND_CONSTRAINT_IDREF(id, rsc_2, ID(xml_rsc_2));
pcmk__order_resource_actions(rsc_1, action_1, rsc_2,
action_2, flags, data_set);
}
}
}
return pcmk_rc_ok;
}
/*!
* \internal
* \brief If an ordering constraint uses resource tags, expand them
*
* \param[in] xml_obj Ordering constraint XML
* \param[out] expanded_xml Equivalent XML with tags expanded
* \param[in] data_set Cluster working set
*
* \return Standard Pacemaker return code (specifically, pcmk_rc_ok on success,
* and pcmk_rc_schema_validation on invalid configuration)
*/
static int
unpack_order_tags(xmlNode *xml_obj, xmlNode **expanded_xml,
pe_working_set_t *data_set)
{
const char *id_first = NULL;
const char *id_then = NULL;
const char *action_first = NULL;
const char *action_then = NULL;
pe_resource_t *rsc_first = NULL;
pe_resource_t *rsc_then = NULL;
pe_tag_t *tag_first = NULL;
pe_tag_t *tag_then = NULL;
xmlNode *rsc_set_first = NULL;
xmlNode *rsc_set_then = NULL;
bool any_sets = false;
// Check whether there are any resource sets with template or tag references
*expanded_xml = pcmk__expand_tags_in_sets(xml_obj, data_set);
if (*expanded_xml != NULL) {
crm_log_xml_trace(*expanded_xml, "Expanded rsc_order");
return pcmk_rc_ok;
}
id_first = crm_element_value(xml_obj, XML_ORDER_ATTR_FIRST);
id_then = crm_element_value(xml_obj, XML_ORDER_ATTR_THEN);
if ((id_first == NULL) || (id_then == NULL)) {
return pcmk_rc_ok;
}
if (!pcmk__valid_resource_or_tag(data_set, id_first, &rsc_first,
&tag_first)) {
pcmk__config_err("Ignoring constraint '%s' because '%s' is not a "
"valid resource or tag", ID(xml_obj), id_first);
return pcmk_rc_schema_validation;
}
if (!pcmk__valid_resource_or_tag(data_set, id_then, &rsc_then, &tag_then)) {
pcmk__config_err("Ignoring constraint '%s' because '%s' is not a "
"valid resource or tag", ID(xml_obj), id_then);
return pcmk_rc_schema_validation;
}
if ((rsc_first != NULL) && (rsc_then != NULL)) {
// Neither side references a template or tag
return pcmk_rc_ok;
}
action_first = crm_element_value(xml_obj, XML_ORDER_ATTR_FIRST_ACTION);
action_then = crm_element_value(xml_obj, XML_ORDER_ATTR_THEN_ACTION);
*expanded_xml = copy_xml(xml_obj);
// Convert template/tag reference in "first" into resource_set under constraint
if (!pcmk__tag_to_set(*expanded_xml, &rsc_set_first, XML_ORDER_ATTR_FIRST,
true, data_set)) {
free_xml(*expanded_xml);
*expanded_xml = NULL;
return pcmk_rc_schema_validation;
}
if (rsc_set_first != NULL) {
if (action_first != NULL) {
// Move "first-action" into converted resource_set as "action"
crm_xml_add(rsc_set_first, "action", action_first);
xml_remove_prop(*expanded_xml, XML_ORDER_ATTR_FIRST_ACTION);
}
any_sets = true;
}
// Convert template/tag reference in "then" into resource_set under constraint
if (!pcmk__tag_to_set(*expanded_xml, &rsc_set_then, XML_ORDER_ATTR_THEN,
true, data_set)) {
free_xml(*expanded_xml);
*expanded_xml = NULL;
return pcmk_rc_schema_validation;
}
if (rsc_set_then != NULL) {
if (action_then != NULL) {
// Move "then-action" into converted resource_set as "action"
crm_xml_add(rsc_set_then, "action", action_then);
xml_remove_prop(*expanded_xml, XML_ORDER_ATTR_THEN_ACTION);
}
any_sets = true;
}
if (any_sets) {
crm_log_xml_trace(*expanded_xml, "Expanded rsc_order");
} else {
free_xml(*expanded_xml);
*expanded_xml = NULL;
}
return pcmk_rc_ok;
}
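/* As an example (illustrative IDs only), if "dbs" is a tag referring to
 * resources db1 and db2, then a constraint such as
 *
 *   <rsc_order id="dbs-then-app" first="dbs" first-action="start"
 *              then="app" then-action="start"/>
 *
 * is expanded here into an equivalent constraint whose "first" side is a
 * resource_set containing db1 and db2 with action="start".
 */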
/*!
* \internal
* \brief Unpack ordering constraint XML
*
* \param[in] xml_obj Ordering constraint XML to unpack
* \param[in,out] data_set Cluster working set
*/
void
pcmk__unpack_ordering(xmlNode *xml_obj, pe_working_set_t *data_set)
{
xmlNode *set = NULL;
xmlNode *last = NULL;
xmlNode *orig_xml = NULL;
xmlNode *expanded_xml = NULL;
const char *id = crm_element_value(xml_obj, XML_ATTR_ID);
const char *invert = crm_element_value(xml_obj, XML_CONS_ATTR_SYMMETRICAL);
enum pe_order_kind kind = get_ordering_type(xml_obj);
enum ordering_symmetry symmetry = get_ordering_symmetry(xml_obj, kind,
NULL);
// Expand any resource tags in the constraint XML
if (unpack_order_tags(xml_obj, &expanded_xml, data_set) != pcmk_rc_ok) {
return;
}
if (expanded_xml != NULL) {
orig_xml = xml_obj;
xml_obj = expanded_xml;
}
// If the constraint has resource sets, unpack them
for (set = first_named_child(xml_obj, XML_CONS_TAG_RSC_SET);
set != NULL; set = crm_next_same_xml(set)) {
set = expand_idref(set, data_set->input);
if ((set == NULL) // Configuration error, message already logged
|| (unpack_order_set(set, kind, invert, data_set) != pcmk_rc_ok)) {
if (expanded_xml != NULL) {
free_xml(expanded_xml);
}
return;
}
if (last != NULL) {
if (order_rsc_sets(id, last, set, kind, data_set,
symmetry) != pcmk_rc_ok) {
if (expanded_xml != NULL) {
free_xml(expanded_xml);
}
return;
}
if ((symmetry == ordering_symmetric)
&& (order_rsc_sets(id, set, last, kind, data_set,
ordering_symmetric_inverse) != pcmk_rc_ok)) {
if (expanded_xml != NULL) {
free_xml(expanded_xml);
}
return;
}
}
last = set;
}
if (expanded_xml) {
free_xml(expanded_xml);
xml_obj = orig_xml;
}
// If the constraint has no resource sets, unpack it as a simple ordering
if (last == NULL) {
return unpack_simple_rsc_order(xml_obj, data_set);
}
}
static bool
ordering_is_invalid(pe_action_t *action, pe_action_wrapper_t *input)
{
/* Prevent user-defined ordering constraints between resources
* running in a guest node and the resource that defines that node.
*/
if (!pcmk_is_set(input->type, pe_order_preserve)
&& (input->action->rsc != NULL)
&& pcmk__rsc_corresponds_to_guest(action->rsc, input->action->node)) {
crm_warn("Invalid ordering constraint between %s and %s",
input->action->rsc->id, action->rsc->id);
return true;
}
/* If there's an order like
* "rscB_stop node2"-> "load_stopped_node2" -> "rscA_migrate_to node1"
*
* then rscA is being migrated from node1 to node2, while rscB is being
* migrated from node2 to node1. If there would be a graph loop,
* break the order "load_stopped_node2" -> "rscA_migrate_to node1".
*/
if ((input->type == pe_order_load) && action->rsc
&& pcmk__str_eq(action->task, RSC_MIGRATE, pcmk__str_casei)
&& pcmk__graph_has_loop(action, action, input)) {
return true;
}
return false;
}
void
pcmk__disable_invalid_orderings(pe_working_set_t *data_set)
{
for (GList *iter = data_set->actions; iter != NULL; iter = iter->next) {
pe_action_t *action = (pe_action_t *) iter->data;
pe_action_wrapper_t *input = NULL;
for (GList *input_iter = action->actions_before;
input_iter != NULL; input_iter = input_iter->next) {
input = (pe_action_wrapper_t *) input_iter->data;
if (ordering_is_invalid(action, input)) {
input->type = pe_order_none;
}
}
}
}
/*!
* \internal
* \brief Order stops on a node before the node's shutdown
*
* \param[in] node Node being shut down
* \param[in] shutdown_op Shutdown action for node
* \param[in] data_set Cluster working set
*/
void
pcmk__order_stops_before_shutdown(pe_node_t *node, pe_action_t *shutdown_op,
pe_working_set_t *data_set)
{
for (GList *iter = data_set->actions; iter != NULL; iter = iter->next) {
pe_action_t *action = (pe_action_t *) iter->data;
// Only stops on the node shutting down are relevant
if ((action->rsc == NULL) || (action->node == NULL)
|| (action->node->details != node->details)
|| !pcmk__str_eq(action->task, RSC_STOP, pcmk__str_casei)) {
continue;
}
// Resources and nodes in maintenance mode won't be touched
if (pcmk_is_set(action->rsc->flags, pe_rsc_maintenance)) {
pe_rsc_trace(action->rsc,
"Not ordering %s before %s shutdown because "
"resource in maintenance mode",
action->uuid, node->details->uname);
continue;
} else if (node->details->maintenance) {
pe_rsc_trace(action->rsc,
"Not ordering %s before %s shutdown because "
"node in maintenance mode",
action->uuid, node->details->uname);
continue;
}
/* Don't touch a resource that is unmanaged or blocked, to avoid
* blocking the shutdown (though if another action depends on this one,
* we may still end up blocking)
*/
if (!pcmk_any_flags_set(action->rsc->flags,
pe_rsc_managed|pe_rsc_block)) {
pe_rsc_trace(action->rsc,
"Not ordering %s before %s shutdown because "
"resource is unmanaged or blocked",
action->uuid, node->details->uname);
continue;
}
pe_rsc_trace(action->rsc, "Ordering %s before %s shutdown",
action->uuid, node->details->uname);
pe__clear_action_flags(action, pe_action_optional);
pcmk__new_ordering(action->rsc, NULL, action, NULL,
strdup(CRM_OP_SHUTDOWN), shutdown_op,
pe_order_optional|pe_order_runnable_left, data_set);
}
}
/*!
* \brief Find resource actions matching directly or as child
*
* \param[in] rsc Resource to check
* \param[in] original_key Action key to search for (possibly referencing
 *                         parent of \p rsc)
*
* \return Newly allocated list of matching actions
* \note It is the caller's responsibility to free the result with g_list_free()
*/
static GList *
find_actions_by_task(pe_resource_t *rsc, const char *original_key)
{
// Search under given task key directly
GList *list = find_actions(rsc->actions, original_key, NULL);
if (list == NULL) {
// Search again using this resource's ID
char *key = NULL;
char *task = NULL;
guint interval_ms = 0;
if (parse_op_key(original_key, NULL, &task, &interval_ms)) {
key = pcmk__op_key(rsc->id, task, interval_ms);
list = find_actions(rsc->actions, key, NULL);
free(key);
free(task);
} else {
crm_err("Invalid operation key (bug?): %s", original_key);
}
}
return list;
}
static void
rsc_order_then(pe_action_t *lh_action, pe_resource_t *rsc,
pe__ordering_t *order)
{
GList *rh_actions = NULL;
pe_action_t *rh_action = NULL;
enum pe_ordering type;
CRM_CHECK(rsc != NULL, return);
CRM_CHECK(order != NULL, return);
type = order->type;
rh_action = order->rh_action;
crm_trace("Applying ordering constraint %d (then: %s)", order->id, rsc->id);
if (rh_action != NULL) {
rh_actions = g_list_prepend(NULL, rh_action);
} else if (rsc != NULL) {
rh_actions = find_actions_by_task(rsc, order->rh_action_task);
}
if (rh_actions == NULL) {
pe_rsc_trace(rsc,
"Ignoring constraint %d: then (%s for %s) not found",
order->id, order->rh_action_task, rsc->id);
return;
}
if ((lh_action != NULL) && (lh_action->rsc == rsc)
&& pcmk_is_set(lh_action->flags, pe_action_dangle)) {
pe_rsc_trace(rsc, "Detected dangling operation %s -> %s",
lh_action->uuid, order->rh_action_task);
pe__clear_order_flags(type, pe_order_implies_then);
}
for (GList *gIter = rh_actions; gIter != NULL; gIter = gIter->next) {
pe_action_t *rh_action_iter = (pe_action_t *) gIter->data;
if (lh_action) {
order_actions(lh_action, rh_action_iter, type);
} else if (type & pe_order_implies_then) {
pe__clear_action_flags(rh_action_iter, pe_action_runnable);
crm_warn("Unrunnable %s %#.6x", rh_action_iter->uuid, type);
} else {
crm_warn("neither %s %#.6x", rh_action_iter->uuid, type);
}
}
g_list_free(rh_actions);
}
static void
rsc_order_first(pe_resource_t *lh_rsc, pe__ordering_t *order,
pe_working_set_t *data_set)
{
GList *lh_actions = NULL;
pe_action_t *lh_action = order->lh_action;
pe_resource_t *rh_rsc = order->rh_rsc;
CRM_ASSERT(lh_rsc != NULL);
pe_rsc_trace(lh_rsc, "Applying ordering constraint %d (first: %s)",
order->id, lh_rsc->id);
if (lh_action != NULL) {
lh_actions = g_list_prepend(NULL, lh_action);
} else {
lh_actions = find_actions_by_task(lh_rsc, order->lh_action_task);
}
if ((lh_actions == NULL) && (lh_rsc == rh_rsc)) {
pe_rsc_trace(lh_rsc,
"Ignoring constraint %d: first (%s for %s) not found",
order->id, order->lh_action_task, lh_rsc->id);
} else if (lh_actions == NULL) {
char *key = NULL;
char *op_type = NULL;
guint interval_ms = 0;
parse_op_key(order->lh_action_task, NULL, &op_type, &interval_ms);
key = pcmk__op_key(lh_rsc->id, op_type, interval_ms);
if ((lh_rsc->fns->state(lh_rsc, TRUE) == RSC_ROLE_STOPPED)
&& pcmk__str_eq(op_type, RSC_STOP, pcmk__str_casei)) {
free(key);
pe_rsc_trace(lh_rsc,
"Ignoring constraint %d: first (%s for %s) not found",
order->id, order->lh_action_task, lh_rsc->id);
} else if ((lh_rsc->fns->state(lh_rsc, TRUE) == RSC_ROLE_UNPROMOTED)
&& pcmk__str_eq(op_type, RSC_DEMOTE, pcmk__str_casei)) {
free(key);
pe_rsc_trace(lh_rsc,
"Ignoring constraint %d: first (%s for %s) not found",
order->id, order->lh_action_task, lh_rsc->id);
} else {
pe_rsc_trace(lh_rsc,
"Creating first (%s for %s) for constraint %d ",
order->lh_action_task, lh_rsc->id, order->id);
lh_action = custom_action(lh_rsc, key, op_type, NULL, TRUE, TRUE, data_set);
lh_actions = g_list_prepend(NULL, lh_action);
}
free(op_type);
}
if (rh_rsc == NULL) {
if (order->rh_action == NULL) {
pe_rsc_trace(lh_rsc, "Ignoring constraint %d: then not found",
order->id);
return;
}
rh_rsc = order->rh_action->rsc;
}
for (GList *gIter = lh_actions; gIter != NULL; gIter = gIter->next) {
lh_action = (pe_action_t *) gIter->data;
if (rh_rsc == NULL) {
order_actions(lh_action, order->rh_action, order->type);
} else {
rsc_order_then(lh_action, rh_rsc, order);
}
}
g_list_free(lh_actions);
}
void
pcmk__apply_orderings(pe_working_set_t *data_set)
{
crm_trace("Applying ordering constraints");
/* Ordering constraints must be processed in the order in which they were
 * created.
 *
 * Also, g_list_append() has poor performance characteristics, so new
 * constraints are prepended as they are created, and the list is reversed
 * here before processing.
 */
data_set->ordering_constraints = g_list_reverse(data_set->ordering_constraints);
for (GList *gIter = data_set->ordering_constraints;
gIter != NULL; gIter = gIter->next) {
pe__ordering_t *order = gIter->data;
pe_resource_t *rsc = order->lh_rsc;
if (rsc != NULL) {
rsc_order_first(rsc, order, data_set);
continue;
}
rsc = order->rh_rsc;
if (rsc != NULL) {
rsc_order_then(order->lh_action, rsc, order);
} else {
crm_trace("Applying ordering constraint %d (non-resource actions)",
order->id);
order_actions(order->lh_action, order->rh_action, order->type);
}
}
g_list_foreach(data_set->actions, (GFunc) pcmk__block_colocated_starts,
data_set);
crm_trace("Ordering probes");
pcmk__order_probes(data_set);
crm_trace("Updating %d actions", g_list_length(data_set->actions));
g_list_foreach(data_set->actions,
(GFunc) pcmk__update_action_for_orderings, data_set);
pcmk__disable_invalid_orderings(data_set);
}
+
+/*!
+ * \internal
+ * \brief Order a given action after each action in a given list
+ *
+ * \param[in] after "After" action
+ * \param[in] list List of "before" actions
+ */
+void
+pcmk__order_after_each(pe_action_t *after, GList *list)
+{
+ const char *after_desc = (after->task == NULL)? after->uuid : after->task;
+
+ for (GList *iter = list; iter != NULL; iter = iter->next) {
+ pe_action_t *before = (pe_action_t *) iter->data;
+ const char *before_desc = before->task? before->task : before->uuid;
+
+ crm_debug("Ordering %s on %s before %s on %s",
+ before_desc, crm_str(before->node->details->uname),
+ after_desc, crm_str(after->node->details->uname));
+ order_actions(before, after, pe_order_optional);
+ }
+}
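+
+/* A minimal usage sketch (hypothetical caller): a scheduler function could
+ * order a fencing pseudo-operation after every stop action scheduled for a
+ * resource, for example:
+ *
+ *     GList *stops = pe__resource_actions(rsc, NULL, RSC_STOP, false);
+ *
+ *     pcmk__order_after_each(fence_op, stops);
+ *     g_list_free(stops);
+ *
+ * Here rsc and fence_op are assumed to be a valid resource and action; each
+ * ordering created is pe_order_optional, as hard-coded above.
+ */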
diff --git a/lib/pengine/utils.c b/lib/pengine/utils.c
index d5ded2e95c..68a8440bec 100644
--- a/lib/pengine/utils.c
+++ b/lib/pengine/utils.c
@@ -1,2615 +1,2616 @@
/*
* Copyright 2004-2022 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#include <crm/crm.h>
#include <crm/msg_xml.h>
#include <crm/common/xml.h>
#include <crm/common/xml_internal.h>
#include <crm/common/util.h>
#include <glib.h>
#include <stdbool.h>
#include <crm/pengine/rules.h>
#include <crm/pengine/internal.h>
#include "pe_status_private.h"
extern bool pcmk__is_daemon;
void print_str_str(gpointer key, gpointer value, gpointer user_data);
gboolean ghash_free_str_str(gpointer key, gpointer value, gpointer user_data);
static void unpack_operation(pe_action_t * action, xmlNode * xml_obj, pe_resource_t * container,
pe_working_set_t * data_set, guint interval_ms);
static xmlNode *find_rsc_op_entry_helper(pe_resource_t * rsc, const char *key,
gboolean include_disabled);
#if ENABLE_VERSIONED_ATTRS
pe_rsc_action_details_t *
pe_rsc_action_details(pe_action_t *action)
{
pe_rsc_action_details_t *details;
CRM_CHECK(action != NULL, return NULL);
if (action->action_details == NULL) {
action->action_details = calloc(1, sizeof(pe_rsc_action_details_t));
CRM_CHECK(action->action_details != NULL, return NULL);
}
details = (pe_rsc_action_details_t *) action->action_details;
if (details->versioned_parameters == NULL) {
details->versioned_parameters = create_xml_node(NULL,
XML_TAG_OP_VER_ATTRS);
}
if (details->versioned_meta == NULL) {
details->versioned_meta = create_xml_node(NULL, XML_TAG_OP_VER_META);
}
return details;
}
static void
pe_free_rsc_action_details(pe_action_t *action)
{
pe_rsc_action_details_t *details;
if ((action == NULL) || (action->action_details == NULL)) {
return;
}
details = (pe_rsc_action_details_t *) action->action_details;
if (details->versioned_parameters) {
free_xml(details->versioned_parameters);
}
if (details->versioned_meta) {
free_xml(details->versioned_meta);
}
action->action_details = NULL;
}
#endif
/*!
* \internal
* \brief Check whether we can fence a particular node
*
* \param[in] data_set Working set for cluster
* \param[in] node Name of node to check
*
* \return true if node can be fenced, false otherwise
*/
bool
pe_can_fence(pe_working_set_t *data_set, pe_node_t *node)
{
if (pe__is_guest_node(node)) {
/* Guest nodes are fenced by stopping their container resource. We can
* do that if the container's host is either online or fenceable.
*/
pe_resource_t *rsc = node->details->remote_rsc->container;
for (GList *n = rsc->running_on; n != NULL; n = n->next) {
pe_node_t *container_node = n->data;
if (!container_node->details->online
&& !pe_can_fence(data_set, container_node)) {
return false;
}
}
return true;
} else if (!pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) {
return false; /* Turned off */
} else if (!pcmk_is_set(data_set->flags, pe_flag_have_stonith_resource)) {
return false; /* No devices */
} else if (pcmk_is_set(data_set->flags, pe_flag_have_quorum)) {
return true;
} else if (data_set->no_quorum_policy == no_quorum_ignore) {
return true;
} else if(node == NULL) {
return false;
} else if(node->details->online) {
crm_notice("We can fence %s without quorum because they're in our membership", node->details->uname);
return true;
}
crm_trace("Cannot fence %s", node->details->uname);
return false;
}
/*!
* \internal
* \brief Copy a node object
*
* \param[in] this_node Node object to copy
*
* \return Newly allocated shallow copy of this_node
* \note This function asserts on errors and is guaranteed to return non-NULL.
*/
pe_node_t *
pe__copy_node(const pe_node_t *this_node)
{
pe_node_t *new_node = NULL;
CRM_ASSERT(this_node != NULL);
new_node = calloc(1, sizeof(pe_node_t));
CRM_ASSERT(new_node != NULL);
new_node->rsc_discover_mode = this_node->rsc_discover_mode;
new_node->weight = this_node->weight;
new_node->fixed = this_node->fixed;
new_node->details = this_node->details;
return new_node;
}
/* any node in list1 or list2 and not in the other gets a score of -INFINITY */
void
node_list_exclude(GHashTable * hash, GList *list, gboolean merge_scores)
{
GHashTable *result = hash;
pe_node_t *other_node = NULL;
GList *gIter = list;
GHashTableIter iter;
pe_node_t *node = NULL;
g_hash_table_iter_init(&iter, hash);
while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
other_node = pe_find_node_id(list, node->details->id);
if (other_node == NULL) {
node->weight = -INFINITY;
} else if (merge_scores) {
node->weight = pcmk__add_scores(node->weight, other_node->weight);
}
}
for (; gIter != NULL; gIter = gIter->next) {
pe_node_t *node = (pe_node_t *) gIter->data;
other_node = pe_hash_table_lookup(result, node->details->id);
if (other_node == NULL) {
pe_node_t *new_node = pe__copy_node(node);
new_node->weight = -INFINITY;
g_hash_table_insert(result, (gpointer) new_node->details->id, new_node);
}
}
}
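/* Worked example for node_list_exclude() above: if the table holds
 * { n1: 100, n2: 0 } and the list holds n1 (weight 50) and n3, then n2 drops
 * to -INFINITY, n3 is added to the table at -INFINITY, and with merge_scores
 * n1 becomes 150 (node names are illustrative).
 */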
/*!
* \internal
* \brief Create a node hash table from a node list
*
* \param[in] list Node list
*
* \return Hash table equivalent of node list
*/
GHashTable *
pe__node_list2table(GList *list)
{
GHashTable *result = NULL;
result = pcmk__strkey_table(NULL, free);
for (GList *gIter = list; gIter != NULL; gIter = gIter->next) {
pe_node_t *new_node = pe__copy_node((pe_node_t *) gIter->data);
g_hash_table_insert(result, (gpointer) new_node->details->id, new_node);
}
return result;
}
gint
sort_node_uname(gconstpointer a, gconstpointer b)
{
return pcmk__numeric_strcasecmp(((const pe_node_t *) a)->details->uname,
((const pe_node_t *) b)->details->uname);
}
/*!
* \internal
* \brief Output node weights to stdout
*
* \param[in] rsc Use allowed nodes for this resource
* \param[in] comment Text description to prefix lines with
* \param[in] nodes If rsc is not specified, use these nodes
*/
static void
pe__output_node_weights(pe_resource_t *rsc, const char *comment,
GHashTable *nodes, pe_working_set_t *data_set)
{
pcmk__output_t *out = data_set->priv;
char score[128]; // Stack-allocated since this is called frequently
// Sort the nodes so the output is consistent for regression tests
GList *list = g_list_sort(g_hash_table_get_values(nodes), sort_node_uname);
for (GList *gIter = list; gIter != NULL; gIter = gIter->next) {
pe_node_t *node = (pe_node_t *) gIter->data;
score2char_stack(node->weight, score, sizeof(score));
out->message(out, "node-weight", rsc, comment, node->details->uname, score);
}
g_list_free(list);
}
/*!
* \internal
* \brief Log node weights at trace level
*
* \param[in] file Caller's filename
* \param[in] function Caller's function name
* \param[in] line Caller's line number
* \param[in] rsc Use allowed nodes for this resource
* \param[in] comment Text description to prefix lines with
* \param[in] nodes If rsc is not specified, use these nodes
*/
static void
pe__log_node_weights(const char *file, const char *function, int line,
pe_resource_t *rsc, const char *comment, GHashTable *nodes)
{
GHashTableIter iter;
pe_node_t *node = NULL;
char score[128]; // Stack-allocated since this is called frequently
// Don't waste time if we're not tracing at this point
pcmk__log_else(LOG_TRACE, return);
g_hash_table_iter_init(&iter, nodes);
while (g_hash_table_iter_next(&iter, NULL, (void **) &node)) {
score2char_stack(node->weight, score, sizeof(score));
if (rsc) {
qb_log_from_external_source(function, file,
"%s: %s allocation score on %s: %s",
LOG_TRACE, line, 0,
comment, rsc->id,
node->details->uname, score);
} else {
qb_log_from_external_source(function, file, "%s: %s = %s",
LOG_TRACE, line, 0,
comment, node->details->uname,
score);
}
}
}
/*!
* \internal
* \brief Log or output node weights
*
* \param[in] file Caller's filename
* \param[in] function Caller's function name
* \param[in] line Caller's line number
* \param[in] to_log Log if true, otherwise output
* \param[in] rsc Use allowed nodes for this resource
* \param[in] comment Text description to prefix lines with
* \param[in] nodes Use these nodes
*/
void
pe__show_node_weights_as(const char *file, const char *function, int line,
bool to_log, pe_resource_t *rsc, const char *comment,
GHashTable *nodes, pe_working_set_t *data_set)
{
if (rsc != NULL && pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
// Don't show allocation scores for orphans
return;
}
if (nodes == NULL) {
// Nothing to show
return;
}
if (to_log) {
pe__log_node_weights(file, function, line, rsc, comment, nodes);
} else {
pe__output_node_weights(rsc, comment, nodes, data_set);
}
// If this resource has children, repeat recursively for each
if (rsc && rsc->children) {
for (GList *gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child = (pe_resource_t *) gIter->data;
pe__show_node_weights_as(file, function, line, to_log, child,
comment, child->allowed_nodes, data_set);
}
}
}
gint
sort_rsc_index(gconstpointer a, gconstpointer b)
{
const pe_resource_t *resource1 = (const pe_resource_t *)a;
const pe_resource_t *resource2 = (const pe_resource_t *)b;
if (a == NULL && b == NULL) {
return 0;
}
if (a == NULL) {
return 1;
}
if (b == NULL) {
return -1;
}
if (resource1->sort_index > resource2->sort_index) {
return -1;
}
if (resource1->sort_index < resource2->sort_index) {
return 1;
}
return 0;
}
gint
sort_rsc_priority(gconstpointer a, gconstpointer b)
{
const pe_resource_t *resource1 = (const pe_resource_t *)a;
const pe_resource_t *resource2 = (const pe_resource_t *)b;
if (a == NULL && b == NULL) {
return 0;
}
if (a == NULL) {
return 1;
}
if (b == NULL) {
return -1;
}
if (resource1->priority > resource2->priority) {
return -1;
}
if (resource1->priority < resource2->priority) {
return 1;
}
return 0;
}
static enum pe_quorum_policy
effective_quorum_policy(pe_resource_t *rsc, pe_working_set_t *data_set)
{
enum pe_quorum_policy policy = data_set->no_quorum_policy;
if (pcmk_is_set(data_set->flags, pe_flag_have_quorum)) {
policy = no_quorum_ignore;
} else if (data_set->no_quorum_policy == no_quorum_demote) {
switch (rsc->role) {
case RSC_ROLE_PROMOTED:
case RSC_ROLE_UNPROMOTED:
if (rsc->next_role > RSC_ROLE_UNPROMOTED) {
pe__set_next_role(rsc, RSC_ROLE_UNPROMOTED,
"no-quorum-policy=demote");
}
policy = no_quorum_ignore;
break;
default:
policy = no_quorum_stop;
break;
}
}
return policy;
}
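/* Summary of effective_quorum_policy() above: with quorum, the effective
 * policy is always no_quorum_ignore. Without quorum and
 * no-quorum-policy=demote, promoted and unpromoted instances are capped at
 * the unpromoted role and treated as no_quorum_ignore, while resources in
 * any other role fall back to no_quorum_stop. Otherwise the configured
 * policy applies unchanged.
 */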
static void
add_singleton(pe_working_set_t *data_set, pe_action_t *action)
{
if (data_set->singletons == NULL) {
data_set->singletons = pcmk__strkey_table(NULL, NULL);
}
g_hash_table_insert(data_set->singletons, action->uuid, action);
}
static pe_action_t *
lookup_singleton(pe_working_set_t *data_set, const char *action_uuid)
{
if (data_set->singletons == NULL) {
return NULL;
}
return g_hash_table_lookup(data_set->singletons, action_uuid);
}
/*!
* \internal
* \brief Find an existing action that matches arguments
*
* \param[in] key Action key to match
* \param[in] rsc Resource to match (if any)
* \param[in] node Node to match (if any)
* \param[in] data_set Cluster working set
*
* \return Existing action that matches arguments (or NULL if none)
*/
static pe_action_t *
find_existing_action(const char *key, pe_resource_t *rsc, pe_node_t *node,
pe_working_set_t *data_set)
{
GList *matches = NULL;
pe_action_t *action = NULL;
/* When rsc is NULL, it would be quicker to check data_set->singletons,
* but checking all data_set->actions takes the node into account.
*/
matches = find_actions(((rsc == NULL)? data_set->actions : rsc->actions),
key, node);
if (matches == NULL) {
return NULL;
}
CRM_LOG_ASSERT(!pcmk__list_of_multiple(matches));
action = matches->data;
g_list_free(matches);
return action;
}
/*!
* \internal
* \brief Create a new action object
*
* \param[in] key Action key
* \param[in] task Action name
* \param[in] rsc Resource that action is for (if any)
* \param[in] node Node that action is on (if any)
* \param[in] optional Whether action should be considered optional
* \param[in] for_graph Whether action should be recorded in transition graph
* \param[in] data_set Cluster working set
*
* \return Newly allocated action
* \note This function takes ownership of \p key. It is the caller's
* responsibility to free the return value with pe_free_action().
*/
static pe_action_t *
new_action(char *key, const char *task, pe_resource_t *rsc, pe_node_t *node,
bool optional, bool for_graph, pe_working_set_t *data_set)
{
pe_action_t *action = calloc(1, sizeof(pe_action_t));
CRM_ASSERT(action != NULL);
action->rsc = rsc;
action->task = strdup(task); CRM_ASSERT(action->task != NULL);
action->uuid = key;
action->extra = pcmk__strkey_table(free, free);
action->meta = pcmk__strkey_table(free, free);
if (node) {
action->node = pe__copy_node(node);
}
if (pcmk__str_eq(task, CRM_OP_LRM_DELETE, pcmk__str_casei)) {
// Resource history deletion for a node can be done on the DC
pe__set_action_flags(action, pe_action_dc);
}
pe__set_action_flags(action, pe_action_runnable);
if (optional) {
pe__set_action_flags(action, pe_action_optional);
} else {
pe__clear_action_flags(action, pe_action_optional);
}
if (rsc != NULL) {
guint interval_ms = 0;
action->op_entry = find_rsc_op_entry_helper(rsc, key, TRUE);
parse_op_key(key, NULL, NULL, &interval_ms);
unpack_operation(action, action->op_entry, rsc->container, data_set,
interval_ms);
}
if (for_graph) {
pe_rsc_trace(rsc, "Created %s action %d (%s): %s for %s on %s",
(optional? "optional" : "required"),
data_set->action_id, key, task,
((rsc == NULL)? "no resource" : rsc->id),
((node == NULL)? "no node" : node->details->uname));
action->id = data_set->action_id++;
data_set->actions = g_list_prepend(data_set->actions, action);
if (rsc == NULL) {
add_singleton(data_set, action);
} else {
rsc->actions = g_list_prepend(rsc->actions, action);
}
}
return action;
}
/*!
* \internal
* \brief Evaluate node attribute values for an action
*
* \param[in] action Action to unpack attributes for
* \param[in] data_set Cluster working set
*/
static void
unpack_action_node_attributes(pe_action_t *action, pe_working_set_t *data_set)
{
if (!pcmk_is_set(action->flags, pe_action_have_node_attrs)
&& (action->op_entry != NULL)) {
pe_rule_eval_data_t rule_data = {
.node_hash = action->node->details->attrs,
.role = RSC_ROLE_UNKNOWN,
.now = data_set->now,
.match_data = NULL,
.rsc_data = NULL,
.op_data = NULL
};
pe__set_action_flags(action, pe_action_have_node_attrs);
pe__unpack_dataset_nvpairs(action->op_entry, XML_TAG_ATTR_SETS,
&rule_data, action->extra, NULL,
FALSE, data_set);
}
}
/*!
* \internal
* \brief Update an action's optional flag
*
* \param[in] action Action to update
* \param[in] optional Requested optional status
*/
static void
update_action_optional(pe_action_t *action, gboolean optional)
{
// Force a non-recurring action to be optional if its resource is unmanaged
if ((action->rsc != NULL) && (action->node != NULL)
&& !pcmk_is_set(action->flags, pe_action_pseudo)
&& !pcmk_is_set(action->rsc->flags, pe_rsc_managed)
&& (g_hash_table_lookup(action->meta,
XML_LRM_ATTR_INTERVAL_MS) == NULL)) {
pe_rsc_debug(action->rsc, "%s on %s is optional (%s is unmanaged)",
action->uuid, action->node->details->uname,
action->rsc->id);
pe__set_action_flags(action, pe_action_optional);
// We shouldn't clear runnable here because ... something
// Otherwise require the action if requested
} else if (!optional) {
pe__clear_action_flags(action, pe_action_optional);
}
}
/*!
* \internal
* \brief Update a resource action's runnable flag
*
* \param[in] action Action to update
* \param[in] for_graph Whether action should be recorded in transition graph
* \param[in] data_set Cluster working set
*
* \note This may also schedule fencing if a stop is unrunnable.
*/
static void
update_resource_action_runnable(pe_action_t *action, bool for_graph,
pe_working_set_t *data_set)
{
if (pcmk_is_set(action->flags, pe_action_pseudo)) {
return;
}
if (action->node == NULL) {
pe_rsc_trace(action->rsc, "%s is unrunnable (unallocated)",
action->uuid);
pe__clear_action_flags(action, pe_action_runnable);
} else if (!pcmk_is_set(action->flags, pe_action_dc)
&& !(action->node->details->online)
&& (!pe__is_guest_node(action->node)
|| action->node->details->remote_requires_reset)) {
pe__clear_action_flags(action, pe_action_runnable);
do_crm_log((for_graph? LOG_WARNING: LOG_TRACE),
"%s on %s is unrunnable (node is offline)",
action->uuid, action->node->details->uname);
if (pcmk_is_set(action->rsc->flags, pe_rsc_managed)
&& for_graph
&& pcmk__str_eq(action->task, CRMD_ACTION_STOP, pcmk__str_casei)
&& !(action->node->details->unclean)) {
pe_fence_node(data_set, action->node, "stop is unrunnable", false);
}
} else if (!pcmk_is_set(action->flags, pe_action_dc)
&& action->node->details->pending) {
pe__clear_action_flags(action, pe_action_runnable);
do_crm_log((for_graph? LOG_WARNING: LOG_TRACE),
"Action %s on %s is unrunnable (node is pending)",
action->uuid, action->node->details->uname);
} else if (action->needs == rsc_req_nothing) {
pe_action_set_reason(action, NULL, TRUE);
if (pe__is_guest_node(action->node)
&& !pe_can_fence(data_set, action->node)) {
/* An action that requires nothing usually does not require any
* fencing in order to be runnable. However, there is an exception:
* such an action cannot be completed if it is on a guest node whose
* host is unclean and cannot be fenced.
*/
pe_rsc_debug(action->rsc, "%s on %s is unrunnable "
"(node's host cannot be fenced)",
action->uuid, action->node->details->uname);
pe__clear_action_flags(action, pe_action_runnable);
} else {
pe_rsc_trace(action->rsc,
"%s on %s does not require fencing or quorum",
action->uuid, action->node->details->uname);
pe__set_action_flags(action, pe_action_runnable);
}
} else {
switch (effective_quorum_policy(action->rsc, data_set)) {
case no_quorum_stop:
pe_rsc_debug(action->rsc, "%s on %s is unrunnable (no quorum)",
action->uuid, action->node->details->uname);
pe__clear_action_flags(action, pe_action_runnable);
pe_action_set_reason(action, "no quorum", true);
break;
case no_quorum_freeze:
if (!action->rsc->fns->active(action->rsc, TRUE)
|| (action->rsc->next_role > action->rsc->role)) {
pe_rsc_debug(action->rsc,
"%s on %s is unrunnable (no quorum)",
action->uuid, action->node->details->uname);
pe__clear_action_flags(action, pe_action_runnable);
pe_action_set_reason(action, "quorum freeze", true);
}
break;
default:
//pe_action_set_reason(action, NULL, TRUE);
pe__set_action_flags(action, pe_action_runnable);
break;
}
}
}
/*!
* \internal
* \brief Update a resource object's flags for a new action on it
*
* \param[in] rsc Resource that action is for (if any)
* \param[in] action New action
*/
static void
update_resource_flags_for_action(pe_resource_t *rsc, pe_action_t *action)
{
/* @COMPAT pe_rsc_starting and pe_rsc_stopping are not actually used
* within Pacemaker, and should be deprecated and eventually removed
*/
if (pcmk__str_eq(action->task, CRMD_ACTION_STOP, pcmk__str_casei)) {
pe__set_resource_flags(rsc, pe_rsc_stopping);
} else if (pcmk__str_eq(action->task, CRMD_ACTION_START, pcmk__str_casei)) {
if (pcmk_is_set(action->flags, pe_action_runnable)) {
pe__set_resource_flags(rsc, pe_rsc_starting);
} else {
pe__clear_resource_flags(rsc, pe_rsc_starting);
}
}
}
/*!
* \brief Create or update an action object
*
* \param[in] rsc Resource that action is for (if any)
* \param[in] key Action key (must be non-NULL)
* \param[in] task Action name (must be non-NULL)
* \param[in] on_node Node that action is on (if any)
* \param[in] optional Whether action should be considered optional
* \param[in] save_action Whether action should be recorded in transition graph
* \param[in] data_set Cluster working set
*
* \return Action object corresponding to arguments
* \note This function takes ownership of (and might free) \p key. If
* \p save_action is true, \p data_set will own the returned action,
* otherwise it is the caller's responsibility to free the return value
* with pe_free_action().
*/
pe_action_t *
custom_action(pe_resource_t *rsc, char *key, const char *task,
pe_node_t *on_node, gboolean optional, gboolean save_action,
pe_working_set_t *data_set)
{
pe_action_t *action = NULL;
CRM_ASSERT((key != NULL) && (task != NULL) && (data_set != NULL));
if (save_action) {
action = find_existing_action(key, rsc, on_node, data_set);
}
if (action == NULL) {
action = new_action(key, task, rsc, on_node, optional, save_action,
data_set);
} else {
free(key);
}
update_action_optional(action, optional);
if (rsc != NULL) {
if (action->node != NULL) {
unpack_action_node_attributes(action, data_set);
}
update_resource_action_runnable(action, save_action, data_set);
if (save_action) {
update_resource_flags_for_action(rsc, action);
}
}
return action;
}
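/* A minimal usage sketch (hypothetical caller): the key is typically built
 * with pcmk__op_key() and, because save_action is TRUE, ownership of both
 * the key and the action passes to the working set:
 *
 *     pe_action_t *start = custom_action(rsc,
 *                                        pcmk__op_key(rsc->id, RSC_START, 0),
 *                                        RSC_START, NULL, TRUE, TRUE,
 *                                        data_set);
 *
 * Here rsc and data_set are assumed to be a valid resource and working set;
 * the caller must not free the returned action in this case.
 */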
static bool
valid_stop_on_fail(const char *value)
{
return !pcmk__strcase_any_of(value, "standby", "demote", "stop", NULL);
}
static const char *
unpack_operation_on_fail(pe_action_t * action)
{
const char *name = NULL;
const char *role = NULL;
const char *on_fail = NULL;
const char *interval_spec = NULL;
const char *value = g_hash_table_lookup(action->meta, XML_OP_ATTR_ON_FAIL);
if (pcmk__str_eq(action->task, CRMD_ACTION_STOP, pcmk__str_casei)
&& !valid_stop_on_fail(value)) {
pcmk__config_err("Resetting '" XML_OP_ATTR_ON_FAIL "' for %s stop "
"action to default value because '%s' is not "
"allowed for stop", action->rsc->id, value);
return NULL;
} else if (pcmk__str_eq(action->task, CRMD_ACTION_DEMOTE, pcmk__str_casei) && !value) {
// demote on_fail defaults to monitor value for promoted role if present
xmlNode *operation = NULL;
CRM_CHECK(action->rsc != NULL, return NULL);
for (operation = pcmk__xe_first_child(action->rsc->ops_xml);
(operation != NULL) && (value == NULL);
operation = pcmk__xe_next(operation)) {
bool enabled = false;
if (!pcmk__str_eq((const char *)operation->name, "op", pcmk__str_none)) {
continue;
}
name = crm_element_value(operation, "name");
role = crm_element_value(operation, "role");
on_fail = crm_element_value(operation, XML_OP_ATTR_ON_FAIL);
interval_spec = crm_element_value(operation, XML_LRM_ATTR_INTERVAL);
if (!on_fail) {
continue;
} else if (pcmk__xe_get_bool_attr(operation, "enabled", &enabled) == pcmk_rc_ok && !enabled) {
continue;
} else if (!pcmk__str_eq(name, "monitor", pcmk__str_casei)
|| !pcmk__strcase_any_of(role, RSC_ROLE_PROMOTED_S,
RSC_ROLE_PROMOTED_LEGACY_S,
NULL)) {
continue;
} else if (crm_parse_interval_spec(interval_spec) == 0) {
continue;
} else if (pcmk__str_eq(on_fail, "demote", pcmk__str_casei)) {
continue;
}
value = on_fail;
}
} else if (pcmk__str_eq(action->task, CRM_OP_LRM_DELETE, pcmk__str_casei)) {
value = "ignore";
} else if (pcmk__str_eq(value, "demote", pcmk__str_casei)) {
name = crm_element_value(action->op_entry, "name");
role = crm_element_value(action->op_entry, "role");
interval_spec = crm_element_value(action->op_entry,
XML_LRM_ATTR_INTERVAL);
if (!pcmk__str_eq(name, CRMD_ACTION_PROMOTE, pcmk__str_casei)
&& (!pcmk__str_eq(name, CRMD_ACTION_STATUS, pcmk__str_casei)
|| !pcmk__strcase_any_of(role, RSC_ROLE_PROMOTED_S,
RSC_ROLE_PROMOTED_LEGACY_S, NULL)
|| (crm_parse_interval_spec(interval_spec) == 0))) {
pcmk__config_err("Resetting '" XML_OP_ATTR_ON_FAIL "' for %s %s "
"action to default value because 'demote' is not "
"allowed for it", action->rsc->id, name);
return NULL;
}
}
return value;
}
static xmlNode *
find_min_interval_mon(pe_resource_t * rsc, gboolean include_disabled)
{
guint interval_ms = 0;
guint min_interval_ms = G_MAXUINT;
const char *name = NULL;
const char *interval_spec = NULL;
xmlNode *op = NULL;
xmlNode *operation = NULL;
for (operation = pcmk__xe_first_child(rsc->ops_xml);
operation != NULL;
operation = pcmk__xe_next(operation)) {
if (pcmk__str_eq((const char *)operation->name, "op", pcmk__str_none)) {
bool enabled = false;
name = crm_element_value(operation, "name");
interval_spec = crm_element_value(operation, XML_LRM_ATTR_INTERVAL);
if (!include_disabled && pcmk__xe_get_bool_attr(operation, "enabled", &enabled) == pcmk_rc_ok &&
!enabled) {
continue;
}
if (!pcmk__str_eq(name, RSC_STATUS, pcmk__str_casei)) {
continue;
}
interval_ms = crm_parse_interval_spec(interval_spec);
if (interval_ms && (interval_ms < min_interval_ms)) {
min_interval_ms = interval_ms;
op = operation;
}
}
}
return op;
}
static int
unpack_start_delay(const char *value, GHashTable *meta)
{
int start_delay = 0;
if (value != NULL) {
start_delay = crm_get_msec(value);
if (start_delay < 0) {
start_delay = 0;
}
if (meta) {
g_hash_table_replace(meta, strdup(XML_OP_ATTR_START_DELAY),
pcmk__itoa(start_delay));
}
}
return start_delay;
}
// true if value contains valid, non-NULL interval origin for recurring op
static bool
unpack_interval_origin(const char *value, xmlNode *xml_obj, guint interval_ms,
crm_time_t *now, long long *start_delay)
{
long long result = 0;
guint interval_sec = interval_ms / 1000;
crm_time_t *origin = NULL;
// Ignore unspecified values and non-recurring operations
if ((value == NULL) || (interval_ms == 0) || (now == NULL)) {
return false;
}
// Parse interval origin from text
origin = crm_time_new(value);
if (origin == NULL) {
pcmk__config_err("Ignoring '" XML_OP_ATTR_ORIGIN "' for operation "
"'%s' because '%s' is not valid",
(ID(xml_obj)? ID(xml_obj) : "(missing ID)"), value);
return false;
}
// Get seconds since origin (negative if origin is in the future)
result = crm_time_get_seconds(now) - crm_time_get_seconds(origin);
crm_time_free(origin);
// Calculate seconds from closest interval to now
result = result % interval_sec;
// Calculate seconds remaining until next interval
result = ((result <= 0)? 0 : interval_sec) - result;
crm_info("Calculated a start delay of %llds for operation '%s'",
result,
(ID(xml_obj)? ID(xml_obj) : "(unspecified)"));
if (start_delay != NULL) {
*start_delay = result * 1000; // milliseconds
}
return true;
}
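/* Worked example for unpack_interval_origin() above: with interval_ms=60000
 * (60s) and an interval-origin 25 seconds in the past, result = 25 % 60 = 25,
 * so the start delay becomes (60 - 25) * 1000 = 35000ms, aligning the next
 * run to the origin's 60-second grid.
 */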
static int
unpack_timeout(const char *value)
{
int timeout_ms = crm_get_msec(value);
if (timeout_ms < 0) {
timeout_ms = crm_get_msec(CRM_DEFAULT_OP_TIMEOUT_S);
}
return timeout_ms;
}
int
pe_get_configured_timeout(pe_resource_t *rsc, const char *action, pe_working_set_t *data_set)
{
xmlNode *child = NULL;
GHashTable *action_meta = NULL;
const char *timeout_spec = NULL;
int timeout_ms = 0;
pe_rule_eval_data_t rule_data = {
.node_hash = NULL,
.role = RSC_ROLE_UNKNOWN,
.now = data_set->now,
.match_data = NULL,
.rsc_data = NULL,
.op_data = NULL
};
for (child = first_named_child(rsc->ops_xml, XML_ATTR_OP);
child != NULL; child = crm_next_same_xml(child)) {
if (pcmk__str_eq(action, crm_element_value(child, XML_NVPAIR_ATTR_NAME),
pcmk__str_casei)) {
timeout_spec = crm_element_value(child, XML_ATTR_TIMEOUT);
break;
}
}
if (timeout_spec == NULL && data_set->op_defaults) {
action_meta = pcmk__strkey_table(free, free);
pe__unpack_dataset_nvpairs(data_set->op_defaults, XML_TAG_META_SETS,
&rule_data, action_meta, NULL, FALSE, data_set);
timeout_spec = g_hash_table_lookup(action_meta, XML_ATTR_TIMEOUT);
}
// @TODO check meta-attributes (including versioned meta-attributes)
// @TODO maybe use min-interval monitor timeout as default for monitors
timeout_ms = crm_get_msec(timeout_spec);
if (timeout_ms < 0) {
timeout_ms = crm_get_msec(CRM_DEFAULT_OP_TIMEOUT_S);
}
if (action_meta != NULL) {
g_hash_table_destroy(action_meta);
}
return timeout_ms;
}
#if ENABLE_VERSIONED_ATTRS
static void
unpack_versioned_meta(xmlNode *versioned_meta, xmlNode *xml_obj,
guint interval_ms, crm_time_t *now)
{
xmlNode *attrs = NULL;
xmlNode *attr = NULL;
for (attrs = pcmk__xe_first_child(versioned_meta); attrs != NULL;
attrs = pcmk__xe_next(attrs)) {
for (attr = pcmk__xe_first_child(attrs); attr != NULL;
attr = pcmk__xe_next(attr)) {
const char *name = crm_element_value(attr, XML_NVPAIR_ATTR_NAME);
const char *value = crm_element_value(attr, XML_NVPAIR_ATTR_VALUE);
if (pcmk__str_eq(name, XML_OP_ATTR_START_DELAY, pcmk__str_casei)) {
int start_delay = unpack_start_delay(value, NULL);
crm_xml_add_int(attr, XML_NVPAIR_ATTR_VALUE, start_delay);
} else if (pcmk__str_eq(name, XML_OP_ATTR_ORIGIN, pcmk__str_casei)) {
long long start_delay = 0;
if (unpack_interval_origin(value, xml_obj, interval_ms, now,
&start_delay)) {
crm_xml_add(attr, XML_NVPAIR_ATTR_NAME,
XML_OP_ATTR_START_DELAY);
crm_xml_add_ll(attr, XML_NVPAIR_ATTR_VALUE, start_delay);
}
} else if (pcmk__str_eq(name, XML_ATTR_TIMEOUT, pcmk__str_casei)) {
int timeout_ms = unpack_timeout(value);
crm_xml_add_int(attr, XML_NVPAIR_ATTR_VALUE, timeout_ms);
}
}
}
}
#endif
/*!
* \brief Unpack operation XML into an action structure
*
* Unpack an operation's meta-attributes (normalizing the interval, timeout,
* and start delay values as integer milliseconds), requirements, and
* failure policy.
*
* \param[in,out] action Action to unpack into
* \param[in] xml_obj Operation XML (or NULL if all defaults)
* \param[in] container Resource that contains affected resource, if any
* \param[in] data_set Cluster state
* \param[in] interval_ms How frequently to perform the operation
*/
static void
unpack_operation(pe_action_t * action, xmlNode * xml_obj, pe_resource_t * container,
pe_working_set_t * data_set, guint interval_ms)
{
int timeout_ms = 0;
const char *value = NULL;
bool is_probe = false;
#if ENABLE_VERSIONED_ATTRS
pe_rsc_action_details_t *rsc_details = NULL;
#endif
pe_rsc_eval_data_t rsc_rule_data = {
.standard = crm_element_value(action->rsc->xml, XML_AGENT_ATTR_CLASS),
.provider = crm_element_value(action->rsc->xml, XML_AGENT_ATTR_PROVIDER),
.agent = crm_element_value(action->rsc->xml, XML_EXPR_ATTR_TYPE)
};
pe_op_eval_data_t op_rule_data = {
.op_name = action->task,
.interval = interval_ms
};
pe_rule_eval_data_t rule_data = {
.node_hash = NULL,
.role = RSC_ROLE_UNKNOWN,
.now = data_set->now,
.match_data = NULL,
.rsc_data = &rsc_rule_data,
.op_data = &op_rule_data
};
CRM_CHECK(action && action->rsc, return);
is_probe = pcmk_is_probe(action->task, interval_ms);
// Cluster-wide
pe__unpack_dataset_nvpairs(data_set->op_defaults, XML_TAG_META_SETS, &rule_data,
action->meta, NULL, FALSE, data_set);
// Determine probe default timeout differently
if (is_probe) {
xmlNode *min_interval_mon = find_min_interval_mon(action->rsc, FALSE);
if (min_interval_mon) {
value = crm_element_value(min_interval_mon, XML_ATTR_TIMEOUT);
if (value) {
crm_trace("\t%s: Setting default timeout to minimum-interval "
"monitor's timeout '%s'", action->uuid, value);
g_hash_table_replace(action->meta, strdup(XML_ATTR_TIMEOUT),
strdup(value));
}
}
}
if (xml_obj) {
xmlAttrPtr xIter = NULL;
// take precedence over defaults
pe__unpack_dataset_nvpairs(xml_obj, XML_TAG_META_SETS, &rule_data,
action->meta, NULL, TRUE, data_set);
#if ENABLE_VERSIONED_ATTRS
rsc_details = pe_rsc_action_details(action);
pe_eval_versioned_attributes(data_set->input, xml_obj,
XML_TAG_ATTR_SETS, &rule_data,
rsc_details->versioned_parameters,
NULL);
pe_eval_versioned_attributes(data_set->input, xml_obj,
XML_TAG_META_SETS, &rule_data,
rsc_details->versioned_meta,
NULL);
#endif
/* Anything set as an XML property has highest precedence.
* This ensures we use the name and interval from the tag.
*/
for (xIter = xml_obj->properties; xIter; xIter = xIter->next) {
const char *prop_name = (const char *)xIter->name;
const char *prop_value = crm_element_value(xml_obj, prop_name);
g_hash_table_replace(action->meta, strdup(prop_name), strdup(prop_value));
}
}
g_hash_table_remove(action->meta, "id");
// Normalize interval to milliseconds
if (interval_ms > 0) {
g_hash_table_replace(action->meta, strdup(XML_LRM_ATTR_INTERVAL),
crm_strdup_printf("%u", interval_ms));
} else {
g_hash_table_remove(action->meta, XML_LRM_ATTR_INTERVAL);
}
/*
* Timeout order of precedence:
* 1. pcmk_monitor_timeout (if rsc has pcmk_ra_cap_fence_params
* and task is start or a probe; pcmk_monitor_timeout works
* by default for a recurring monitor)
* 2. explicit op timeout on the primitive
* 3. default op timeout
* a. if probe, then min-interval monitor's timeout
* b. else, in XML_CIB_TAG_OPCONFIG
* 4. CRM_DEFAULT_OP_TIMEOUT_S
*
* #1 overrides general rule of XML property having highest
* precedence.
*/
if (pcmk_is_set(pcmk_get_ra_caps(rsc_rule_data.standard),
pcmk_ra_cap_fence_params)
&& (pcmk__str_eq(action->task, RSC_START, pcmk__str_casei)
|| is_probe)) {
GHashTable *params = pe_rsc_params(action->rsc, action->node, data_set);
value = g_hash_table_lookup(params, "pcmk_monitor_timeout");
if (value) {
crm_trace("\t%s: Setting timeout to pcmk_monitor_timeout '%s', "
"overriding default", action->uuid, value);
g_hash_table_replace(action->meta, strdup(XML_ATTR_TIMEOUT),
strdup(value));
}
}
// Normalize timeout to positive milliseconds
value = g_hash_table_lookup(action->meta, XML_ATTR_TIMEOUT);
timeout_ms = unpack_timeout(value);
g_hash_table_replace(action->meta, strdup(XML_ATTR_TIMEOUT),
pcmk__itoa(timeout_ms));
if (!pcmk__strcase_any_of(action->task, RSC_START, RSC_PROMOTE, NULL)) {
action->needs = rsc_req_nothing;
value = "nothing (not start or promote)";
} else if (pcmk_is_set(action->rsc->flags, pe_rsc_needs_fencing)) {
action->needs = rsc_req_stonith;
value = "fencing";
} else if (pcmk_is_set(action->rsc->flags, pe_rsc_needs_quorum)) {
action->needs = rsc_req_quorum;
value = "quorum";
} else {
action->needs = rsc_req_nothing;
value = "nothing";
}
pe_rsc_trace(action->rsc, "%s requires %s", action->uuid, value);
value = unpack_operation_on_fail(action);
if (value == NULL) {
} else if (pcmk__str_eq(value, "block", pcmk__str_casei)) {
action->on_fail = action_fail_block;
g_hash_table_insert(action->meta, strdup(XML_OP_ATTR_ON_FAIL), strdup("block"));
value = "block"; // The above could destroy the original string
} else if (pcmk__str_eq(value, "fence", pcmk__str_casei)) {
action->on_fail = action_fail_fence;
value = "node fencing";
if (!pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) {
pcmk__config_err("Resetting '" XML_OP_ATTR_ON_FAIL "' for "
"operation '%s' to 'stop' because 'fence' is not "
"valid when fencing is disabled", action->uuid);
action->on_fail = action_fail_stop;
action->fail_role = RSC_ROLE_STOPPED;
value = "stop resource";
}
} else if (pcmk__str_eq(value, "standby", pcmk__str_casei)) {
action->on_fail = action_fail_standby;
value = "node standby";
} else if (pcmk__strcase_any_of(value, "ignore", "nothing", NULL)) {
action->on_fail = action_fail_ignore;
value = "ignore";
} else if (pcmk__str_eq(value, "migrate", pcmk__str_casei)) {
action->on_fail = action_fail_migrate;
value = "force migration";
} else if (pcmk__str_eq(value, "stop", pcmk__str_casei)) {
action->on_fail = action_fail_stop;
action->fail_role = RSC_ROLE_STOPPED;
value = "stop resource";
} else if (pcmk__str_eq(value, "restart", pcmk__str_casei)) {
action->on_fail = action_fail_recover;
value = "restart (and possibly migrate)";
} else if (pcmk__str_eq(value, "restart-container", pcmk__str_casei)) {
if (container) {
action->on_fail = action_fail_restart_container;
value = "restart container (and possibly migrate)";
} else {
value = NULL;
}
} else if (pcmk__str_eq(value, "demote", pcmk__str_casei)) {
action->on_fail = action_fail_demote;
value = "demote instance";
} else {
pe_err("Resource %s: Unknown failure type (%s)", action->rsc->id, value);
value = NULL;
}
/* defaults */
if (value == NULL && container) {
action->on_fail = action_fail_restart_container;
value = "restart container (and possibly migrate) (default)";
/* For remote nodes, ensure that any failure that results in dropping an
* active connection to the node results in fencing of the node.
*
* There are only two action failures that don't result in fencing.
* 1. probes - probe failures are expected.
* 2. start - a start failure indicates that an active connection does not already
* exist. The user can set op on-fail=fence if they really want to fence start
* failures. */
} else if (((value == NULL) || !pcmk_is_set(action->rsc->flags, pe_rsc_managed))
&& pe__resource_is_remote_conn(action->rsc, data_set)
&& !(pcmk__str_eq(action->task, CRMD_ACTION_STATUS, pcmk__str_casei)
&& (interval_ms == 0))
&& !pcmk__str_eq(action->task, CRMD_ACTION_START, pcmk__str_casei)) {
if (!pcmk_is_set(action->rsc->flags, pe_rsc_managed)) {
action->on_fail = action_fail_stop;
action->fail_role = RSC_ROLE_STOPPED;
value = "stop unmanaged remote node (enforcing default)";
} else {
if (pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) {
value = "fence remote node (default)";
} else {
value = "recover remote node connection (default)";
}
if (action->rsc->remote_reconnect_ms) {
action->fail_role = RSC_ROLE_STOPPED;
}
action->on_fail = action_fail_reset_remote;
}
} else if (value == NULL && pcmk__str_eq(action->task, CRMD_ACTION_STOP, pcmk__str_casei)) {
if (pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) {
action->on_fail = action_fail_fence;
value = "resource fence (default)";
} else {
action->on_fail = action_fail_block;
value = "resource block (default)";
}
} else if (value == NULL) {
action->on_fail = action_fail_recover;
value = "restart (and possibly migrate) (default)";
}
pe_rsc_trace(action->rsc, "%s failure handling: %s",
action->uuid, value);
value = NULL;
if (xml_obj != NULL) {
value = g_hash_table_lookup(action->meta, "role_after_failure");
if (value) {
pe_warn_once(pe_wo_role_after,
"Support for role_after_failure is deprecated and will be removed in a future release");
}
}
if (value != NULL && action->fail_role == RSC_ROLE_UNKNOWN) {
action->fail_role = text2role(value);
}
/* defaults */
if (action->fail_role == RSC_ROLE_UNKNOWN) {
if (pcmk__str_eq(action->task, CRMD_ACTION_PROMOTE, pcmk__str_casei)) {
action->fail_role = RSC_ROLE_UNPROMOTED;
} else {
action->fail_role = RSC_ROLE_STARTED;
}
}
pe_rsc_trace(action->rsc, "%s failure results in: %s",
action->uuid, role2text(action->fail_role));
value = g_hash_table_lookup(action->meta, XML_OP_ATTR_START_DELAY);
if (value) {
unpack_start_delay(value, action->meta);
} else {
long long start_delay = 0;
value = g_hash_table_lookup(action->meta, XML_OP_ATTR_ORIGIN);
if (unpack_interval_origin(value, xml_obj, interval_ms, data_set->now,
&start_delay)) {
g_hash_table_replace(action->meta, strdup(XML_OP_ATTR_START_DELAY),
crm_strdup_printf("%lld", start_delay));
}
}
#if ENABLE_VERSIONED_ATTRS
unpack_versioned_meta(rsc_details->versioned_meta, xml_obj, interval_ms,
data_set->now);
#endif
}
static xmlNode *
find_rsc_op_entry_helper(pe_resource_t * rsc, const char *key, gboolean include_disabled)
{
guint interval_ms = 0;
gboolean do_retry = TRUE;
char *local_key = NULL;
const char *name = NULL;
const char *interval_spec = NULL;
char *match_key = NULL;
xmlNode *op = NULL;
xmlNode *operation = NULL;
retry:
for (operation = pcmk__xe_first_child(rsc->ops_xml); operation != NULL;
operation = pcmk__xe_next(operation)) {
if (pcmk__str_eq((const char *)operation->name, "op", pcmk__str_none)) {
bool enabled = false;
name = crm_element_value(operation, "name");
interval_spec = crm_element_value(operation, XML_LRM_ATTR_INTERVAL);
if (!include_disabled && pcmk__xe_get_bool_attr(operation, "enabled", &enabled) == pcmk_rc_ok &&
!enabled) {
continue;
}
interval_ms = crm_parse_interval_spec(interval_spec);
match_key = pcmk__op_key(rsc->id, name, interval_ms);
if (pcmk__str_eq(key, match_key, pcmk__str_casei)) {
op = operation;
}
free(match_key);
if (rsc->clone_name) {
match_key = pcmk__op_key(rsc->clone_name, name, interval_ms);
if (pcmk__str_eq(key, match_key, pcmk__str_casei)) {
op = operation;
}
free(match_key);
}
if (op != NULL) {
free(local_key);
return op;
}
}
}
free(local_key);
if (do_retry == FALSE) {
return NULL;
}
do_retry = FALSE;
if (strstr(key, CRMD_ACTION_MIGRATE) || strstr(key, CRMD_ACTION_MIGRATED)) {
local_key = pcmk__op_key(rsc->id, "migrate", 0);
key = local_key;
goto retry;
} else if (strstr(key, "_notify_")) {
local_key = pcmk__op_key(rsc->id, "notify", 0);
key = local_key;
goto retry;
}
return NULL;
}
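/* Example: for a hypothetical resource "db" with <op name="monitor"
 * interval="10s"/>, the helper above matches the key "db_monitor_10000" (the
 * form generated by pcmk__op_key()), and retries with "db_migrate_0" or
 * "db_notify_0" when a migration or notify key has no exact entry.
 */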
xmlNode *
find_rsc_op_entry(pe_resource_t * rsc, const char *key)
{
return find_rsc_op_entry_helper(rsc, key, FALSE);
}
/*
* Used by the HashTable for-loop
*/
void
print_str_str(gpointer key, gpointer value, gpointer user_data)
{
crm_trace("%s%s %s ==> %s",
user_data == NULL ? "" : (char *)user_data,
user_data == NULL ? "" : ": ", (char *)key, (char *)value);
}
void
pe_free_action(pe_action_t * action)
{
if (action == NULL) {
return;
}
g_list_free_full(action->actions_before, free); /* pe_action_wrapper_t* */
g_list_free_full(action->actions_after, free); /* pe_action_wrapper_t* */
if (action->extra) {
g_hash_table_destroy(action->extra);
}
if (action->meta) {
g_hash_table_destroy(action->meta);
}
#if ENABLE_VERSIONED_ATTRS
if (action->rsc) {
pe_free_rsc_action_details(action);
}
#endif
free(action->cancel_task);
free(action->reason);
free(action->task);
free(action->uuid);
free(action->node);
free(action);
}
GList *
find_recurring_actions(GList *input, pe_node_t * not_on_node)
{
const char *value = NULL;
GList *result = NULL;
GList *gIter = input;
CRM_CHECK(input != NULL, return NULL);
for (; gIter != NULL; gIter = gIter->next) {
pe_action_t *action = (pe_action_t *) gIter->data;
value = g_hash_table_lookup(action->meta, XML_LRM_ATTR_INTERVAL_MS);
if (value == NULL) {
/* skip */
} else if (pcmk__str_eq(value, "0", pcmk__str_casei)) {
/* skip */
} else if (pcmk__str_eq(CRMD_ACTION_CANCEL, action->task, pcmk__str_casei)) {
/* skip */
} else if (not_on_node == NULL) {
crm_trace("(null) Found: %s", action->uuid);
result = g_list_prepend(result, action);
} else if (action->node == NULL) {
/* skip */
} else if (action->node->details != not_on_node->details) {
crm_trace("Found: %s", action->uuid);
result = g_list_prepend(result, action);
}
}
return result;
}
enum action_tasks
get_complex_task(pe_resource_t * rsc, const char *name, gboolean allow_non_atomic)
{
enum action_tasks task = text2task(name);
if (rsc == NULL) {
return task;
} else if (allow_non_atomic == FALSE || rsc->variant == pe_native) {
switch (task) {
case stopped_rsc:
case started_rsc:
case action_demoted:
case action_promoted:
crm_trace("Folding %s back into its atomic counterpart for %s", name, rsc->id);
return task - 1;
default:
break;
}
}
return task;
}
pe_action_t *
find_first_action(GList *input, const char *uuid, const char *task, pe_node_t * on_node)
{
GList *gIter = NULL;
CRM_CHECK(uuid || task, return NULL);
for (gIter = input; gIter != NULL; gIter = gIter->next) {
pe_action_t *action = (pe_action_t *) gIter->data;
if (uuid != NULL && !pcmk__str_eq(uuid, action->uuid, pcmk__str_casei)) {
continue;
} else if (task != NULL && !pcmk__str_eq(task, action->task, pcmk__str_casei)) {
continue;
} else if (on_node == NULL) {
return action;
} else if (action->node == NULL) {
continue;
} else if (on_node->details == action->node->details) {
return action;
}
}
return NULL;
}
GList *
find_actions(GList *input, const char *key, const pe_node_t *on_node)
{
GList *gIter = input;
GList *result = NULL;
CRM_CHECK(key != NULL, return NULL);
for (; gIter != NULL; gIter = gIter->next) {
pe_action_t *action = (pe_action_t *) gIter->data;
if (!pcmk__str_eq(key, action->uuid, pcmk__str_casei)) {
continue;
} else if (on_node == NULL) {
crm_trace("Action %s matches (ignoring node)", key);
result = g_list_prepend(result, action);
} else if (action->node == NULL) {
crm_trace("Action %s matches (unallocated, assigning to %s)",
key, on_node->details->uname);
action->node = pe__copy_node(on_node);
result = g_list_prepend(result, action);
} else if (on_node->details == action->node->details) {
crm_trace("Action %s on %s matches", key, on_node->details->uname);
result = g_list_prepend(result, action);
}
}
return result;
}
GList *
find_actions_exact(GList *input, const char *key, const pe_node_t *on_node)
{
GList *result = NULL;
CRM_CHECK(key != NULL, return NULL);
if (on_node == NULL) {
return NULL;
}
for (GList *gIter = input; gIter != NULL; gIter = gIter->next) {
pe_action_t *action = (pe_action_t *) gIter->data;
if ((action->node != NULL)
&& pcmk__str_eq(key, action->uuid, pcmk__str_casei)
&& pcmk__str_eq(on_node->details->id, action->node->details->id,
pcmk__str_casei)) {
crm_trace("Action %s on %s matches", key, on_node->details->uname);
result = g_list_prepend(result, action);
}
}
return result;
}
/*!
* \brief Find all actions of given type for a resource
*
* \param[in] rsc Resource to search
* \param[in] node Find only actions scheduled on this node
* \param[in] task Action name to search for
* \param[in] require_node If TRUE, NULL node or action node will not match
*
* \return List of actions found (or NULL if none)
* \note If node is not NULL and require_node is FALSE, matching actions
* without a node will be assigned to node.
*/
GList *
pe__resource_actions(const pe_resource_t *rsc, const pe_node_t *node,
const char *task, bool require_node)
{
GList *result = NULL;
char *key = pcmk__op_key(rsc->id, task, 0);
if (require_node) {
result = find_actions_exact(rsc->actions, key, node);
} else {
result = find_actions(rsc->actions, key, node);
}
free(key);
return result;
}
static void
resource_node_score(pe_resource_t * rsc, pe_node_t * node, int score, const char *tag)
{
pe_node_t *match = NULL;
if ((rsc->exclusive_discover || (node->rsc_discover_mode == pe_discover_never))
&& pcmk__str_eq(tag, "symmetric_default", pcmk__str_casei)) {
/* This string comparison may be fragile, but exclusive resources and
* exclusive nodes should not have the symmetric_default constraint
* applied to them.
*/
return;
} else if (rsc->children) {
GList *gIter = rsc->children;
for (; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
resource_node_score(child_rsc, node, score, tag);
}
}
pe_rsc_trace(rsc, "Setting %s for %s on %s: %d", tag, rsc->id, node->details->uname, score);
match = pe_hash_table_lookup(rsc->allowed_nodes, node->details->id);
if (match == NULL) {
match = pe__copy_node(node);
g_hash_table_insert(rsc->allowed_nodes, (gpointer) match->details->id, match);
}
match->weight = pcmk__add_scores(match->weight, score);
}
void
resource_location(pe_resource_t * rsc, pe_node_t * node, int score, const char *tag,
pe_working_set_t * data_set)
{
if (node != NULL) {
resource_node_score(rsc, node, score, tag);
} else if (data_set != NULL) {
GList *gIter = data_set->nodes;
for (; gIter != NULL; gIter = gIter->next) {
pe_node_t *node_iter = (pe_node_t *) gIter->data;
resource_node_score(rsc, node_iter, score, tag);
}
} else {
GHashTableIter iter;
pe_node_t *node_iter = NULL;
g_hash_table_iter_init(&iter, rsc->allowed_nodes);
while (g_hash_table_iter_next(&iter, NULL, (void **)&node_iter)) {
resource_node_score(rsc, node_iter, score, tag);
}
}
if (node == NULL && score == -INFINITY) {
if (rsc->allocated_to) {
crm_info("Deallocating %s from %s", rsc->id, rsc->allocated_to->details->uname);
free(rsc->allocated_to);
rsc->allocated_to = NULL;
}
}
}
#define sort_return(an_int, why) do { \
free(a_uuid); \
free(b_uuid); \
crm_trace("%s (%d) %c %s (%d) : %s", \
a_xml_id, a_call_id, an_int>0?'>':an_int<0?'<':'=', \
b_xml_id, b_call_id, why); \
return an_int; \
} while(0)
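/* sort_op_by_callid() compares two lrm_rsc_op history entries: duplicates
 * compare equal, completed operations sort by call ID with ties broken by
 * last-rc-change, and pending operations (call ID -1) are ordered by
 * decoding their transition magic, with shutdown operations sorting last.
 */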
gint
sort_op_by_callid(gconstpointer a, gconstpointer b)
{
int a_call_id = -1;
int b_call_id = -1;
char *a_uuid = NULL;
char *b_uuid = NULL;
const xmlNode *xml_a = a;
const xmlNode *xml_b = b;
const char *a_xml_id = crm_element_value(xml_a, XML_ATTR_ID);
const char *b_xml_id = crm_element_value(xml_b, XML_ATTR_ID);
if (pcmk__str_eq(a_xml_id, b_xml_id, pcmk__str_casei)) {
/* Duplicate lrm_rsc_op entries in the status section should not happen.
 * We can handle them easily enough, but the root cause is worth
 * investigating.
 */
pe_err("Duplicate lrm_rsc_op entries named %s", a_xml_id);
sort_return(0, "duplicate");
}
crm_element_value_int(xml_a, XML_LRM_ATTR_CALLID, &a_call_id);
crm_element_value_int(xml_b, XML_LRM_ATTR_CALLID, &b_call_id);
if (a_call_id == -1 && b_call_id == -1) {
/* both are pending ops so it doesn't matter since
* stops are never pending
*/
sort_return(0, "pending");
} else if (a_call_id >= 0 && a_call_id < b_call_id) {
sort_return(-1, "call id");
} else if (b_call_id >= 0 && a_call_id > b_call_id) {
sort_return(1, "call id");
} else if (b_call_id >= 0 && a_call_id == b_call_id) {
/*
* The op and last_failed_op are the same
* Order on last-rc-change
*/
time_t last_a = -1;
time_t last_b = -1;
crm_element_value_epoch(xml_a, XML_RSC_OP_LAST_CHANGE, &last_a);
crm_element_value_epoch(xml_b, XML_RSC_OP_LAST_CHANGE, &last_b);
crm_trace("rc-change: %lld vs %lld",
(long long) last_a, (long long) last_b);
if (last_a >= 0 && last_a < last_b) {
sort_return(-1, "rc-change");
} else if (last_b >= 0 && last_a > last_b) {
sort_return(1, "rc-change");
}
sort_return(0, "rc-change");
} else {
/* One of the inputs is a pending operation
* Attempt to use XML_ATTR_TRANSITION_MAGIC to determine its age relative to the other
*/
int a_id = -1;
int b_id = -1;
const char *a_magic = crm_element_value(xml_a, XML_ATTR_TRANSITION_MAGIC);
const char *b_magic = crm_element_value(xml_b, XML_ATTR_TRANSITION_MAGIC);
CRM_CHECK(a_magic != NULL && b_magic != NULL, sort_return(0, "No magic"));
if (!decode_transition_magic(a_magic, &a_uuid, &a_id, NULL, NULL, NULL,
NULL)) {
sort_return(0, "bad magic a");
}
if (!decode_transition_magic(b_magic, &b_uuid, &b_id, NULL, NULL, NULL,
NULL)) {
sort_return(0, "bad magic b");
}
/* try to determine the relative age of the operation...
* some pending operations (e.g. a start) may have been superseded
* by a subsequent stop
*
* [a|b]_id == -1 means it's a shutdown operation and _always_ comes last
*/
if (!pcmk__str_eq(a_uuid, b_uuid, pcmk__str_casei) || a_id == b_id) {
/*
* some of the logic in here may be redundant...
*
* if the UUID from the TE doesn't match then one better
* be a pending operation.
* pending operations don't survive between elections and joins
* because we query the LRM directly
*/
if (b_call_id == -1) {
sort_return(-1, "transition + call");
} else if (a_call_id == -1) {
sort_return(1, "transition + call");
}
} else if ((a_id >= 0 && a_id < b_id) || b_id == -1) {
sort_return(-1, "transition");
} else if ((b_id >= 0 && a_id > b_id) || a_id == -1) {
sort_return(1, "transition");
}
}
/* we should never end up here */
CRM_CHECK(FALSE, sort_return(0, "default"));
}
time_t
get_effective_time(pe_working_set_t * data_set)
{
if(data_set) {
if (data_set->now == NULL) {
crm_trace("Recording a new 'now'");
data_set->now = crm_time_new(NULL);
}
return crm_time_get_seconds_since_epoch(data_set->now);
}
crm_trace("Defaulting to 'now'");
return time(NULL);
}
gboolean
get_target_role(pe_resource_t * rsc, enum rsc_role_e * role)
{
enum rsc_role_e local_role = RSC_ROLE_UNKNOWN;
const char *value = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET_ROLE);
CRM_CHECK(role != NULL, return FALSE);
if (pcmk__str_eq(value, "started", pcmk__str_null_matches | pcmk__str_casei)
|| pcmk__str_eq("default", value, pcmk__str_casei)) {
return FALSE;
}
local_role = text2role(value);
if (local_role == RSC_ROLE_UNKNOWN) {
pcmk__config_err("Ignoring '" XML_RSC_ATTR_TARGET_ROLE "' for %s "
"because '%s' is not valid", rsc->id, value);
return FALSE;
} else if (local_role > RSC_ROLE_STARTED) {
if (pcmk_is_set(uber_parent(rsc)->flags, pe_rsc_promotable)) {
if (local_role > RSC_ROLE_UNPROMOTED) {
/* This is what we'd do anyway, just leave the default to avoid messing up the placement algorithm */
return FALSE;
}
} else {
pcmk__config_err("Ignoring '" XML_RSC_ATTR_TARGET_ROLE "' for %s "
"because '%s' only makes sense for promotable "
"clones", rsc->id, value);
return FALSE;
}
}
*role = local_role;
return TRUE;
}
gboolean
order_actions(pe_action_t * lh_action, pe_action_t * rh_action, enum pe_ordering order)
{
GList *gIter = NULL;
pe_action_wrapper_t *wrapper = NULL;
GList *list = NULL;
if (order == pe_order_none) {
return FALSE;
}
if (lh_action == NULL || rh_action == NULL) {
return FALSE;
}
crm_trace("Creating action wrappers for ordering: %s then %s",
lh_action->uuid, rh_action->uuid);
/* Ensure we never create a dependency on ourselves... it's happened */
CRM_ASSERT(lh_action != rh_action);
/* Filter dups, otherwise update_action_states() has too much work to do */
gIter = lh_action->actions_after;
for (; gIter != NULL; gIter = gIter->next) {
pe_action_wrapper_t *after = (pe_action_wrapper_t *) gIter->data;
if (after->action == rh_action && (after->type & order)) {
return FALSE;
}
}
wrapper = calloc(1, sizeof(pe_action_wrapper_t));
wrapper->action = rh_action;
wrapper->type = order;
list = lh_action->actions_after;
list = g_list_prepend(list, wrapper);
lh_action->actions_after = list;
wrapper = calloc(1, sizeof(pe_action_wrapper_t));
wrapper->action = lh_action;
wrapper->type = order;
list = rh_action->actions_before;
list = g_list_prepend(list, wrapper);
rh_action->actions_before = list;
return TRUE;
}
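/* Example (hypothetical actions): order_actions(stop_action, start_action,
 * pe_order_optional) adds start_action to stop_action->actions_after and
 * stop_action to start_action->actions_before, returning FALSE if an
 * equivalent ordering is already recorded.
 */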
pe_action_t *
get_pseudo_op(const char *name, pe_working_set_t * data_set)
{
pe_action_t *op = lookup_singleton(data_set, name);
if (op == NULL) {
op = custom_action(NULL, strdup(name), name, NULL, TRUE, TRUE, data_set);
pe__set_action_flags(op, pe_action_pseudo|pe_action_runnable);
}
return op;
}
void
destroy_ticket(gpointer data)
{
pe_ticket_t *ticket = data;
if (ticket->state) {
g_hash_table_destroy(ticket->state);
}
free(ticket->id);
free(ticket);
}
pe_ticket_t *
ticket_new(const char *ticket_id, pe_working_set_t * data_set)
{
pe_ticket_t *ticket = NULL;
if (pcmk__str_empty(ticket_id)) {
return NULL;
}
if (data_set->tickets == NULL) {
data_set->tickets = pcmk__strkey_table(free, destroy_ticket);
}
ticket = g_hash_table_lookup(data_set->tickets, ticket_id);
if (ticket == NULL) {
ticket = calloc(1, sizeof(pe_ticket_t));
if (ticket == NULL) {
crm_err("Cannot allocate ticket '%s'", ticket_id);
return NULL;
}
crm_trace("Creaing ticket entry for %s", ticket_id);
ticket->id = strdup(ticket_id);
ticket->granted = FALSE;
ticket->last_granted = -1;
ticket->standby = FALSE;
ticket->state = pcmk__strkey_table(free, free);
g_hash_table_insert(data_set->tickets, strdup(ticket->id), ticket);
}
return ticket;
}
const char *rsc_printable_id(pe_resource_t *rsc)
{
if (!pcmk_is_set(rsc->flags, pe_rsc_unique)) {
return ID(rsc->xml);
}
return rsc->id;
}
void
pe__clear_resource_flags_recursive(pe_resource_t *rsc, uint64_t flags)
{
pe__clear_resource_flags(rsc, flags);
for (GList *gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
pe__clear_resource_flags_recursive((pe_resource_t *) gIter->data, flags);
}
}
void
pe__clear_resource_flags_on_all(pe_working_set_t *data_set, uint64_t flag)
{
for (GList *lpc = data_set->resources; lpc != NULL; lpc = lpc->next) {
pe_resource_t *r = (pe_resource_t *) lpc->data;
pe__clear_resource_flags_recursive(r, flag);
}
}
void
pe__set_resource_flags_recursive(pe_resource_t *rsc, uint64_t flags)
{
pe__set_resource_flags(rsc, flags);
for (GList *gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
pe__set_resource_flags_recursive((pe_resource_t *) gIter->data, flags);
}
}
static GList *
find_unfencing_devices(GList *candidates, GList *matches)
{
for (GList *gIter = candidates; gIter != NULL; gIter = gIter->next) {
pe_resource_t *candidate = gIter->data;
const char *provides = g_hash_table_lookup(candidate->meta,
PCMK_STONITH_PROVIDES);
const char *requires = g_hash_table_lookup(candidate->meta, XML_RSC_ATTR_REQUIRES);
if(candidate->children) {
matches = find_unfencing_devices(candidate->children, matches);
} else if (!pcmk_is_set(candidate->flags, pe_rsc_fence_device)) {
continue;
} else if (pcmk__str_eq(provides, "unfencing", pcmk__str_casei) || pcmk__str_eq(requires, "unfencing", pcmk__str_casei)) {
matches = g_list_prepend(matches, candidate);
}
}
return matches;
}
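/*!
* \internal
* \brief Calculate the priority fencing delay for fencing a given node
*
* \param[in] node Node to be fenced
* \param[in] data_set Cluster working set
*
* \return Configured priority-fencing-delay if it should apply to fencing
*         \p node, otherwise 0
*/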
static int
node_priority_fencing_delay(pe_node_t * node, pe_working_set_t * data_set)
{
int member_count = 0;
int online_count = 0;
int top_priority = 0;
int lowest_priority = 0;
GList *gIter = NULL;
// `priority-fencing-delay` is disabled
if (data_set->priority_fencing_delay <= 0) {
return 0;
}
/* No need to request a delay if the fencing target is not a normal cluster
* member, for example if it's a remote node or a guest node. */
if (node->details->type != node_member) {
return 0;
}
// No need to request a delay if the fencing target is in our partition
if (node->details->online) {
return 0;
}
for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
pe_node_t *n = gIter->data;
if (n->details->type != node_member) {
continue;
}
member_count ++;
if (n->details->online) {
online_count++;
}
if (member_count == 1
|| n->details->priority > top_priority) {
top_priority = n->details->priority;
}
if (member_count == 1
|| n->details->priority < lowest_priority) {
lowest_priority = n->details->priority;
}
}
// No need to delay if we have more than half of the cluster members
if (online_count > member_count / 2) {
return 0;
}
/* If all nodes have equal priority, no priority delay is needed; any
* configured `pcmk_delay_base/max` will be applied as usual. */
if (lowest_priority == top_priority) {
return 0;
}
if (node->details->priority < top_priority) {
return 0;
}
return data_set->priority_fencing_delay;
}
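/*!
* \brief Create or retrieve the fencing action for a given node
*
* \param[in] node Node to fence
* \param[in] op Fence action name (if NULL, the configured stonith action
*               is used)
* \param[in] optional Whether the action should be optional
* \param[in] reason Reason for the fencing, if any
* \param[in] priority_delay Whether priority-fencing-delay should be considered
* \param[in] data_set Cluster working set
*
* \return Fencing action for \p node (created if it did not already exist)
*/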
pe_action_t *
pe_fence_op(pe_node_t * node, const char *op, bool optional, const char *reason,
bool priority_delay, pe_working_set_t * data_set)
{
char *op_key = NULL;
pe_action_t *stonith_op = NULL;
if(op == NULL) {
op = data_set->stonith_action;
}
op_key = crm_strdup_printf("%s-%s-%s", CRM_OP_FENCE, node->details->uname, op);
stonith_op = lookup_singleton(data_set, op_key);
if(stonith_op == NULL) {
stonith_op = custom_action(NULL, op_key, CRM_OP_FENCE, node, TRUE, TRUE, data_set);
add_hash_param(stonith_op->meta, XML_LRM_ATTR_TARGET, node->details->uname);
add_hash_param(stonith_op->meta, XML_LRM_ATTR_TARGET_UUID, node->details->id);
add_hash_param(stonith_op->meta, "stonith_action", op);
if (pe__is_guest_or_remote_node(node)
&& pcmk_is_set(data_set->flags, pe_flag_enable_unfencing)) {
/* Extra work to detect device changes on remote and guest nodes
*
* We may do this for all nodes in the future, but for now the
* pcmk__check_action_config()-based checks are sufficient.
*/
long max = 1024;
long digests_all_offset = 0;
long digests_secure_offset = 0;
char *digests_all = calloc(max, sizeof(char));
char *digests_secure = calloc(max, sizeof(char));
GList *matches = find_unfencing_devices(data_set->resources, NULL);
for (GList *gIter = matches; gIter != NULL; gIter = gIter->next) {
pe_resource_t *match = gIter->data;
const char *agent = g_hash_table_lookup(match->meta,
XML_ATTR_TYPE);
op_digest_cache_t *data = NULL;
data = pe__compare_fencing_digest(match, agent, node, data_set);
if(data->rc == RSC_DIGEST_ALL) {
optional = FALSE;
crm_notice("Unfencing %s (remote): because the definition of %s changed", node->details->uname, match->id);
if (!pcmk__is_daemon && data_set->priv != NULL) {
pcmk__output_t *out = data_set->priv;
out->info(out, "notice: Unfencing %s (remote): because the definition of %s changed",
node->details->uname, match->id);
}
}
digests_all_offset += snprintf(
digests_all+digests_all_offset, max-digests_all_offset,
"%s:%s:%s,", match->id, agent, data->digest_all_calc);
digests_secure_offset += snprintf(
digests_secure+digests_secure_offset, max-digests_secure_offset,
"%s:%s:%s,", match->id, agent, data->digest_secure_calc);
}
g_hash_table_insert(stonith_op->meta,
strdup(XML_OP_ATTR_DIGESTS_ALL),
digests_all);
g_hash_table_insert(stonith_op->meta,
strdup(XML_OP_ATTR_DIGESTS_SECURE),
digests_secure);
}
} else {
free(op_key);
}
if (data_set->priority_fencing_delay > 0
/* This is a case where `priority-fencing-delay` applies, so at least
* add the `priority-fencing-delay` field as an indicator. */
&& (priority_delay
- /* Re-calculate priority delay for the suitable case when
- * pe_fence_op() is called again by stage6() after node priority has
- * been actually calculated with native_add_running() */
+ /* The priority delay needs to be recalculated if this function has
+ * been called by schedule_fencing_and_shutdowns() after node
+ * priority has already been calculated by native_add_running().
+ */
|| g_hash_table_lookup(stonith_op->meta,
XML_CONFIG_ATTR_PRIORITY_FENCING_DELAY) != NULL)) {
/* Add `priority-fencing-delay` to the fencing op even if it's 0 for
* the target node, so that it takes precedence over any configured
* `pcmk_delay_base/max`.
*/
char *delay_s = pcmk__itoa(node_priority_fencing_delay(node, data_set));
g_hash_table_insert(stonith_op->meta,
strdup(XML_CONFIG_ATTR_PRIORITY_FENCING_DELAY),
delay_s);
}
if(optional == FALSE && pe_can_fence(data_set, node)) {
pe__clear_action_flags(stonith_op, pe_action_optional);
pe_action_set_reason(stonith_op, reason, false);
} else if(reason && stonith_op->reason == NULL) {
stonith_op->reason = strdup(reason);
}
return stonith_op;
}
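/*!
* \brief Schedule any unfencing ("on") actions that are needed
*
* \param[in] rsc Fence device, or NULL to consider \p node only
* \param[in] node Node to unfence if eligible, or NULL to unfence every
*                 eligible node allowed for \p rsc
* \param[in] reason Reason to associate with the unfencing action
* \param[in] dependency If not NULL, action to order after the unfencing
* \param[in] data_set Cluster working set
*/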
void
trigger_unfencing(
pe_resource_t * rsc, pe_node_t *node, const char *reason, pe_action_t *dependency, pe_working_set_t * data_set)
{
if (!pcmk_is_set(data_set->flags, pe_flag_enable_unfencing)) {
/* No resources require it */
return;
} else if ((rsc != NULL)
&& !pcmk_is_set(rsc->flags, pe_rsc_fence_device)) {
/* Wasn't a stonith device */
return;
} else if(node
&& node->details->online
&& node->details->unclean == FALSE
&& node->details->shutdown == FALSE) {
pe_action_t *unfence = pe_fence_op(node, "on", FALSE, reason, FALSE, data_set);
if(dependency) {
order_actions(unfence, dependency, pe_order_optional);
}
} else if(rsc) {
GHashTableIter iter;
g_hash_table_iter_init(&iter, rsc->allowed_nodes);
while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
if(node->details->online && node->details->unclean == FALSE && node->details->shutdown == FALSE) {
trigger_unfencing(rsc, node, reason, dependency, data_set);
}
}
}
}
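/*!
* \brief Add an object reference to a tag, creating the tag if needed
*
* \param[in] tags Tag table to add to
* \param[in] tag_name Name of the tag to add the reference to
* \param[in] obj_ref Object reference to add
*
* \return TRUE on success (including when the reference already exists),
*         otherwise FALSE
*/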
gboolean
add_tag_ref(GHashTable * tags, const char * tag_name, const char * obj_ref)
{
pe_tag_t *tag = NULL;
GList *gIter = NULL;
gboolean is_existing = FALSE;
CRM_CHECK(tags && tag_name && obj_ref, return FALSE);
tag = g_hash_table_lookup(tags, tag_name);
if (tag == NULL) {
tag = calloc(1, sizeof(pe_tag_t));
if (tag == NULL) {
return FALSE;
}
tag->id = strdup(tag_name);
tag->refs = NULL;
g_hash_table_insert(tags, strdup(tag_name), tag);
}
for (gIter = tag->refs; gIter != NULL; gIter = gIter->next) {
const char *existing_ref = (const char *) gIter->data;
if (pcmk__str_eq(existing_ref, obj_ref, pcmk__str_none)){
is_existing = TRUE;
break;
}
}
if (is_existing == FALSE) {
tag->refs = g_list_append(tag->refs, strdup(obj_ref));
crm_trace("Added: tag=%s ref=%s", tag->id, obj_ref);
}
return TRUE;
}
/*!
* \internal
* \brief Create an action reason string based on the action itself
*
* \param[in] action Action to create reason string for
* \param[in] flag Action flag that was cleared
*
* \return Newly allocated string suitable for use as action reason
* \note It is the caller's responsibility to free() the result.
*/
char *
pe__action2reason(pe_action_t *action, enum pe_action_flags flag)
{
const char *change = NULL;
switch (flag) {
case pe_action_runnable:
case pe_action_migrate_runnable:
change = "unrunnable";
break;
case pe_action_optional:
change = "required";
break;
default:
// Bug: caller passed unsupported flag
CRM_CHECK(change != NULL, change = "");
break;
}
return crm_strdup_printf("%s%s%s %s", change,
(action->rsc == NULL)? "" : " ",
(action->rsc == NULL)? "" : action->rsc->id,
action->task);
}
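/*!
* \brief Set an action's reason text
*
* \param[in] action Action to set the reason for
* \param[in] reason New reason text
* \param[in] overwrite Whether to replace an existing reason
*
* \note If the action already has a reason and \p overwrite is false, the
*       existing reason is kept.
*/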
void pe_action_set_reason(pe_action_t *action, const char *reason, bool overwrite)
{
if (action->reason != NULL && overwrite) {
pe_rsc_trace(action->rsc, "Changing %s reason from '%s' to '%s'",
action->uuid, action->reason, crm_str(reason));
} else if (action->reason == NULL) {
pe_rsc_trace(action->rsc, "Set %s reason to '%s'",
action->uuid, crm_str(reason));
} else {
// crm_assert(action->reason != NULL && !overwrite);
return;
}
pcmk__str_update(&action->reason, reason);
}
/*!
* \internal
* \brief Check whether shutdown has been requested for a node
*
* \param[in] node Node to check
*
* \return TRUE if node has shutdown attribute set and nonzero, FALSE otherwise
* \note This differs from simply using node->details->shutdown in that it can
* be used before that has been determined (and in fact to determine it),
* and it can also be used to distinguish requested shutdown from implicit
* shutdown of remote nodes by virtue of their connection stopping.
*/
bool
pe__shutdown_requested(pe_node_t *node)
{
const char *shutdown = pe_node_attribute_raw(node, XML_CIB_ATTR_SHUTDOWN);
return !pcmk__str_eq(shutdown, "0", pcmk__str_null_matches);
}
/*!
* \internal
* \brief Update a data set's "recheck by" time
*
* \param[in] recheck Epoch time when recheck should happen
* \param[in,out] data_set Current working set
*/
void
pe__update_recheck_time(time_t recheck, pe_working_set_t *data_set)
{
if ((recheck > get_effective_time(data_set))
&& ((data_set->recheck_by == 0)
|| (data_set->recheck_by > recheck))) {
data_set->recheck_by = recheck;
}
}
/*!
* \internal
* \brief Wrapper for pe_unpack_nvpairs() using a cluster working set
*/
void
pe__unpack_dataset_nvpairs(xmlNode *xml_obj, const char *set_name,
pe_rule_eval_data_t *rule_data, GHashTable *hash,
const char *always_first, gboolean overwrite,
pe_working_set_t *data_set)
{
crm_time_t *next_change = crm_time_new_undefined();
pe_eval_nvpairs(data_set->input, xml_obj, set_name, rule_data, hash,
always_first, overwrite, next_change);
if (crm_time_is_defined(next_change)) {
time_t recheck = (time_t) crm_time_get_seconds_since_epoch(next_change);
pe__update_recheck_time(recheck, data_set);
}
crm_time_free(next_change);
}
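/*!
* \internal
* \brief Check whether a resource is disabled via its target role
*
* \param[in] rsc Resource to check
*
* \return true if the resource's target-role is Stopped, or Unpromoted for a
*         promotable clone, otherwise false
*/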
bool
pe__resource_is_disabled(pe_resource_t *rsc)
{
const char *target_role = NULL;
CRM_CHECK(rsc != NULL, return false);
target_role = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET_ROLE);
if (target_role) {
enum rsc_role_e target_role_e = text2role(target_role);
if ((target_role_e == RSC_ROLE_STOPPED)
|| ((target_role_e == RSC_ROLE_UNPROMOTED)
&& pcmk_is_set(uber_parent(rsc)->flags, pe_rsc_promotable))) {
return true;
}
}
return false;
}
/*!
* \internal
* \brief Create an action to clear a resource's history from CIB
*
* \param[in] rsc Resource to clear
* \param[in] node Node to clear history on
*
* \return New action to clear resource history
*/
pe_action_t *
pe__clear_resource_history(pe_resource_t *rsc, pe_node_t *node,
pe_working_set_t *data_set)
{
char *key = NULL;
CRM_ASSERT(rsc && node);
key = pcmk__op_key(rsc->id, CRM_OP_LRM_DELETE, 0);
return custom_action(rsc, key, CRM_OP_LRM_DELETE, node, FALSE, TRUE,
data_set);
}
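/*!
* \internal
* \brief Check whether a resource is running on any node in a given list
*
* \param[in] rsc Resource to check
* \param[in] node_list List of node names (may contain "*" patterns)
*
* \return true if \p rsc is running on a node whose name matches an entry in
*         \p node_list, otherwise false
*/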
bool
pe__rsc_running_on_any(pe_resource_t *rsc, GList *node_list)
{
for (GList *ele = rsc->running_on; ele; ele = ele->next) {
pe_node_t *node = (pe_node_t *) ele->data;
if (pcmk__str_in_list(node->details->uname, node_list,
pcmk__str_star_matches|pcmk__str_casei)) {
return true;
}
}
return false;
}
bool
pcmk__rsc_filtered_by_node(pe_resource_t *rsc, GList *only_node)
{
return (rsc->fns->active(rsc, FALSE) && !pe__rsc_running_on_any(rsc, only_node));
}
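/*!
* \internal
* \brief Filter a list of resources by printable ID
*
* \param[in] rscs Resources to filter
* \param[in] filter List of IDs (or "*" patterns) to match against
*
* \return New list of entries from \p rscs whose printable ID (or whose
*         parent's printable ID) matches \p filter
*/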
GList *
pe__filter_rsc_list(GList *rscs, GList *filter)
{
GList *retval = NULL;
for (GList *gIter = rscs; gIter; gIter = gIter->next) {
pe_resource_t *rsc = (pe_resource_t *) gIter->data;
/* I think the second condition is safe here for all callers of this
* function. If not, it needs to move into pe__node_text.
*/
if (pcmk__str_in_list(rsc_printable_id(rsc), filter, pcmk__str_star_matches) ||
(rsc->parent && pcmk__str_in_list(rsc_printable_id(rsc->parent), filter, pcmk__str_star_matches))) {
retval = g_list_prepend(retval, rsc);
}
}
return retval;
}
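/*!
* \internal
* \brief Build a list of node names matching a given string
*
* \param[in] data_set Cluster working set
* \param[in] s Node name, node tag, "*", or NULL
*
* \return List containing "*" if \p s is NULL or "*", a singleton list if
*         \p s is a node name, or the names of all nodes tagged with \p s
*/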
GList *
pe__build_node_name_list(pe_working_set_t *data_set, const char *s) {
GList *nodes = NULL;
if (pcmk__str_eq(s, "*", pcmk__str_null_matches)) {
/* Nothing or '*' was given, meaning all node names. Catch '*' here
* rather than letting it fall into the pe__unames_with_tag branch
* below, where it would return an empty list.
*/
nodes = g_list_prepend(nodes, strdup("*"));
} else {
pe_node_t *node = pe_find_node(data_set->nodes, s);
if (node) {
/* The given string was a valid uname for a node. Return a
* singleton list containing just that uname.
*/
nodes = g_list_prepend(nodes, strdup(s));
} else {
/* The given string was not a valid uname. It's either a tag or
* it's a typo or something. In the first case, we'll return a
* list of all the unames of the nodes with the given tag. In the
* second case, we'll return a NULL pointer and nothing will
* get displayed.
*/
nodes = pe__unames_with_tag(data_set, s);
}
}
return nodes;
}
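/*!
* \internal
* \brief Build a list of resource IDs matching a given string
*
* \param[in] data_set Cluster working set
* \param[in] s Resource name, resource tag, "*", or NULL
*
* \return List containing "*" if \p s is NULL or "*", a singleton list if
*         \p s names a resource, or the IDs of all resources tagged with \p s
*/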
GList *
pe__build_rsc_list(pe_working_set_t *data_set, const char *s) {
GList *resources = NULL;
if (pcmk__str_eq(s, "*", pcmk__str_null_matches)) {
resources = g_list_prepend(resources, strdup("*"));
} else {
pe_resource_t *rsc = pe_find_resource_with_flags(data_set->resources, s,
pe_find_renamed|pe_find_any);
if (rsc) {
/* A colon in the name we were given means we're being asked to filter
* on a specific instance of a cloned resource. Put that exact string
* into the filter list. Otherwise, use the printable ID of whatever
* resource was found that matches what was asked for.
*/
if (strstr(s, ":") != NULL) {
resources = g_list_prepend(resources, strdup(rsc->id));
} else {
resources = g_list_prepend(resources, strdup(rsc_printable_id(rsc)));
}
} else {
/* The given string was not a valid resource name. It's either
* a tag or it's a typo or something. See build_uname_list for
* more detail.
*/
resources = pe__rscs_with_tag(data_set, s);
}
}
return resources;
}
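/*!
* \internal
* \brief Find a failed probe in the operation history for a resource
*
* \param[in] rsc Resource to look for
* \param[in] name Node name to limit the search to, or NULL for any node
*
* \return XML of a failed probe operation for \p rsc (on node \p name, if
*         given), or NULL if none exists
*/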
xmlNode *
pe__failed_probe_for_rsc(pe_resource_t *rsc, const char *name)
{
pe_resource_t *parent = uber_parent(rsc);
const char *rsc_id = rsc->id;
if (rsc->variant == pe_clone) {
rsc_id = pe__clone_child_id(rsc);
} else if (parent->variant == pe_clone) {
rsc_id = pe__clone_child_id(parent);
}
for (xmlNode *xml_op = pcmk__xml_first_child(rsc->cluster->failed); xml_op != NULL;
xml_op = pcmk__xml_next(xml_op)) {
const char *value = NULL;
char *op_id = NULL;
/* This resource operation is not a failed probe. */
if (!pcmk_xe_mask_probe_failure(xml_op)) {
continue;
}
/* This resource operation was not run on the given node. Note that if name is
* NULL, this will always succeed.
*/
value = crm_element_value(xml_op, XML_LRM_ATTR_TARGET);
if (value == NULL || !pcmk__str_eq(value, name, pcmk__str_casei|pcmk__str_null_matches)) {
continue;
}
/* This resource operation has no operation_key. */
value = crm_element_value(xml_op, XML_LRM_ATTR_TASK_KEY);
if (!parse_op_key(value ? value : ID(xml_op), &op_id, NULL, NULL)) {
continue;
}
/* This resource operation's ID does not match the rsc_id we are looking for. */
if (!pcmk__str_eq(op_id, rsc_id, pcmk__str_none)) {
free(op_id);
continue;
}
free(op_id);
return xml_op;
}
return NULL;
}