diff --git a/cts/lab/patterns.py b/cts/lab/patterns.py
index 511d6c25f6..38d6e1065a 100644
--- a/cts/lab/patterns.py
+++ b/cts/lab/patterns.py
@@ -1,415 +1,415 @@
""" Pattern-holding classes for Pacemaker's Cluster Test Suite (CTS)
"""
__copyright__ = "Copyright 2008-2022 the Pacemaker project contributors"
__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
import sys, os
from cts.CTSvars import *
patternvariants = {}
class BasePatterns(object):
def __init__(self, name):
self.name = name
patternvariants[name] = self
self.ignore = [
"avoid confusing Valgrind",
# Logging bug in some versions of libvirtd
r"libvirtd.*: internal error: Failed to parse PCI config address",
# pcs can log this when node is fenced, but fencing is OK in some
# tests (and we will catch it in pacemaker logs when not OK)
r"pcs.daemon:No response from: .* request: get_configs, error:",
]
self.BadNews = []
self.components = {}
self.commands = {
"StatusCmd" : "crmadmin -t 60 -S %s 2>/dev/null",
"CibQuery" : "cibadmin -Ql",
"CibAddXml" : "cibadmin --modify -c --xml-text %s",
"CibDelXpath" : "cibadmin --delete --xpath %s",
# 300,000 == 5 minutes
"RscRunning" : CTSvars.CRM_DAEMON_DIR + "/cts-exec-helper -R -r %s",
"CIBfile" : "%s:"+CTSvars.CRM_CONFIG_DIR+"/cib.xml",
"TmpDir" : "/tmp",
"BreakCommCmd" : "iptables -A INPUT -s %s -j DROP >/dev/null 2>&1",
"FixCommCmd" : "iptables -D INPUT -s %s -j DROP >/dev/null 2>&1",
# tc qdisc add dev lo root handle 1: cbq avpkt 1000 bandwidth 1000mbit
# tc class add dev lo parent 1: classid 1:1 cbq rate "$RATE"kbps allot 17000 prio 5 bounded isolated
# tc filter add dev lo parent 1: protocol ip prio 16 u32 match ip dst 127.0.0.1 match ip sport $PORT 0xFFFF flowid 1:1
# tc qdisc add dev lo parent 1: netem delay "$LATENCY"msec "$(($LATENCY/4))"msec 10% 2> /dev/null > /dev/null
"ReduceCommCmd" : "",
"RestoreCommCmd" : "tc qdisc del dev lo root",
"MaintenanceModeOn" : "cibadmin --modify -c --xml-text ''",
"MaintenanceModeOff" : "cibadmin --delete --xpath \"//nvpair[@name='maintenance-mode']\"",
"StandbyCmd" : "crm_attribute -Vq -U %s -n standby -l forever -v %s 2>/dev/null",
"StandbyQueryCmd" : "crm_attribute -qG -U %s -n standby -l forever -d off 2>/dev/null",
}
self.search = {
"Pat:DC_IDLE" : "pacemaker-controld.*State transition.*-> S_IDLE",
# This won't work if we have multiple partitions
"Pat:Local_started" : "%s\W.*controller successfully started",
"Pat:NonDC_started" : r"%s\W.*State transition.*-> S_NOT_DC",
"Pat:DC_started" : r"%s\W.*State transition.*-> S_IDLE",
"Pat:We_stopped" : "%s\W.*OVERRIDE THIS PATTERN",
"Pat:They_stopped" : "%s\W.*LOST:.* %s ",
"Pat:They_dead" : "node %s.*: is dead",
"Pat:They_up" : "%s %s\W.*OVERRIDE THIS PATTERN",
"Pat:TransitionComplete" : "Transition status: Complete: complete",
"Pat:Fencing_start" : r"Requesting peer fencing .* targeting %s",
"Pat:Fencing_ok" : r"pacemaker-fenced.*:\s*Operation .* targeting %s by .* for .*@.*: OK",
"Pat:Fencing_recover" : r"pacemaker-schedulerd.*: Recover %s",
"Pat:Fencing_active" : r"stonith resource .* is active on 2 nodes (attempting recovery)",
"Pat:Fencing_probe" : r"pacemaker-controld.* Result of probe operation for %s on .*: Error",
"Pat:RscOpOK" : r"pacemaker-controld.*:\s+Result of %s operation for %s.*: (0 \()?ok",
"Pat:RscOpFail" : r"pacemaker-schedulerd.*:.*Unexpected result .* recorded for %s of %s ",
"Pat:CloneOpFail" : r"pacemaker-schedulerd.*:.*Unexpected result .* recorded for %s of (%s|%s) ",
"Pat:RscRemoteOpOK" : r"pacemaker-controld.*:\s+Result of %s operation for %s on %s: (0 \()?ok",
"Pat:NodeFenced" : r"pacemaker-controld.*:\s* Peer %s was terminated \(.*\) by .* on behalf of .*: OK",
"Pat:FenceOpOK" : "Operation .* targeting %s using .* returned 0",
}
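These templates are not matched verbatim: the `%s` placeholders are filled with node or resource names first, and the result is applied to daemon log output as a regular expression. A minimal sketch of that usage outside CTS (the operation and resource names here are hypothetical):

```python
import re

rsc_op_ok = r"pacemaker-controld.*:\s+Result of %s operation for %s.*: (0 \()?ok"
pattern = rsc_op_ok % ("start", "dummy-rsc")  # fill in operation and resource

log_line = ("pacemaker-controld[1234]: notice: Result of start operation "
            "for dummy-rsc on node1: ok")
print(bool(re.search(pattern, log_line)))  # True
```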
def get_component(self, key):
if key in self.components:
return self.components[key]
print("Unknown component '%s' for %s" % (key, self.name))
return []
def get_patterns(self, key):
if key == "BadNews":
return self.BadNews
elif key == "BadNewsIgnore":
return self.ignore
elif key == "Commands":
return self.commands
elif key == "Search":
return self.search
elif key == "Components":
return self.components
def __getitem__(self, key):
if key == "Name":
return self.name
elif key in self.commands:
return self.commands[key]
elif key in self.search:
return self.search[key]
else:
print("Unknown template '%s' for %s" % (key, self.name))
return None
class crm_corosync(BasePatterns):
'''
Patterns for Corosync version 2 cluster manager class
'''
def __init__(self, name):
BasePatterns.__init__(self, name)
self.commands.update({
"StartCmd" : "service corosync start && service pacemaker start",
"StopCmd" : "service pacemaker stop; [ ! -e /usr/sbin/pacemaker-remoted ] || service pacemaker_remote stop; service corosync stop",
"EpochCmd" : "crm_node -e",
"QuorumCmd" : "crm_node -q",
"PartitionCmd" : "crm_node -p",
})
self.search.update({
# Close enough ... "Corosync Cluster Engine exiting normally" isn't
# printed reliably.
"Pat:We_stopped" : "%s\W.*Unloading all Corosync service engines",
"Pat:They_stopped" : "%s\W.*pacemaker-controld.*Node %s(\[|\s).*state is now lost",
"Pat:They_dead" : "pacemaker-controld.*Node %s(\[|\s).*state is now lost",
"Pat:They_up" : "\W%s\W.*pacemaker-controld.*Node %s state is now member",
"Pat:ChildExit" : r"\[[0-9]+\] exited with status [0-9]+ \(",
# "with signal 9" == pcmk_child_exit(), "$" == check_active_before_startup_processes()
"Pat:ChildKilled" : r"%s\W.*pacemakerd.*%s\[[0-9]+\] terminated( with signal 9|$)",
"Pat:ChildRespawn" : "%s\W.*pacemakerd.*Respawning %s subdaemon after unexpected exit",
"Pat:InfraUp" : "%s\W.*corosync.*Initializing transport",
"Pat:PacemakerUp" : "%s\W.*pacemakerd.*Starting Pacemaker",
})
self.ignore = self.ignore + [
r"crm_mon:",
r"crmadmin:",
r"update_trace_data",
r"async_notify:.*strange, client not found",
r"Parse error: Ignoring unknown option .*nodename",
r"error.*: Operation 'reboot' .* using FencingFail returned ",
r"getinfo response error: 1$",
r"sbd.* error: inquisitor_child: DEBUG MODE IS ACTIVE",
r"sbd.* pcmk:\s*error:.*Connection to cib_ro.* (failed|closed)",
]
self.BadNews = [
r"[^(]error:",
r"crit:",
r"ERROR:",
r"CRIT:",
r"Shutting down...NOW",
r"Timer I_TERMINATE just popped",
r"input=I_ERROR",
r"input=I_FAIL",
r"input=I_INTEGRATED cause=C_TIMER_POPPED",
r"input=I_FINALIZED cause=C_TIMER_POPPED",
r"input=I_ERROR",
r"(pacemakerd|pacemaker-execd|pacemaker-controld):.*, exiting",
r"schedulerd.*Attempting recovery of resource",
r"is taking more than 2x its timeout",
r"Confirm not received from",
r"Welcome reply not received from",
r"Attempting to schedule .* after a stop",
r"Resource .* was active at shutdown",
r"duplicate entries for call_id",
r"Search terminated:",
r":global_timer_callback",
r"Faking parameter digest creation",
r"Parameters to .* action changed:",
r"Parameters to .* changed",
r"pacemakerd.*\[[0-9]+\] terminated( with signal| as IPC server|$)",
r"pacemaker-schedulerd.*Recover .*\(.* -\> .*\)",
r"rsyslogd.* imuxsock lost .* messages from pid .* due to rate-limiting",
r"Peer is not part of our cluster",
r"We appear to be in an election loop",
r"Unknown node -> we will not deliver message",
r"(Blackbox dump requested|Problem detected)",
r"pacemakerd.*Could not connect to Cluster Configuration Database API",
r"Receiving messages from a node we think is dead",
r"share the same cluster nodeid",
r"share the same name",
#r"crm_ipc_send:.*Request .* failed",
#r"crm_ipc_send:.*Sending to .* is disabled until pending reply is received",
# Not inherently bad, but worth tracking
#r"No need to invoke the TE",
#r"ping.*: DEBUG: Updated connected = 0",
#r"Digest mis-match:",
r"pacemaker-controld:.*Transition failed: terminated",
r"Local CIB .* differs from .*:",
r"warn.*:\s*Continuing but .* will NOT be used",
r"warn.*:\s*Cluster configuration file .* is corrupt",
#r"Executing .* fencing operation",
r"Election storm",
r"stalled the FSA with pending inputs",
]
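The `BadNews` and ignore lists work together: a log line is flagged only if it matches a bad-news pattern and no ignore pattern. A simplified restatement of that filtering (assumed behavior, not the actual CTS scanner):

```python
import re

bad_news = [r"[^(]error:", r"crit:"]
ignore = [r"crm_mon:"]

def flag_bad_lines(lines):
    """Return lines matching a bad-news pattern but no ignore pattern."""
    return [line for line in lines
            if any(re.search(p, line) for p in bad_news)
            and not any(re.search(p, line) for p in ignore)]

print(flag_bad_lines([
    "pacemakerd: error: subdaemon failed",  # flagged
    "crm_mon: error: transient glitch",     # suppressed by ignore list
]))
```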
self.components["common-ignore"] = [
r"Pending action:",
r"resource( was|s were) active at shutdown",
r"pending LRM operations at shutdown",
r"Lost connection to the CIB manager",
r"pacemaker-controld.*:\s*Action A_RECOVER .* not supported",
r"pacemaker-controld.*:\s*Performing A_EXIT_1 - forcefully exiting ",
r".*:\s*Requesting fencing \([^)]+\) of node ",
r"(Blackbox dump requested|Problem detected)",
]
self.components["corosync-ignore"] = [
r"Could not connect to Corosync CFG: CS_ERR_LIBRARY",
r"error:.*Connection to the CPG API failed: Library error",
r"\[[0-9]+\] exited with status [0-9]+ \(",
r"\[[0-9]+\] terminated with signal 15",
r"pacemaker-based.*error:.*Corosync connection lost",
r"pacemaker-fenced.*error:.*Corosync connection terminated",
r"pacemaker-controld.*State transition .* S_RECOVERY",
r"pacemaker-controld.*error:.*Input (I_ERROR|I_TERMINATE ) .*received in state",
r"pacemaker-controld.*error:.*Could not recover from internal error",
r"error:.*Connection to cib_(shm|rw).* (failed|closed)",
r"error:.*cib_(shm|rw) IPC provider disconnected while waiting",
r"error:.*Connection to (fencer|stonith-ng).* (closed|failed|lost)",
r"crit: Fencing daemon connection failed",
# This is overbroad, but we don't have a way to say that only
# certain transition errors are acceptable (if the fencer respawns,
# fence devices may appear multiply active). We have to rely on
# other causes of a transition error logging their own error
# message, which is the usual practice.
r"pacemaker-schedulerd.* Calculated transition .*/pe-error",
]
self.components["corosync"] = [
# We expect each daemon to lose its cluster connection.
# However, if the CIB manager loses its connection first,
# it's possible for another daemon to lose that connection and
# exit before losing the cluster connection.
- r"pacemakerd.*:\s*(crit|error):.*Lost connection to cluster layer",
+ r"pacemakerd.*:\s*warning:.*Lost connection to cluster layer",
r"pacemaker-attrd.*:\s*(crit|error):.*Lost connection to (cluster layer|the CIB manager)",
r"pacemaker-based.*:\s*(crit|error):.*Lost connection to cluster layer",
r"pacemaker-controld.*:\s*(crit|error):.*Lost connection to (cluster layer|the CIB manager)",
r"pacemaker-fenced.*:\s*(crit|error):.*Lost connection to (cluster layer|the CIB manager)",
r"schedulerd.*Scheduling Node .* for STONITH",
r"pacemaker-controld.*:\s*Peer .* was terminated \(.*\) by .* on behalf of .*:\s*OK",
]
self.components["pacemaker-based"] = [
r"pacemakerd.* pacemaker-attrd\[[0-9]+\] exited with status 102",
r"pacemakerd.* pacemaker-controld\[[0-9]+\] exited with status 1",
r"pacemakerd.* Respawning pacemaker-attrd subdaemon after unexpected exit",
r"pacemakerd.* Respawning pacemaker-based subdaemon after unexpected exit",
r"pacemakerd.* Respawning pacemaker-controld subdaemon after unexpected exit",
r"pacemakerd.* Respawning pacemaker-fenced subdaemon after unexpected exit",
r"pacemaker-.* Connection to cib_.* (failed|closed)",
r"pacemaker-attrd.*:.*Lost connection to the CIB manager",
r"pacemaker-controld.*:.*Lost connection to the CIB manager",
r"pacemaker-controld.*I_ERROR.*crmd_cib_connection_destroy",
r"pacemaker-controld.* State transition .* S_RECOVERY",
r"pacemaker-controld.*: Input I_TERMINATE .*from do_recover",
r"pacemaker-controld.*Could not recover from internal error",
]
self.components["pacemaker-based-ignore"] = [
r"pacemaker-execd.*Connection to (fencer|stonith-ng).* (closed|failed|lost)",
# This is overbroad, but we don't have a way to say that only
# certain transition errors are acceptable (if the fencer respawns,
# fence devices may appear multiply active). We have to rely on
# other causes of a transition error logging their own error
# message, which is the usual practice.
r"pacemaker-schedulerd.* Calculated transition .*/pe-error",
]
self.components["pacemaker-execd"] = [
r"pacemaker-controld.*Connection to executor failed",
r"pacemaker-controld.*I_ERROR.*lrm_connection_destroy",
r"pacemaker-controld.*State transition .* S_RECOVERY",
r"pacemaker-controld.*: Input I_TERMINATE .*from do_recover",
r"pacemaker-controld.*Could not recover from internal error",
r"pacemakerd.*pacemaker-controld\[[0-9]+\] exited with status 1",
r"pacemakerd.* Respawning pacemaker-execd subdaemon after unexpected exit",
r"pacemakerd.* Respawning pacemaker-controld subdaemon after unexpected exit",
]
self.components["pacemaker-execd-ignore"] = [
r"pacemaker-(attrd|controld).*Connection to lrmd.* (failed|closed)",
r"pacemaker-(attrd|controld).*Could not execute alert",
]
self.components["pacemaker-controld"] = [
# "WARN: determine_online_status: Node .* is unclean",
# "Scheduling Node .* for STONITH",
# Only if the node wasn't the DC: "State transition S_IDLE",
"State transition .* -> S_IDLE",
]
self.components["pacemaker-controld-ignore"] = []
self.components["pacemaker-attrd"] = []
self.components["pacemaker-attrd-ignore"] = []
self.components["pacemaker-schedulerd"] = [
"State transition .* S_RECOVERY",
r"pacemakerd.* Respawning pacemaker-controld subdaemon after unexpected exit",
r"pacemaker-controld\[[0-9]+\] exited with status 1 \(",
r"Connection to the scheduler failed",
"pacemaker-controld.*I_ERROR.*save_cib_contents",
r"pacemaker-controld.*: Input I_TERMINATE .*from do_recover",
"pacemaker-controld.*Could not recover from internal error",
]
self.components["pacemaker-schedulerd-ignore"] = [
r"Connection to pengine.* (failed|closed)",
]
self.components["pacemaker-fenced"] = [
r"error:.*Connection to (fencer|stonith-ng).* (closed|failed|lost)",
r"Fencing daemon connection failed",
r"pacemaker-controld.*Fencer successfully connected",
]
self.components["pacemaker-fenced-ignore"] = [
r"error:.*Connection to (fencer|stonith-ng).* (closed|failed|lost)",
r"crit:.*Fencing daemon connection failed",
r"error:.*Fencer connection failed \(will retry\)",
r"Connection to (fencer|stonith-ng) failed, finalizing .* pending operations",
r"pacemaker-controld.*:\s+Result of .* operation for Fencing.*Error",
# This is overbroad, but we don't have a way to say that only
# certain transition errors are acceptable (if the fencer respawns,
# fence devices may appear multiply active). We have to rely on
# other causes of a transition error logging their own error
# message, which is the usual practice.
r"pacemaker-schedulerd.* Calculated transition .*/pe-error",
]
self.components["pacemaker-fenced-ignore"].extend(self.components["common-ignore"])
class crm_corosync_docker(crm_corosync):
'''
Patterns for Corosync version 2 cluster manager class running in Docker containers
'''
def __init__(self, name):
crm_corosync.__init__(self, name)
self.commands.update({
"StartCmd" : "pcmk_start",
"StopCmd" : "pcmk_stop",
})
class PatternSelector(object):
def __init__(self, name=None):
self.name = name
self.base = BasePatterns("crm-base")
if not name:
crm_corosync("crm-corosync")
elif name == "crm-corosync":
crm_corosync(name)
elif name == "crm-corosync-docker":
crm_corosync_docker(name)
def get_variant(self, variant):
if variant in patternvariants:
return patternvariants[variant]
print("defaulting to crm-base for %s" % variant)
return self.base
def get_patterns(self, variant, kind):
return self.get_variant(variant).get_patterns(kind)
def get_template(self, variant, key):
v = self.get_variant(variant)
return v[key]
def get_component(self, variant, kind):
return self.get_variant(variant).get_component(kind)
def __getitem__(self, key):
return self.get_template(self.name, key)
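For reference, a short usage sketch of the selector, relying only on the classes and template keys defined above:

```python
selector = PatternSelector("crm-corosync")
print(selector["StartCmd"])  # "service corosync start && service pacemaker start"
print(selector.get_patterns("crm-corosync", "BadNews")[0])  # r"[^(]error:"
```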
# python cts/CTSpatt.py -k crm-corosync -t StartCmd
if __name__ == '__main__':
pdir=os.path.dirname(sys.path[0])
sys.path.insert(0, pdir) # So that things work from the source directory
kind=None
template=None
skipthis=None
args=sys.argv[1:]
for i in range(0, len(args)):
if skipthis:
skipthis=None
continue
elif args[i] == "-k" or args[i] == "--kind":
skipthis=1
kind = args[i+1]
elif args[i] == "-t" or args[i] == "--template":
skipthis=1
template = args[i+1]
else:
print("Illegal argument " + args[i])
print(PatternSelector(kind)[template])
diff --git a/include/pcmki/pcmki_sched_allocate.h b/include/pcmki/pcmki_sched_allocate.h
index 1193854174..caa747d6e7 100644
--- a/include/pcmki/pcmki_sched_allocate.h
+++ b/include/pcmki/pcmki_sched_allocate.h
@@ -1,210 +1,222 @@
/*
* Copyright 2004-2022 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#ifndef PCMK__PCMKI_PCMKI_SCHED_ALLOCATE__H
# define PCMK__PCMKI_PCMKI_SCHED_ALLOCATE__H
# include
# include
# include
# include
# include
# include
# include
# include
struct resource_alloc_functions_s {
GHashTable *(*merge_weights) (pe_resource_t *, const char *, GHashTable *, const char *, float,
enum pe_weights);
pe_node_t *(*allocate) (pe_resource_t *, pe_node_t *, pe_working_set_t *);
void (*create_actions) (pe_resource_t *, pe_working_set_t *);
gboolean(*create_probe) (pe_resource_t *, pe_node_t *, pe_action_t *, gboolean, pe_working_set_t *);
void (*internal_constraints) (pe_resource_t *, pe_working_set_t *);
void (*rsc_colocation_lh) (pe_resource_t *, pe_resource_t *,
pcmk__colocation_t *, pe_working_set_t *);
void (*rsc_colocation_rh) (pe_resource_t *, pe_resource_t *,
pcmk__colocation_t *, pe_working_set_t *);
/*!
* \internal
* \brief Create list of all resources in colocations with a given resource
*
* Given a resource, create a list of all resources involved in mandatory
* colocations with it, whether directly or indirectly via chained colocations.
*
* \param[in] rsc Resource to add to colocated list
* \param[in] orig_rsc Resource originally requested
* \param[in] colocated_rscs Existing list
*
* \return List of given resource and all resources involved in colocations
*
* \note This function is recursive; top-level callers should pass NULL as
* \p colocated_rscs and \p orig_rsc, and the desired resource as
* \p rsc. The recursive calls will use other values.
*/
GList *(*colocated_resources)(pe_resource_t *rsc, pe_resource_t *orig_rsc,
GList *colocated_rscs);
void (*rsc_location) (pe_resource_t *, pe__location_t *);
enum pe_action_flags (*action_flags) (pe_action_t *, pe_node_t *);
enum pe_graph_flags (*update_actions) (pe_action_t *, pe_action_t *,
pe_node_t *, enum pe_action_flags,
enum pe_action_flags,
enum pe_ordering,
pe_working_set_t *data_set);
void (*output_actions)(pe_resource_t *rsc);
void (*expand) (pe_resource_t *, pe_working_set_t *);
void (*append_meta) (pe_resource_t * rsc, xmlNode * xml);
/*!
* \internal
* \brief Add a resource's utilization to a table of utilization values
*
* This function is used when summing the utilization of a resource and all
* resources colocated with it, to determine whether a node has sufficient
* capacity. Given a resource and a table of utilization values, it will add
* the resource's utilization to the existing values, if the resource has
* not yet been allocated to a node.
*
* \param[in] rsc Resource with utilization to add
* \param[in] orig_rsc Resource being allocated (for logging only)
* \param[in] all_rscs List of all resources that will be summed
* \param[in] utilization Table of utilization values to add to
*/
void (*add_utilization)(pe_resource_t *rsc, pe_resource_t *orig_rsc,
GList *all_rscs, GHashTable *utilization);
+
+ /*!
+ * \internal
+ * \brief Apply a shutdown lock for a resource, if appropriate
+ *
+ * \param[in] rsc Resource to check for shutdown lock
+ */
+ void (*shutdown_lock)(pe_resource_t *rsc);
};
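This struct is a per-variant method table: each resource variant (primitive, group, clone, bundle) supplies its own implementations, and callers dispatch through `rsc->cmds`, e.g. `rsc->cmds->shutdown_lock(rsc)` later in this patch. A rough Python analogue of that dispatch (hypothetical classes, for illustration only):

```python
class Primitive:
    def shutdown_lock(self):
        print("primitive: lock to its node if that node is cleanly shutting down")

class Clone:
    def shutdown_lock(self):
        print("clone: not locked directly")

for rsc in (Primitive(), Clone()):
    rsc.shutdown_lock()  # one call site, variant-specific behavior
```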
GHashTable *pcmk__native_merge_weights(pe_resource_t *rsc, const char *rhs,
GHashTable *nodes, const char *attr,
float factor, uint32_t flags);
GHashTable *pcmk__group_merge_weights(pe_resource_t *rsc, const char *rhs,
GHashTable *nodes, const char *attr,
float factor, uint32_t flags);
pe_node_t *pcmk__native_allocate(pe_resource_t *rsc, pe_node_t *preferred,
pe_working_set_t *data_set);
extern void native_create_actions(pe_resource_t * rsc, pe_working_set_t * data_set);
extern void native_internal_constraints(pe_resource_t * rsc, pe_working_set_t * data_set);
void native_rsc_colocation_lh(pe_resource_t *dependent, pe_resource_t *primary,
pcmk__colocation_t *constraint,
pe_working_set_t *data_set);
void native_rsc_colocation_rh(pe_resource_t *dependent, pe_resource_t *primary,
pcmk__colocation_t *constraint,
pe_working_set_t *data_set);
extern enum pe_action_flags native_action_flags(pe_action_t * action, pe_node_t * node);
void native_rsc_location(pe_resource_t *rsc, pe__location_t *constraint);
extern void native_expand(pe_resource_t * rsc, pe_working_set_t * data_set);
extern gboolean native_create_probe(pe_resource_t * rsc, pe_node_t * node, pe_action_t * complete,
gboolean force, pe_working_set_t * data_set);
extern void native_append_meta(pe_resource_t * rsc, xmlNode * xml);
void pcmk__primitive_add_utilization(pe_resource_t *rsc,
pe_resource_t *orig_rsc, GList *all_rscs,
GHashTable *utilization);
+void pcmk__primitive_shutdown_lock(pe_resource_t *rsc);
pe_node_t *pcmk__group_allocate(pe_resource_t *rsc, pe_node_t *preferred,
pe_working_set_t *data_set);
extern void group_create_actions(pe_resource_t * rsc, pe_working_set_t * data_set);
extern void group_internal_constraints(pe_resource_t * rsc, pe_working_set_t * data_set);
void group_rsc_colocation_lh(pe_resource_t *dependent, pe_resource_t *primary,
pcmk__colocation_t *constraint,
pe_working_set_t *data_set);
void group_rsc_colocation_rh(pe_resource_t *dependent, pe_resource_t *primary,
pcmk__colocation_t *constraint,
pe_working_set_t *data_set);
extern enum pe_action_flags group_action_flags(pe_action_t * action, pe_node_t * node);
void group_rsc_location(pe_resource_t *rsc, pe__location_t *constraint);
extern void group_expand(pe_resource_t * rsc, pe_working_set_t * data_set);
extern void group_append_meta(pe_resource_t * rsc, xmlNode * xml);
void pcmk__group_add_utilization(pe_resource_t *rsc, pe_resource_t *orig_rsc,
GList *all_rscs, GHashTable *utilization);
+void pcmk__group_shutdown_lock(pe_resource_t *rsc);
pe_node_t *pcmk__bundle_allocate(pe_resource_t *rsc, pe_node_t *preferred,
pe_working_set_t *data_set);
void pcmk__bundle_create_actions(pe_resource_t *rsc,
pe_working_set_t *data_set);
gboolean pcmk__bundle_create_probe(pe_resource_t *rsc, pe_node_t *node,
pe_action_t *complete, gboolean force,
pe_working_set_t *data_set);
void pcmk__bundle_internal_constraints(pe_resource_t *rsc,
pe_working_set_t *data_set);
void pcmk__bundle_rsc_colocation_lh(pe_resource_t *dependent,
pe_resource_t *primary,
pcmk__colocation_t *constraint,
pe_working_set_t *data_set);
void pcmk__bundle_rsc_colocation_rh(pe_resource_t *dependent,
pe_resource_t *primary,
pcmk__colocation_t *constraint,
pe_working_set_t *data_set);
void pcmk__bundle_rsc_location(pe_resource_t *rsc, pe__location_t *constraint);
enum pe_action_flags pcmk__bundle_action_flags(pe_action_t *action,
pe_node_t *node);
void pcmk__bundle_expand(pe_resource_t *rsc, pe_working_set_t *data_set);
void pcmk__bundle_append_meta(pe_resource_t *rsc, xmlNode *xml);
void pcmk__bundle_add_utilization(pe_resource_t *rsc, pe_resource_t *orig_rsc,
GList *all_rscs, GHashTable *utilization);
+void pcmk__bundle_shutdown_lock(pe_resource_t *rsc);
pe_node_t *pcmk__clone_allocate(pe_resource_t *rsc, pe_node_t *preferred,
pe_working_set_t *data_set);
extern void clone_create_actions(pe_resource_t * rsc, pe_working_set_t * data_set);
extern void clone_internal_constraints(pe_resource_t * rsc, pe_working_set_t * data_set);
void clone_rsc_colocation_lh(pe_resource_t *dependent, pe_resource_t *primary,
pcmk__colocation_t *constraint,
pe_working_set_t *data_set);
void clone_rsc_colocation_rh(pe_resource_t *dependent, pe_resource_t *primary,
pcmk__colocation_t *constraint,
pe_working_set_t *data_set);
void clone_rsc_location(pe_resource_t *rsc, pe__location_t *constraint);
extern enum pe_action_flags clone_action_flags(pe_action_t * action, pe_node_t * node);
extern void clone_expand(pe_resource_t * rsc, pe_working_set_t * data_set);
extern gboolean clone_create_probe(pe_resource_t * rsc, pe_node_t * node, pe_action_t * complete,
gboolean force, pe_working_set_t * data_set);
extern void clone_append_meta(pe_resource_t * rsc, xmlNode * xml);
void pcmk__clone_add_utilization(pe_resource_t *rsc, pe_resource_t *orig_rsc,
GList *all_rscs, GHashTable *utilization);
+void pcmk__clone_shutdown_lock(pe_resource_t *rsc);
void pcmk__add_promotion_scores(pe_resource_t *rsc);
pe_node_t *pcmk__set_instance_roles(pe_resource_t *rsc,
pe_working_set_t *data_set);
void create_promotable_actions(pe_resource_t *rsc, pe_working_set_t *data_set);
void promote_demote_constraints(pe_resource_t *rsc, pe_working_set_t *data_set);
void promotable_constraints(pe_resource_t *rsc, pe_working_set_t *data_set);
void promotable_colocation_rh(pe_resource_t *dependent, pe_resource_t *primary,
pcmk__colocation_t *constraint,
pe_working_set_t *data_set);
enum pe_graph_flags native_update_actions(pe_action_t *first, pe_action_t *then,
pe_node_t *node,
enum pe_action_flags flags,
enum pe_action_flags filter,
enum pe_ordering type,
pe_working_set_t *data_set);
enum pe_graph_flags group_update_actions(pe_action_t *first, pe_action_t *then,
pe_node_t *node,
enum pe_action_flags flags,
enum pe_action_flags filter,
enum pe_ordering type,
pe_working_set_t *data_set);
enum pe_graph_flags pcmk__multi_update_actions(pe_action_t *first,
pe_action_t *then,
pe_node_t *node,
enum pe_action_flags flags,
enum pe_action_flags filter,
enum pe_ordering type,
pe_working_set_t *data_set);
void pcmk__log_transition_summary(const char *filename);
void clone_create_pseudo_actions(
pe_resource_t * rsc, GList *children, notify_data_t **start_notify, notify_data_t **stop_notify, pe_working_set_t * data_set);
#endif
diff --git a/include/pcmki/pcmki_scheduler.h b/include/pcmki/pcmki_scheduler.h
index 42b653c00f..39b99cbad5 100644
--- a/include/pcmki/pcmki_scheduler.h
+++ b/include/pcmki/pcmki_scheduler.h
@@ -1,99 +1,98 @@
/*
* Copyright 2014-2022 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#ifndef PCMK__PCMKI_PCMKI_SCHEDULER__H
# define PCMK__PCMKI_PCMKI_SCHEDULER__H
typedef struct rsc_ticket_s rsc_ticket_t;
# include
# include
# include
# include
# include
# include
# include
enum pe_weights {
pe_weights_none = 0x0,
pe_weights_init = 0x1,
pe_weights_forward = 0x4,
pe_weights_positive = 0x8,
pe_weights_rollback = 0x10,
};
typedef struct {
const char *id;
const char *node_attribute;
pe_resource_t *dependent; // The resource being colocated
pe_resource_t *primary; // The resource the dependent is colocated with
int dependent_role; // Colocation applies only if dependent has this role
int primary_role; // Colocation applies only if primary has this role
int score;
bool influence; // Whether dependent influences active primary placement
} pcmk__colocation_t;
enum loss_ticket_policy_e {
loss_ticket_stop,
loss_ticket_demote,
loss_ticket_fence,
loss_ticket_freeze
};
struct rsc_ticket_s {
const char *id;
pe_resource_t *rsc_lh;
pe_ticket_t *ticket;
enum loss_ticket_policy_e loss_policy;
int role_lh;
};
-extern gboolean stage2(pe_working_set_t * data_set);
extern gboolean stage5(pe_working_set_t * data_set);
extern gboolean stage6(pe_working_set_t * data_set);
void pcmk__unpack_constraints(pe_working_set_t *data_set);
extern void add_maintenance_update(pe_working_set_t *data_set);
void pcmk__schedule_actions(xmlNode *cib, unsigned long long flags,
pe_working_set_t *data_set);
extern const char *transition_idle_timeout;
/*!
* \internal
* \brief Check whether colocation's left-hand preferences should be considered
*
* \param[in] colocation Colocation constraint
* \param[in] rsc Right-hand instance (normally colocation->primary;
* NULL is treated as colocation->primary, but for clones or
* bundles with multiple instances this can be a particular
* instance)
*
* \return true if colocation influence should be effective, otherwise false
*/
static inline bool
pcmk__colocation_has_influence(const pcmk__colocation_t *colocation,
const pe_resource_t *rsc)
{
if (rsc == NULL) {
rsc = colocation->primary;
}
/* The left hand of a colocation influences the right hand's location
* if the influence option is true, or the right hand is not yet active.
*/
return colocation->influence || (rsc->running_on == NULL);
}
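Restated in Python to make the short-circuit explicit (a direct translation of the return expression above): the dependent influences placement when the constraint's influence flag is set, or when the primary instance is not active anywhere yet.

```python
def colocation_has_influence(influence, primary_running_on):
    # influence flag wins; otherwise only an inactive primary is influenced
    return influence or (primary_running_on is None)

print(colocation_has_influence(False, None))       # True: primary not active yet
print(colocation_has_influence(False, ["node1"]))  # False: active, influence off
```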
#endif
diff --git a/lib/pacemaker/libpacemaker_private.h b/lib/pacemaker/libpacemaker_private.h
index f222f6eacc..a46aa34291 100644
--- a/lib/pacemaker/libpacemaker_private.h
+++ b/lib/pacemaker/libpacemaker_private.h
@@ -1,386 +1,390 @@
/*
* Copyright 2021-2022 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#ifndef PCMK__LIBPACEMAKER_PRIVATE__H
# define PCMK__LIBPACEMAKER_PRIVATE__H
/* This header is for the sole use of libpacemaker, so that functions can be
* declared with G_GNUC_INTERNAL for efficiency.
*/
#include <crm/pengine/status.h> // pe_action_t, pe_node_t, pe_working_set_t
// Actions (pcmk_sched_actions.c)
G_GNUC_INTERNAL
void pcmk__update_action_for_orderings(pe_action_t *action,
pe_working_set_t *data_set);
G_GNUC_INTERNAL
void pcmk__log_action(const char *pre_text, pe_action_t *action, bool details);
G_GNUC_INTERNAL
pe_action_t *pcmk__new_rsc_pseudo_action(pe_resource_t *rsc, const char *task,
bool optional, bool runnable);
G_GNUC_INTERNAL
pe_action_t *pcmk__new_cancel_action(pe_resource_t *rsc, const char *name,
guint interval_ms, pe_node_t *node);
G_GNUC_INTERNAL
pe_action_t *pcmk__new_shutdown_action(pe_node_t *node,
pe_working_set_t *data_set);
G_GNUC_INTERNAL
bool pcmk__action_locks_rsc_to_node(const pe_action_t *action);
G_GNUC_INTERNAL
void pcmk__deduplicate_action_inputs(pe_action_t *action);
G_GNUC_INTERNAL
void pcmk__output_actions(pe_working_set_t *data_set);
G_GNUC_INTERNAL
bool pcmk__check_action_config(pe_resource_t *rsc, pe_node_t *node,
xmlNode *xml_op);
G_GNUC_INTERNAL
void pcmk__handle_rsc_config_changes(pe_working_set_t *data_set);
// Producing transition graphs (pcmk_graph_producer.c)
G_GNUC_INTERNAL
bool pcmk__graph_has_loop(pe_action_t *init_action, pe_action_t *action,
pe_action_wrapper_t *input);
G_GNUC_INTERNAL
void pcmk__add_action_to_graph(pe_action_t *action, pe_working_set_t *data_set);
G_GNUC_INTERNAL
void pcmk__create_graph(pe_working_set_t *data_set);
// Fencing (pcmk_sched_fencing.c)
G_GNUC_INTERNAL
void pcmk__order_vs_fence(pe_action_t *stonith_op, pe_working_set_t *data_set);
G_GNUC_INTERNAL
void pcmk__order_vs_unfence(pe_resource_t *rsc, pe_node_t *node,
pe_action_t *action, enum pe_ordering order,
pe_working_set_t *data_set);
G_GNUC_INTERNAL
void pcmk__fence_guest(pe_node_t *node, pe_working_set_t *data_set);
G_GNUC_INTERNAL
bool pcmk__node_unfenced(pe_node_t *node);
G_GNUC_INTERNAL
bool pcmk__is_unfence_device(const pe_resource_t *rsc,
const pe_working_set_t *data_set);
// Injected scheduler inputs (pcmk_sched_injections.c)
void pcmk__inject_scheduler_input(pe_working_set_t *data_set, cib_t *cib,
pcmk_injections_t *injections);
// Constraints of any type (pcmk_sched_constraints.c)
G_GNUC_INTERNAL
pe_resource_t *pcmk__find_constraint_resource(GList *rsc_list, const char *id);
G_GNUC_INTERNAL
xmlNode *pcmk__expand_tags_in_sets(xmlNode *xml_obj,
pe_working_set_t *data_set);
G_GNUC_INTERNAL
bool pcmk__valid_resource_or_tag(pe_working_set_t *data_set, const char *id,
pe_resource_t **rsc, pe_tag_t **tag);
G_GNUC_INTERNAL
bool pcmk__tag_to_set(xmlNode *xml_obj, xmlNode **rsc_set, const char *attr,
bool convert_rsc, pe_working_set_t *data_set);
G_GNUC_INTERNAL
void pcmk__create_internal_constraints(pe_working_set_t *data_set);
// Location constraints
G_GNUC_INTERNAL
void pcmk__unpack_location(xmlNode *xml_obj, pe_working_set_t *data_set);
G_GNUC_INTERNAL
pe__location_t *pcmk__new_location(const char *id, pe_resource_t *rsc,
int node_weight, const char *discover_mode,
pe_node_t *foo_node,
pe_working_set_t *data_set);
G_GNUC_INTERNAL
void pcmk__apply_locations(pe_working_set_t *data_set);
G_GNUC_INTERNAL
void pcmk__apply_location(pe__location_t *constraint, pe_resource_t *rsc);
// Colocation constraints
enum pcmk__coloc_affects {
pcmk__coloc_affects_nothing = 0,
pcmk__coloc_affects_location,
pcmk__coloc_affects_role,
};
G_GNUC_INTERNAL
enum pcmk__coloc_affects pcmk__colocation_affects(pe_resource_t *dependent,
pe_resource_t *primary,
pcmk__colocation_t *constraint,
bool preview);
G_GNUC_INTERNAL
void pcmk__apply_coloc_to_weights(pe_resource_t *dependent,
pe_resource_t *primary,
pcmk__colocation_t *constraint);
G_GNUC_INTERNAL
void pcmk__apply_coloc_to_priority(pe_resource_t *dependent,
pe_resource_t *primary,
pcmk__colocation_t *constraint);
G_GNUC_INTERNAL
void pcmk__unpack_colocation(xmlNode *xml_obj, pe_working_set_t *data_set);
G_GNUC_INTERNAL
void pcmk__new_colocation(const char *id, const char *node_attr, int score,
pe_resource_t *dependent, pe_resource_t *primary,
const char *dependent_role, const char *primary_role,
bool influence, pe_working_set_t *data_set);
G_GNUC_INTERNAL
void pcmk__block_colocated_starts(pe_action_t *action,
pe_working_set_t *data_set);
G_GNUC_INTERNAL
void pcmk__new_ordering(pe_resource_t *lh_rsc, char *lh_task,
pe_action_t *lh_action, pe_resource_t *rh_rsc,
char *rh_task, pe_action_t *rh_action,
enum pe_ordering type, pe_working_set_t *data_set);
G_GNUC_INTERNAL
void pcmk__unpack_ordering(xmlNode *xml_obj, pe_working_set_t *data_set);
G_GNUC_INTERNAL
void pcmk__disable_invalid_orderings(pe_working_set_t *data_set);
G_GNUC_INTERNAL
void pcmk__order_stops_before_shutdown(pe_node_t *node,
pe_action_t *shutdown_op,
pe_working_set_t *data_set);
G_GNUC_INTERNAL
void pcmk__apply_orderings(pe_working_set_t *data_set);
/*!
* \internal
* \brief Create a new ordering between two resource actions
*
* \param[in] lh_rsc Resource for 'first' action
* \param[in] rh_rsc Resource for 'then' action
* \param[in] lh_task Action key for 'first' action
* \param[in] rh_task Action key for 'then' action
* \param[in] flags Bitmask of enum pe_ordering flags
* \param[in] data_set Cluster working set to add ordering to
*/
#define pcmk__order_resource_actions(lh_rsc, lh_task, rh_rsc, rh_task, \
flags, data_set) \
pcmk__new_ordering((lh_rsc), pcmk__op_key((lh_rsc)->id, (lh_task), 0), \
NULL, \
(rh_rsc), pcmk__op_key((rh_rsc)->id, (rh_task), 0), \
NULL, (flags), (data_set))
#define pcmk__order_starts(rsc1, rsc2, type, data_set) \
pcmk__order_resource_actions((rsc1), CRMD_ACTION_START, \
(rsc2), CRMD_ACTION_START, (type), (data_set))
#define pcmk__order_stops(rsc1, rsc2, type, data_set) \
pcmk__order_resource_actions((rsc1), CRMD_ACTION_STOP, \
(rsc2), CRMD_ACTION_STOP, (type), (data_set))
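These macros expand to `pcmk__new_ordering()` calls built from operation keys of the form `<rsc-id>_<task>_<interval-ms>`, the format produced by `pcmk__op_key()`. A sketch of how `pcmk__order_starts()` pairs the two keys (key format assumed from that usage):

```python
def op_key(rsc_id, task, interval_ms=0):
    return "%s_%s_%d" % (rsc_id, task, interval_ms)

def order_starts(rsc1, rsc2):
    print("order %s before %s" % (op_key(rsc1, "start"), op_key(rsc2, "start")))

order_starts("db", "web")  # order db_start_0 before web_start_0
```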
G_GNUC_INTERNAL
void pcmk__unpack_rsc_ticket(xmlNode *xml_obj, pe_working_set_t *data_set);
G_GNUC_INTERNAL
bool pcmk__is_failed_remote_node(pe_node_t *node);
G_GNUC_INTERNAL
void pcmk__order_remote_connection_actions(pe_working_set_t *data_set);
G_GNUC_INTERNAL
bool pcmk__rsc_corresponds_to_guest(pe_resource_t *rsc, pe_node_t *node);
G_GNUC_INTERNAL
pe_node_t *pcmk__connection_host_for_action(pe_action_t *action);
G_GNUC_INTERNAL
void pcmk__substitute_remote_addr(pe_resource_t *rsc, GHashTable *params,
pe_working_set_t *data_set);
G_GNUC_INTERNAL
void pcmk__add_bundle_meta_to_xml(xmlNode *args_xml, pe_action_t *action);
// Groups (pcmk_sched_group.c)
G_GNUC_INTERNAL
GList *pcmk__group_colocated_resources(pe_resource_t *rsc,
pe_resource_t *orig_rsc,
GList *colocated_rscs);
// Bundles (pcmk_sched_bundle.c)
G_GNUC_INTERNAL
void pcmk__output_bundle_actions(pe_resource_t *rsc);
// Injections (pcmk_injections.c)
G_GNUC_INTERNAL
xmlNode *pcmk__inject_node(cib_t *cib_conn, const char *node, const char *uuid);
G_GNUC_INTERNAL
xmlNode *pcmk__inject_node_state_change(cib_t *cib_conn, const char *node,
bool up);
G_GNUC_INTERNAL
xmlNode *pcmk__inject_resource_history(pcmk__output_t *out, xmlNode *cib_node,
const char *resource,
const char *lrm_name,
const char *rclass,
const char *rtype,
const char *rprovider);
G_GNUC_INTERNAL
void pcmk__inject_failcount(pcmk__output_t *out, xmlNode *cib_node,
const char *resource, const char *task,
guint interval_ms, int rc);
G_GNUC_INTERNAL
xmlNode *pcmk__inject_action_result(xmlNode *cib_resource,
lrmd_event_data_t *op, int target_rc);
// Nodes (pcmk_sched_nodes.c)
G_GNUC_INTERNAL
bool pcmk__node_available(const pe_node_t *node);
G_GNUC_INTERNAL
bool pcmk__any_node_available(GHashTable *nodes);
G_GNUC_INTERNAL
GHashTable *pcmk__copy_node_table(GHashTable *nodes);
G_GNUC_INTERNAL
GList *pcmk__sort_nodes(GList *nodes, pe_node_t *active_node,
pe_working_set_t *data_set);
G_GNUC_INTERNAL
void pcmk__apply_node_health(pe_working_set_t *data_set);
// Clone notifications (pcmk_sched_notif.c)
G_GNUC_INTERNAL
void pcmk__create_notifications(pe_resource_t *rsc, notify_data_t *n_data);
G_GNUC_INTERNAL
notify_data_t *pcmk__clone_notif_pseudo_ops(pe_resource_t *rsc,
const char *task,
pe_action_t *action,
pe_action_t *complete);
G_GNUC_INTERNAL
void pcmk__free_notification_data(notify_data_t *n_data);
G_GNUC_INTERNAL
void pcmk__order_notifs_after_fencing(pe_action_t *action, pe_resource_t *rsc,
pe_action_t *stonith_op);
// Functions applying to more than one variant (pcmk_sched_resource.c)
G_GNUC_INTERNAL
void pcmk__set_allocation_methods(pe_working_set_t *data_set);
G_GNUC_INTERNAL
bool pcmk__rsc_agent_changed(pe_resource_t *rsc, pe_node_t *node,
const xmlNode *rsc_entry, bool active_on_node);
G_GNUC_INTERNAL
GList *pcmk__rscs_matching_id(const char *id, pe_working_set_t *data_set);
G_GNUC_INTERNAL
GList *pcmk__colocated_resources(pe_resource_t *rsc, pe_resource_t *orig_rsc,
GList *colocated_rscs);
G_GNUC_INTERNAL
void pcmk__output_resource_actions(pe_resource_t *rsc);
G_GNUC_INTERNAL
bool pcmk__assign_primitive(pe_resource_t *rsc, pe_node_t *chosen, bool force);
G_GNUC_INTERNAL
bool pcmk__assign_resource(pe_resource_t *rsc, pe_node_t *node, bool force);
G_GNUC_INTERNAL
void pcmk__unassign_resource(pe_resource_t *rsc);
G_GNUC_INTERNAL
bool pcmk__threshold_reached(pe_resource_t *rsc, pe_node_t *node,
pe_resource_t **failed);
G_GNUC_INTERNAL
void pcmk__sort_resources(pe_working_set_t *data_set);
// Functions related to probes (pcmk_sched_probes.c)
G_GNUC_INTERNAL
void pcmk__order_probes(pe_working_set_t *data_set);
G_GNUC_INTERNAL
void pcmk__schedule_probes(pe_working_set_t *data_set);
// Functions related to node utilization (pcmk_sched_utilization.c)
G_GNUC_INTERNAL
int pcmk__compare_node_capacities(const pe_node_t *node1,
const pe_node_t *node2);
G_GNUC_INTERNAL
void pcmk__consume_node_capacity(GHashTable *current_utilization,
pe_resource_t *rsc);
G_GNUC_INTERNAL
void pcmk__release_node_capacity(GHashTable *current_utilization,
pe_resource_t *rsc);
G_GNUC_INTERNAL
void pcmk__ban_insufficient_capacity(pe_resource_t *rsc, pe_node_t **prefer,
pe_working_set_t *data_set);
-G_GNUC_INTERNAL void pcmk__create_utilization_constraints(pe_resource_t *rsc,
- GList *allowed_nodes);
+G_GNUC_INTERNAL
+void pcmk__create_utilization_constraints(pe_resource_t *rsc,
+ GList *allowed_nodes);
+
+G_GNUC_INTERNAL
+void pcmk__show_node_capacities(const char *desc, pe_working_set_t *data_set);
#endif // PCMK__LIBPACEMAKER_PRIVATE__H
diff --git a/lib/pacemaker/pcmk_sched_allocate.c b/lib/pacemaker/pcmk_sched_allocate.c
index 22a42ec02d..275edb6a30 100644
--- a/lib/pacemaker/pcmk_sched_allocate.c
+++ b/lib/pacemaker/pcmk_sched_allocate.c
@@ -1,818 +1,752 @@
/*
* Copyright 2004-2022 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include "libpacemaker_private.h"
CRM_TRACE_INIT_DATA(pacemaker);
/*!
* \internal
* \brief Do deferred action checks after allocation
*
* When unpacking the resource history, the scheduler checks for resource
* configurations that have changed since an action was run. However, at that
* time, bundles using the REMOTE_CONTAINER_HACK don't have their final
* parameter information, so instead they add a deferred check to a list. This
* function processes one entry in that list.
*
* \param[in] rsc Resource that action history is for
* \param[in] node Node that action history is for
* \param[in] rsc_op Action history entry
* \param[in] check Type of deferred check to do
* \param[in] data_set Working set for cluster
*/
static void
check_params(pe_resource_t *rsc, pe_node_t *node, xmlNode *rsc_op,
enum pe_check_parameters check, pe_working_set_t *data_set)
{
const char *reason = NULL;
op_digest_cache_t *digest_data = NULL;
switch (check) {
case pe_check_active:
if (pcmk__check_action_config(rsc, node, rsc_op)
&& pe_get_failcount(node, rsc, NULL, pe_fc_effective, NULL,
data_set)) {
reason = "action definition changed";
}
break;
case pe_check_last_failure:
digest_data = rsc_action_digest_cmp(rsc, rsc_op, node, data_set);
switch (digest_data->rc) {
case RSC_DIGEST_UNKNOWN:
crm_trace("Resource %s history entry %s on %s has "
"no digest to compare",
rsc->id, ID(rsc_op), node->details->id);
break;
case RSC_DIGEST_MATCH:
break;
default:
reason = "resource parameters have changed";
break;
}
break;
}
if (reason != NULL) {
pe__clear_failcount(rsc, node, reason, data_set);
}
}
/*!
* \internal
* \brief Check whether a resource has failcount clearing scheduled on a node
*
* \param[in] node Node to check
* \param[in] rsc Resource to check
*
* \return true if \p rsc has failcount clearing scheduled on \p node,
* otherwise false
*/
static bool
failcount_clear_action_exists(pe_node_t *node, pe_resource_t *rsc)
{
GList *list = pe__resource_actions(rsc, node, CRM_OP_CLEAR_FAILCOUNT, TRUE);
if (list != NULL) {
g_list_free(list);
return true;
}
return false;
}
/*!
* \internal
* \brief Ban a resource from a node if it reached its failure threshold there
*
* \param[in] rsc Resource to check failure threshold for
* \param[in] node Node to check \p rsc on
*/
static void
check_failure_threshold(pe_resource_t *rsc, pe_node_t *node)
{
// If this is a collective resource, apply recursively to children instead
if (rsc->children != NULL) {
g_list_foreach(rsc->children, (GFunc) check_failure_threshold,
node);
return;
} else if (failcount_clear_action_exists(node, rsc)) {
/* Don't force the resource away from this node due to a failcount
* that's going to be cleared.
*
* @TODO Failcount clearing can be scheduled in
* pcmk__handle_rsc_config_changes() via process_rsc_history(), or in
* stage5() via check_params(). This runs well before then, so it cannot
* detect those, meaning we might check the migration threshold when we
* shouldn't. Worst case, we stop or move the resource, then move it
* back in the next transition.
*/
return;
} else {
pe_resource_t *failed = NULL;
if (pcmk__threshold_reached(rsc, node, &failed)) {
resource_location(failed, node, -INFINITY, "__fail_limit__",
rsc->cluster);
}
}
}
+/*!
+ * \internal
+ * \brief If resource has exclusive discovery, ban node if not allowed
+ *
+ * Location constraints have a resource-discovery option that allows users to
+ * specify where probes are done for the affected resource. If this is set to
+ * exclusive, probes will only be done on nodes listed in exclusive constraints.
+ * This function bans the resource from the node if the node is not listed.
+ *
+ * \param[in] rsc Resource to check
+ * \param[in] node Node to check \p rsc on
+ */
+static void
+apply_exclusive_discovery(pe_resource_t *rsc, pe_node_t *node)
+{
+ if (rsc->exclusive_discover || uber_parent(rsc)->exclusive_discover) {
+ pe_node_t *match = NULL;
+
+ // If this is a collective resource, apply recursively to children
+ g_list_foreach(rsc->children, (GFunc) apply_exclusive_discovery, node);
+
+ match = g_hash_table_lookup(rsc->allowed_nodes, node->details->id);
+ if ((match != NULL)
+ && (match->rsc_discover_mode != pe_discover_exclusive)) {
+ match->weight = -INFINITY;
+ }
+ }
+}
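A condensed Python restatement of the ban above (simplified data model, assumed for illustration): once a resource or its top-level parent is marked for exclusive discovery, any allowed node whose discovery mode is not exclusive is weighted to -INFINITY.

```python
NEG_INF = float("-inf")

def apply_exclusive_discovery(allowed_nodes, exclusive_discover):
    if not exclusive_discover:
        return
    for info in allowed_nodes.values():
        if info["discover_mode"] != "exclusive":
            info["weight"] = NEG_INF  # ban: node not listed in an exclusive constraint

nodes = {"node1": {"discover_mode": "exclusive", "weight": 100},
         "node2": {"discover_mode": "always", "weight": 50}}
apply_exclusive_discovery(nodes, True)
print(nodes["node2"]["weight"])  # -inf
```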
+
/*!
* \internal
* \brief Apply stickiness to a resource if appropriate
*
* \param[in] rsc Resource to check for stickiness
* \param[in] data_set Cluster working set
*/
static void
apply_stickiness(pe_resource_t *rsc, pe_working_set_t *data_set)
{
pe_node_t *node = NULL;
// If this is a collective resource, apply recursively to children instead
if (rsc->children != NULL) {
g_list_foreach(rsc->children, (GFunc) apply_stickiness, data_set);
return;
}
/* A resource is sticky if it is managed, has stickiness configured, and is
* active on a single node.
*/
if (!pcmk_is_set(rsc->flags, pe_rsc_managed)
|| (rsc->stickiness < 1) || !pcmk__list_of_1(rsc->running_on)) {
return;
}
node = rsc->running_on->data;
/* In a symmetric cluster, stickiness can always be used. In an
* asymmetric cluster, we have to check whether the resource is still
* allowed on the node, so we don't keep the resource somewhere it is no
* longer explicitly enabled.
*/
if (!pcmk_is_set(rsc->cluster->flags, pe_flag_symmetric_cluster)
&& (pe_hash_table_lookup(rsc->allowed_nodes,
node->details->id) == NULL)) {
pe_rsc_debug(rsc,
"Ignoring %s stickiness because the cluster is "
"asymmetric and node %s is not explicitly allowed",
rsc->id, node->details->uname);
return;
}
pe_rsc_debug(rsc, "Resource %s has %d stickiness on node %s",
rsc->id, rsc->stickiness, node->details->uname);
resource_location(rsc, node, rsc->stickiness, "stickiness",
rsc->cluster);
}
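The conditions above, collapsed into one predicate (a sketch with simplified types): stickiness applies only to a managed resource with positive stickiness that is active on exactly one node, and in an asymmetric cluster only if that node is still explicitly allowed.

```python
def stickiness_applies(managed, stickiness, running_on, symmetric, allowed):
    if not managed or stickiness < 1 or len(running_on) != 1:
        return False
    return symmetric or (running_on[0] in allowed)

print(stickiness_applies(True, 100, ["node1"], True, set()))       # True
print(stickiness_applies(True, 100, ["node1"], False, {"node2"}))  # False: not allowed
```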
+/*!
+ * \internal
+ * \brief Apply shutdown locks for all resources as appropriate
+ *
+ * \param[in] data_set Cluster working set
+ */
static void
-rsc_discover_filter(pe_resource_t *rsc, pe_node_t *node)
+apply_shutdown_locks(pe_working_set_t *data_set)
{
- pe_resource_t *top = uber_parent(rsc);
- pe_node_t *match;
-
- if (rsc->exclusive_discover == FALSE && top->exclusive_discover == FALSE) {
+ if (!pcmk_is_set(data_set->flags, pe_flag_shutdown_lock)) {
return;
}
+ for (GList *iter = data_set->resources; iter != NULL; iter = iter->next) {
+ pe_resource_t *rsc = (pe_resource_t *) iter->data;
- g_list_foreach(rsc->children, (GFunc) rsc_discover_filter, node);
-
- match = g_hash_table_lookup(rsc->allowed_nodes, node->details->id);
- if (match && match->rsc_discover_mode != pe_discover_exclusive) {
- match->weight = -INFINITY;
- }
-}
-
-static time_t
-shutdown_time(pe_node_t *node, pe_working_set_t *data_set)
-{
- const char *shutdown = pe_node_attribute_raw(node, XML_CIB_ATTR_SHUTDOWN);
- time_t result = 0;
-
- if (shutdown) {
- long long result_ll;
-
- if (pcmk__scan_ll(shutdown, &result_ll, 0LL) == pcmk_rc_ok) {
- result = (time_t) result_ll;
- }
+ rsc->cmds->shutdown_lock(rsc);
}
- return result? result : get_effective_time(data_set);
}
+/*!
+ * \internal
+ * \brief Calculate the number of available nodes in the cluster
+ *
+ * \param[in] data_set Cluster working set
+ */
static void
-apply_shutdown_lock(pe_resource_t *rsc, pe_working_set_t *data_set)
+count_available_nodes(pe_working_set_t *data_set)
{
- const char *class;
-
- // Only primitives and (uncloned) groups may be locked
- if (rsc->variant == pe_group) {
- g_list_foreach(rsc->children, (GFunc) apply_shutdown_lock, data_set);
- } else if (rsc->variant != pe_native) {
- return;
- }
-
- // Fence devices and remote connections can't be locked
- class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
- if (pcmk__str_eq(class, PCMK_RESOURCE_CLASS_STONITH, pcmk__str_null_matches)
- || pe__resource_is_remote_conn(rsc, data_set)) {
- return;
- }
-
- if (rsc->lock_node != NULL) {
- // The lock was obtained from resource history
-
- if (rsc->running_on != NULL) {
- /* The resource was started elsewhere even though it is now
- * considered locked. This shouldn't be possible, but as a
- * failsafe, we don't want to disturb the resource now.
- */
- pe_rsc_info(rsc,
- "Cancelling shutdown lock because %s is already active",
- rsc->id);
- pe__clear_resource_history(rsc, rsc->lock_node, data_set);
- rsc->lock_node = NULL;
- rsc->lock_time = 0;
- }
-
- // Only a resource active on exactly one node can be locked
- } else if (pcmk__list_of_1(rsc->running_on)) {
- pe_node_t *node = rsc->running_on->data;
-
- if (node->details->shutdown) {
- if (node->details->unclean) {
- pe_rsc_debug(rsc, "Not locking %s to unclean %s for shutdown",
- rsc->id, node->details->uname);
- } else {
- rsc->lock_node = node;
- rsc->lock_time = shutdown_time(node, data_set);
- }
- }
- }
-
- if (rsc->lock_node == NULL) {
- // No lock needed
+ if (pcmk_is_set(data_set->flags, pe_flag_no_compat)) {
return;
}
- if (data_set->shutdown_lock > 0) {
- time_t lock_expiration = rsc->lock_time + data_set->shutdown_lock;
-
- pe_rsc_info(rsc, "Locking %s to %s due to shutdown (expires @%lld)",
- rsc->id, rsc->lock_node->details->uname,
- (long long) lock_expiration);
- pe__update_recheck_time(++lock_expiration, data_set);
- } else {
- pe_rsc_info(rsc, "Locking %s to %s due to shutdown",
- rsc->id, rsc->lock_node->details->uname);
- }
-
- // If resource is locked to one node, ban it from all other nodes
- for (GList *item = data_set->nodes; item != NULL; item = item->next) {
- pe_node_t *node = item->data;
+ // @COMPAT for API backward compatibility only (cluster does not use value)
+ for (GList *iter = data_set->nodes; iter != NULL; iter = iter->next) {
+ pe_node_t *node = (pe_node_t *) iter->data;
- if (strcmp(node->details->uname, rsc->lock_node->details->uname)) {
- resource_location(rsc, node, -CRM_SCORE_INFINITY,
- XML_CONFIG_ATTR_SHUTDOWN_LOCK, data_set);
+ if ((node != NULL) && (node->weight >= 0) && node->details->online
+ && (node->details->type != node_ping)) {
+ data_set->max_valid_nodes++;
}
}
+ crm_trace("Online node count: %d", data_set->max_valid_nodes);
}
/*
* \internal
- * \brief Stage 2 of cluster status: apply node-specific criteria
+ * \brief Apply node-specific scheduling criteria
*
- * Count known nodes, and apply location constraints, stickiness, and exclusive
- * resource discovery.
+ * After the CIB has been unpacked, process node-specific scheduling criteria
+ * including shutdown locks, location constraints, resource stickiness,
+ * migration thresholds, and exclusive resource discovery.
*/
-gboolean
-stage2(pe_working_set_t * data_set)
+static void
+apply_node_criteria(pe_working_set_t *data_set)
{
- GList *gIter = NULL;
-
- if (pcmk_is_set(data_set->flags, pe_flag_shutdown_lock)) {
- g_list_foreach(data_set->resources, (GFunc) apply_shutdown_lock, data_set);
- }
-
- if (!pcmk_is_set(data_set->flags, pe_flag_no_compat)) {
- // @COMPAT API backward compatibility
- for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
- pe_node_t *node = (pe_node_t *) gIter->data;
-
- if (node && (node->weight >= 0) && node->details->online
- && (node->details->type != node_ping)) {
- data_set->max_valid_nodes++;
- }
- }
- crm_trace("Online node count: %d", data_set->max_valid_nodes);
- }
-
+ crm_trace("Applying node-specific scheduling criteria");
+ apply_shutdown_locks(data_set);
+ count_available_nodes(data_set);
pcmk__apply_locations(data_set);
g_list_foreach(data_set->resources, (GFunc) apply_stickiness, data_set);
- gIter = data_set->nodes;
- for (; gIter != NULL; gIter = gIter->next) {
- GList *gIter2 = NULL;
- pe_node_t *node = (pe_node_t *) gIter->data;
-
- gIter2 = data_set->resources;
- for (; gIter2 != NULL; gIter2 = gIter2->next) {
- pe_resource_t *rsc = (pe_resource_t *) gIter2->data;
+ for (GList *node_iter = data_set->nodes; node_iter != NULL;
+ node_iter = node_iter->next) {
+ for (GList *rsc_iter = data_set->resources; rsc_iter != NULL;
+ rsc_iter = rsc_iter->next) {
+ pe_node_t *node = (pe_node_t *) node_iter->data;
+ pe_resource_t *rsc = (pe_resource_t *) rsc_iter->data;
check_failure_threshold(rsc, node);
- rsc_discover_filter(rsc, node);
+ apply_exclusive_discovery(rsc, node);
}
}
-
- return TRUE;
}
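The net effect of this refactor is that the old `stage2()` becomes a fixed pipeline of named helpers. A runnable outline with no-op stubs (names mirror the C helpers; the data model is a placeholder):

```python
def apply_shutdown_locks(ds): print("apply shutdown locks")
def count_available_nodes(ds): print("count available nodes")
def apply_locations(ds): print("apply location constraints")
def apply_stickiness(rsc, ds): print("stickiness:", rsc)
def check_failure_threshold(rsc, node): print("threshold:", rsc, "on", node)
def apply_exclusive_discovery(rsc, node): print("discovery:", rsc, "on", node)

def apply_node_criteria(ds):
    apply_shutdown_locks(ds)
    count_available_nodes(ds)
    apply_locations(ds)
    for rsc in ds["resources"]:
        apply_stickiness(rsc, ds)
    for node in ds["nodes"]:
        for rsc in ds["resources"]:
            check_failure_threshold(rsc, node)
            apply_exclusive_discovery(rsc, node)

apply_node_criteria({"resources": ["rsc1"], "nodes": ["node1"]})
```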
+/*!
+ * \internal
+ * \brief Allocate resources to nodes
+ *
+ * \param[in] data_set Cluster working set
+ */
static void
-allocate_resources(pe_working_set_t * data_set)
+allocate_resources(pe_working_set_t *data_set)
{
- GList *gIter = NULL;
+ GList *iter = NULL;
+
+ crm_trace("Allocating resources to nodes");
+
+ if (!pcmk__str_eq(data_set->placement_strategy, "default", pcmk__str_casei)) {
+ pcmk__sort_resources(data_set);
+ }
+ pcmk__show_node_capacities("Original", data_set);
if (pcmk_is_set(data_set->flags, pe_flag_have_remote_nodes)) {
/* Allocate remote connection resources first (which will also allocate
* any colocation dependencies). If the connection is migrating, always
* prefer the partial migration target.
*/
- for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) {
- pe_resource_t *rsc = (pe_resource_t *) gIter->data;
- if (rsc->is_remote_node == FALSE) {
- continue;
+ for (iter = data_set->resources; iter != NULL; iter = iter->next) {
+ pe_resource_t *rsc = (pe_resource_t *) iter->data;
+
+ if (rsc->is_remote_node) {
+ pe_rsc_trace(rsc, "Allocating remote connection resource '%s'",
+ rsc->id);
+ rsc->cmds->allocate(rsc, rsc->partial_migration_target,
+ data_set);
}
- pe_rsc_trace(rsc, "Allocating remote connection resource '%s'",
- rsc->id);
- rsc->cmds->allocate(rsc, rsc->partial_migration_target, data_set);
}
}
/* now do the rest of the resources */
- for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) {
- pe_resource_t *rsc = (pe_resource_t *) gIter->data;
- if (rsc->is_remote_node == TRUE) {
- continue;
+ for (iter = data_set->resources; iter != NULL; iter = iter->next) {
+ pe_resource_t *rsc = (pe_resource_t *) iter->data;
+
+ if (!rsc->is_remote_node) {
+ pe_rsc_trace(rsc, "Allocating %s resource '%s'",
+ crm_element_name(rsc->xml), rsc->id);
+ rsc->cmds->allocate(rsc, NULL, data_set);
}
- pe_rsc_trace(rsc, "Allocating %s resource '%s'",
- crm_element_name(rsc->xml), rsc->id);
- rsc->cmds->allocate(rsc, NULL, data_set);
}
+
+ pcmk__show_node_capacities("Remaining", data_set);
}
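Allocation is two passes: remote connection resources first (so their colocation dependencies are placed early, preferring a partial migration target when one is set), then everything else. A minimal sketch of the loop structure:

```python
resources = [{"id": "remote-conn-1", "is_remote_node": True},
             {"id": "web-server", "is_remote_node": False}]

for rsc in resources:                 # pass 1: remote connections
    if rsc["is_remote_node"]:
        print("allocating remote connection", rsc["id"])

for rsc in resources:                 # pass 2: all other resources
    if not rsc["is_remote_node"]:
        print("allocating", rsc["id"])
```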
-// Clear fail counts for orphaned rsc on all online nodes
+/*!
+ * \internal
+ * \brief Schedule fail count clearing on online nodes if resource is orphaned
+ *
+ * \param[in] rsc Resource to check
+ * \param[in] data_set Cluster working set
+ */
static void
-cleanup_orphans(pe_resource_t * rsc, pe_working_set_t * data_set)
+clear_failcounts_if_orphaned(pe_resource_t *rsc, pe_working_set_t *data_set)
{
- GList *gIter = NULL;
+ if (!pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
+ return;
+ }
+ crm_trace("Clear fail counts for orphaned resource %s", rsc->id);
- for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
- pe_node_t *node = (pe_node_t *) gIter->data;
+ /* There's no need to recurse into rsc->children because those
+ * should just be unallocated clone instances.
+ */
- if (node->details->online
- && pe_get_failcount(node, rsc, NULL, pe_fc_effective, NULL,
- data_set)) {
+ for (GList *iter = data_set->nodes; iter != NULL; iter = iter->next) {
+ pe_node_t *node = (pe_node_t *) iter->data;
+ pe_action_t *clear_op = NULL;
- pe_action_t *clear_op = NULL;
+ if (!node->details->online) {
+ continue;
+ }
+ if (pe_get_failcount(node, rsc, NULL, pe_fc_effective, NULL,
+ data_set) == 0) {
+ continue;
+ }
- clear_op = pe__clear_failcount(rsc, node, "it is orphaned",
- data_set);
+ clear_op = pe__clear_failcount(rsc, node, "it is orphaned", data_set);
- /* We can't use order_action_then_stop() here because its
- * pe_order_preserve breaks things
- */
- pcmk__new_ordering(clear_op->rsc, NULL, clear_op,
- rsc, stop_key(rsc), NULL,
- pe_order_optional, data_set);
- }
+ /* We can't use order_action_then_stop() here because its
+ * pe_order_preserve breaks things
+ */
+ pcmk__new_ordering(clear_op->rsc, NULL, clear_op, rsc, stop_key(rsc),
+ NULL, pe_order_optional, data_set);
}
}
gboolean
stage5(pe_working_set_t * data_set)
{
- pcmk__output_t *out = data_set->priv;
GList *gIter = NULL;
- if (!pcmk__str_eq(data_set->placement_strategy, "default", pcmk__str_casei)) {
- pcmk__sort_resources(data_set);
- }
-
- gIter = data_set->nodes;
- for (; gIter != NULL; gIter = gIter->next) {
- pe_node_t *node = (pe_node_t *) gIter->data;
-
- if (pcmk_is_set(data_set->flags, pe_flag_show_utilization)) {
- out->message(out, "node-capacity", node, "Original");
- }
- }
-
- crm_trace("Allocating services");
- /* Take (next) highest resource, assign it and create its actions */
-
- allocate_resources(data_set);
-
- gIter = data_set->nodes;
- for (; gIter != NULL; gIter = gIter->next) {
- pe_node_t *node = (pe_node_t *) gIter->data;
-
- if (pcmk_is_set(data_set->flags, pe_flag_show_utilization)) {
- out->message(out, "node-capacity", node, "Remaining");
- }
- }
-
// Process deferred action checks
pe__foreach_param_check(data_set, check_params);
pe__free_param_checks(data_set);
if (pcmk_is_set(data_set->flags, pe_flag_startup_probes)) {
crm_trace("Calculating needed probes");
pcmk__schedule_probes(data_set);
}
- crm_trace("Handle orphans");
if (pcmk_is_set(data_set->flags, pe_flag_stop_rsc_orphans)) {
- for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) {
- pe_resource_t *rsc = (pe_resource_t *) gIter->data;
-
- /* There's no need to recurse into rsc->children because those
- * should just be unallocated clone instances.
- */
- if (pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
- cleanup_orphans(rsc, data_set);
- }
- }
+ g_list_foreach(data_set->resources,
+ (GFunc) clear_failcounts_if_orphaned, data_set);
}
crm_trace("Creating actions");
for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) {
pe_resource_t *rsc = (pe_resource_t *) gIter->data;
rsc->cmds->create_actions(rsc, data_set);
}
crm_trace("Creating done");
return TRUE;
}
static gboolean
is_managed(const pe_resource_t * rsc)
{
GList *gIter = rsc->children;
if (pcmk_is_set(rsc->flags, pe_rsc_managed)) {
return TRUE;
}
for (; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
if (is_managed(child_rsc)) {
return TRUE;
}
}
return FALSE;
}
static gboolean
any_managed_resources(pe_working_set_t * data_set)
{
GList *gIter = data_set->resources;
for (; gIter != NULL; gIter = gIter->next) {
pe_resource_t *rsc = (pe_resource_t *) gIter->data;
if (is_managed(rsc)) {
return TRUE;
}
}
return FALSE;
}
/*
* Create dependencies for stonith and shutdown operations
*/
gboolean
stage6(pe_working_set_t * data_set)
{
pe_action_t *dc_down = NULL;
pe_action_t *stonith_op = NULL;
gboolean integrity_lost = FALSE;
gboolean need_stonith = TRUE;
GList *gIter;
GList *stonith_ops = NULL;
GList *shutdown_ops = NULL;
/* Remote ordering constraints need to happen prior to calculating fencing
* because it is one more place we can mark nodes as needing fencing.
*/
pcmk__order_remote_connection_actions(data_set);
crm_trace("Processing fencing and shutdown cases");
if (any_managed_resources(data_set) == FALSE) {
crm_notice("Delaying fencing operations until there are resources to manage");
need_stonith = FALSE;
}
/* Check each node for stonith/shutdown */
for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
pe_node_t *node = (pe_node_t *) gIter->data;
/* Guest nodes are "fenced" by recovering their container resource,
* so handle them separately.
*/
if (pe__is_guest_node(node)) {
if (node->details->remote_requires_reset && need_stonith
&& pe_can_fence(data_set, node)) {
pcmk__fence_guest(node, data_set);
}
continue;
}
stonith_op = NULL;
if (node->details->unclean
&& need_stonith && pe_can_fence(data_set, node)) {
stonith_op = pe_fence_op(node, NULL, FALSE, "node is unclean", FALSE, data_set);
pe_warn("Scheduling Node %s for STONITH", node->details->uname);
pcmk__order_vs_fence(stonith_op, data_set);
if (node->details->is_dc) {
// Remember if the DC is being fenced
dc_down = stonith_op;
} else {
if (!pcmk_is_set(data_set->flags, pe_flag_concurrent_fencing)
&& (stonith_ops != NULL)) {
/* Concurrent fencing is disabled, so order each non-DC
* fencing in a chain. If there is any DC fencing or
* shutdown, it will be ordered after the last action in the
* chain later.
*/
order_actions((pe_action_t *) stonith_ops->data,
stonith_op, pe_order_optional);
}
// Remember all non-DC fencing actions in a separate list
stonith_ops = g_list_prepend(stonith_ops, stonith_op);
}
} else if (node->details->online && node->details->shutdown &&
/* TODO: Define what a shutdown op means for a remote node. For now we
 * do not send shutdown operations for remote nodes, but if we come up
 * with a good use for this in the future, we will. */
pe__is_guest_or_remote_node(node) == FALSE) {
pe_action_t *down_op = pcmk__new_shutdown_action(node, data_set);
if (node->details->is_dc) {
// Remember if the DC is being shut down
dc_down = down_op;
} else {
// Remember non-DC shutdowns for later ordering
shutdown_ops = g_list_prepend(shutdown_ops, down_op);
}
}
if (node->details->unclean && stonith_op == NULL) {
integrity_lost = TRUE;
pe_warn("Node %s is unclean!", node->details->uname);
}
}
if (integrity_lost) {
if (!pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) {
pe_warn("YOUR RESOURCES ARE NOW LIKELY COMPROMISED");
pe_err("ENABLE STONITH TO KEEP YOUR RESOURCES SAFE");
} else if (!pcmk_is_set(data_set->flags, pe_flag_have_quorum)) {
crm_notice("Cannot fence unclean nodes until quorum is"
" attained (or no-quorum-policy is set to ignore)");
}
}
if (dc_down != NULL) {
/* Order any non-DC shutdowns before any DC shutdown, to avoid repeated
* DC elections. However, we don't want to order non-DC shutdowns before
* a DC *fencing*, because even though we don't want a node that's
* shutting down to become DC, the DC fencing could be ordered before a
* clone stop that's also ordered before the shutdowns, thus leading to
* a graph loop.
*/
if (pcmk__str_eq(dc_down->task, CRM_OP_SHUTDOWN, pcmk__str_casei)) {
for (gIter = shutdown_ops; gIter != NULL; gIter = gIter->next) {
pe_action_t *node_stop = (pe_action_t *) gIter->data;
crm_debug("Ordering shutdown on %s before %s on DC %s",
node_stop->node->details->uname,
dc_down->task, dc_down->node->details->uname);
order_actions(node_stop, dc_down, pe_order_optional);
}
}
// Order any non-DC fencing before any DC fencing or shutdown
if (pcmk_is_set(data_set->flags, pe_flag_concurrent_fencing)) {
/* With concurrent fencing, order each non-DC fencing action
* separately before any DC fencing or shutdown.
*/
for (gIter = stonith_ops; gIter != NULL; gIter = gIter->next) {
order_actions((pe_action_t *) gIter->data, dc_down,
pe_order_optional);
}
} else if (stonith_ops) {
/* Without concurrent fencing, the non-DC fencing actions are
* already ordered relative to each other, so we just need to order
* the DC fencing after the last action in the chain (which is the
* first item in the list).
*/
order_actions((pe_action_t *) stonith_ops->data, dc_down,
pe_order_optional);
}
}
g_list_free(stonith_ops);
g_list_free(shutdown_ops);
return TRUE;
}
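/* Worked example of the non-concurrent fencing chain above, with assumed
 * nodes (not from the source): unclean non-DC nodes n1, n2 and n3 are
 * processed in that order.
 *
 *   after n1: stonith_ops = [f(n1)]
 *   after n2: stonith_ops = [f(n2), f(n1)]         orders f(n1) -> f(n2)
 *   after n3: stonith_ops = [f(n3), f(n2), f(n1)]  orders f(n2) -> f(n3)
 *
 * Because g_list_prepend() keeps the newest fencing action at the head,
 * the head is always the last link in the chain, so ordering dc_down
 * after stonith_ops->data appends any DC fencing or shutdown to the end
 * of the chain.
 */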
static void
log_resource_details(pe_working_set_t *data_set)
{
pcmk__output_t *out = data_set->priv;
GList *all = NULL;
/* We need a list of nodes that we are allowed to output information for.
* This is necessary because out->message for all the resource-related
* messages expects such a list, due to the `crm_mon --node=` feature. Here,
* we just make it a list of all the nodes.
*/
all = g_list_prepend(all, (gpointer) "*");
for (GList *item = data_set->resources; item != NULL; item = item->next) {
pe_resource_t *rsc = (pe_resource_t *) item->data;
// Log all resources except inactive orphans
if (!pcmk_is_set(rsc->flags, pe_rsc_orphan)
|| (rsc->role != RSC_ROLE_STOPPED)) {
out->message(out, crm_map_element_name(rsc->xml), 0, rsc, all, all);
}
}
g_list_free(all);
}
static void
log_all_actions(pe_working_set_t *data_set)
{
/* This only ever outputs to the log, so ignore whatever output object was
* previously set and just log instead.
*/
pcmk__output_t *prev_out = data_set->priv;
pcmk__output_t *out = pcmk__new_logger();
if (out == NULL) {
return;
}
pcmk__output_set_log_level(out, LOG_NOTICE);
data_set->priv = out;
out->begin_list(out, NULL, NULL, "Actions");
pcmk__output_actions(data_set);
out->end_list(out);
out->finish(out, CRM_EX_OK, true, NULL);
pcmk__output_free(out);
data_set->priv = prev_out;
}
/*!
* \internal
* \brief Log all required but unrunnable actions at trace level
*
* \param[in] data_set Cluster working set
*/
static void
log_unrunnable_actions(pe_working_set_t *data_set)
{
const uint64_t flags = pe_action_optional|pe_action_runnable|pe_action_pseudo;
crm_trace("Required but unrunnable actions:");
for (GList *iter = data_set->actions; iter != NULL; iter = iter->next) {
pe_action_t *action = (pe_action_t *) iter->data;
if (!pcmk_any_flags_set(action->flags, flags)) {
pcmk__log_action("\t", action, true);
}
}
}
/*!
* \internal
* \brief Unpack the CIB for scheduling
*
* \param[in] cib CIB XML to unpack (may be NULL if previously unpacked)
* \param[in] flags Working set flags to set in addition to defaults
* \param[in] data_set Cluster working set
*/
static void
unpack_cib(xmlNode *cib, unsigned long long flags, pe_working_set_t *data_set)
{
if (pcmk_is_set(data_set->flags, pe_flag_have_status)) {
crm_trace("Reusing previously calculated cluster status");
pe__set_working_set_flags(data_set, flags);
return;
}
CRM_ASSERT(cib != NULL);
crm_trace("Calculating cluster status");
/* This will zero the entire struct without freeing anything first, so
* callers should never call pcmk__schedule_actions() with a populated data
* set unless pe_flag_have_status is set (i.e. cluster_status() was
* previously called, whether directly or via pcmk__schedule_actions()).
*/
set_working_set_defaults(data_set);
pe__set_working_set_flags(data_set, flags);
data_set->input = cib;
cluster_status(data_set); // Sets pe_flag_have_status
}
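/* Illustrative sketch (not called anywhere): per the contract documented
 * in unpack_cib() above, a caller may run the scheduler repeatedly on the
 * same working set once the status has been unpacked, passing NULL for the
 * CIB on later runs. Assumes pe_new_working_set() and pe_free_working_set()
 * from <crm/pengine/status.h>, and a hypothetical cib_xml input.
 */
static void
example_schedule_twice(xmlNode *cib_xml)
{
    pe_working_set_t *data_set = pe_new_working_set();

    // First run unpacks the CIB and sets pe_flag_have_status
    pcmk__schedule_actions(cib_xml, 0, data_set);

    // Later runs may pass NULL because the unpacked status is reused
    pcmk__schedule_actions(NULL, 0, data_set);

    pe_free_working_set(data_set);
}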
/*!
* \internal
* \brief Run the scheduler for a given CIB
*
* \param[in] cib CIB XML to use as scheduler input
* \param[in] flags Working set flags to set in addition to defaults
* \param[in,out] data_set Cluster working set
*/
void
pcmk__schedule_actions(xmlNode *cib, unsigned long long flags,
pe_working_set_t *data_set)
{
unpack_cib(cib, flags, data_set);
pcmk__set_allocation_methods(data_set);
pcmk__apply_node_health(data_set);
pcmk__unpack_constraints(data_set);
if (pcmk_is_set(data_set->flags, pe_flag_check_config)) {
return;
}
if (!pcmk_is_set(data_set->flags, pe_flag_quick_location) &&
pcmk__is_daemon) {
log_resource_details(data_set);
}
- crm_trace("Applying location constraints");
- stage2(data_set);
+ apply_node_criteria(data_set);
if (pcmk_is_set(data_set->flags, pe_flag_quick_location)) {
return;
}
pcmk__create_internal_constraints(data_set);
pcmk__handle_rsc_config_changes(data_set);
- crm_trace("Allocate resources");
+ allocate_resources(data_set);
+
stage5(data_set);
crm_trace("Processing fencing and shutdown cases");
stage6(data_set);
pcmk__apply_orderings(data_set);
log_all_actions(data_set);
crm_trace("Create transition graph");
pcmk__create_graph(data_set);
if (get_crm_log_level() == LOG_TRACE) {
log_unrunnable_actions(data_set);
}
}
diff --git a/lib/pacemaker/pcmk_sched_bundle.c b/lib/pacemaker/pcmk_sched_bundle.c
index ef25c4df5c..ecaf0ad6e4 100644
--- a/lib/pacemaker/pcmk_sched_bundle.c
+++ b/lib/pacemaker/pcmk_sched_bundle.c
@@ -1,1121 +1,1128 @@
/*
* Copyright 2004-2022 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#include <crm/msg_xml.h>
#include <crm/common/xml.h>
#include <pacemaker-internal.h>
#include "libpacemaker_private.h"
#define PE__VARIANT_BUNDLE 1
#include <lib/pengine/variant.h>
static bool
is_bundle_node(pe__bundle_variant_data_t *data, pe_node_t *node)
{
for (GList *gIter = data->replicas; gIter != NULL; gIter = gIter->next) {
pe__bundle_replica_t *replica = gIter->data;
if (node->details == replica->node->details) {
return TRUE;
}
}
return FALSE;
}
gint sort_clone_instance(gconstpointer a, gconstpointer b, gpointer data_set);
void distribute_children(pe_resource_t *rsc, GList *children, GList *nodes,
int max, int per_host_max, pe_working_set_t * data_set);
static GList *
get_container_list(pe_resource_t *rsc)
{
GList *containers = NULL;
if (rsc->variant == pe_container) {
pe__bundle_variant_data_t *data = NULL;
get_bundle_variant_data(data, rsc);
for (GList *gIter = data->replicas; gIter != NULL;
gIter = gIter->next) {
pe__bundle_replica_t *replica = gIter->data;
containers = g_list_append(containers, replica->container);
}
}
return containers;
}
static inline GList *
get_containers_or_children(pe_resource_t *rsc)
{
return (rsc->variant == pe_container)?
get_container_list(rsc) : rsc->children;
}
pe_node_t *
pcmk__bundle_allocate(pe_resource_t *rsc, pe_node_t *prefer,
pe_working_set_t *data_set)
{
GList *containers = NULL;
GList *nodes = NULL;
pe__bundle_variant_data_t *bundle_data = NULL;
CRM_CHECK(rsc != NULL, return NULL);
get_bundle_variant_data(bundle_data, rsc);
pe__set_resource_flags(rsc, pe_rsc_allocating);
containers = get_container_list(rsc);
pe__show_node_weights(!pcmk_is_set(data_set->flags, pe_flag_show_scores),
rsc, __func__, rsc->allowed_nodes, data_set);
nodes = g_hash_table_get_values(rsc->allowed_nodes);
nodes = pcmk__sort_nodes(nodes, NULL, data_set);
containers = g_list_sort_with_data(containers, sort_clone_instance, data_set);
distribute_children(rsc, containers, nodes, bundle_data->nreplicas,
bundle_data->nreplicas_per_host, data_set);
g_list_free(nodes);
g_list_free(containers);
for (GList *gIter = bundle_data->replicas; gIter != NULL;
gIter = gIter->next) {
pe__bundle_replica_t *replica = gIter->data;
pe_node_t *container_host = NULL;
CRM_ASSERT(replica);
if (replica->ip) {
pe_rsc_trace(rsc, "Allocating bundle %s IP %s",
rsc->id, replica->ip->id);
replica->ip->cmds->allocate(replica->ip, prefer, data_set);
}
container_host = replica->container->allocated_to;
if (replica->remote && pe__is_guest_or_remote_node(container_host)) {
/* We need 'nested' connection resources to be on the same
* host because pacemaker-remoted only supports a single
* active connection
*/
pcmk__new_colocation("child-remote-with-docker-remote", NULL,
INFINITY, replica->remote,
container_host->details->remote_rsc, NULL,
NULL, true, data_set);
}
if (replica->remote) {
pe_rsc_trace(rsc, "Allocating bundle %s connection %s",
rsc->id, replica->remote->id);
replica->remote->cmds->allocate(replica->remote, prefer,
data_set);
}
// Explicitly allocate replicas' children before bundle child
if (replica->child) {
pe_node_t *node = NULL;
GHashTableIter iter;
g_hash_table_iter_init(&iter, replica->child->allowed_nodes);
while (g_hash_table_iter_next(&iter, NULL, (gpointer *) & node)) {
if (node->details != replica->node->details) {
node->weight = -INFINITY;
} else if (!pcmk__threshold_reached(replica->child, node,
NULL)) {
node->weight = INFINITY;
}
}
pe__set_resource_flags(replica->child->parent, pe_rsc_allocating);
pe_rsc_trace(rsc, "Allocating bundle %s replica child %s",
rsc->id, replica->child->id);
replica->child->cmds->allocate(replica->child, replica->node,
data_set);
pe__clear_resource_flags(replica->child->parent,
pe_rsc_allocating);
}
}
if (bundle_data->child) {
pe_node_t *node = NULL;
GHashTableIter iter;
g_hash_table_iter_init(&iter, bundle_data->child->allowed_nodes);
while (g_hash_table_iter_next(&iter, NULL, (gpointer *) & node)) {
if (is_bundle_node(bundle_data, node)) {
node->weight = 0;
} else {
node->weight = -INFINITY;
}
}
pe_rsc_trace(rsc, "Allocating bundle %s child %s",
rsc->id, bundle_data->child->id);
bundle_data->child->cmds->allocate(bundle_data->child, prefer, data_set);
}
pe__clear_resource_flags(rsc, pe_rsc_allocating|pe_rsc_provisional);
return NULL;
}
void
pcmk__bundle_create_actions(pe_resource_t *rsc, pe_working_set_t *data_set)
{
pe_action_t *action = NULL;
GList *containers = NULL;
pe__bundle_variant_data_t *bundle_data = NULL;
CRM_CHECK(rsc != NULL, return);
containers = get_container_list(rsc);
get_bundle_variant_data(bundle_data, rsc);
for (GList *gIter = bundle_data->replicas; gIter != NULL;
gIter = gIter->next) {
pe__bundle_replica_t *replica = gIter->data;
CRM_ASSERT(replica);
if (replica->ip) {
replica->ip->cmds->create_actions(replica->ip, data_set);
}
if (replica->container) {
replica->container->cmds->create_actions(replica->container,
data_set);
}
if (replica->remote) {
replica->remote->cmds->create_actions(replica->remote, data_set);
}
}
clone_create_pseudo_actions(rsc, containers, NULL, NULL, data_set);
if (bundle_data->child) {
bundle_data->child->cmds->create_actions(bundle_data->child, data_set);
if (pcmk_is_set(bundle_data->child->flags, pe_rsc_promotable)) {
/* promote */
pcmk__new_rsc_pseudo_action(rsc, RSC_PROMOTE, true, true);
action = pcmk__new_rsc_pseudo_action(rsc, RSC_PROMOTED, true, true);
action->priority = INFINITY;
/* demote */
pcmk__new_rsc_pseudo_action(rsc, RSC_DEMOTE, true, true);
action = pcmk__new_rsc_pseudo_action(rsc, RSC_DEMOTED, true, true);
action->priority = INFINITY;
}
}
g_list_free(containers);
}
void
pcmk__bundle_internal_constraints(pe_resource_t *rsc,
pe_working_set_t *data_set)
{
pe__bundle_variant_data_t *bundle_data = NULL;
CRM_CHECK(rsc != NULL, return);
get_bundle_variant_data(bundle_data, rsc);
if (bundle_data->child) {
pcmk__order_resource_actions(rsc, RSC_START, bundle_data->child,
RSC_START, pe_order_implies_first_printed,
data_set);
pcmk__order_resource_actions(rsc, RSC_STOP, bundle_data->child,
RSC_STOP, pe_order_implies_first_printed,
data_set);
if (bundle_data->child->children) {
pcmk__order_resource_actions(bundle_data->child, RSC_STARTED, rsc,
RSC_STARTED,
pe_order_implies_then_printed,
data_set);
pcmk__order_resource_actions(bundle_data->child, RSC_STOPPED, rsc,
RSC_STOPPED,
pe_order_implies_then_printed,
data_set);
} else {
pcmk__order_resource_actions(bundle_data->child, RSC_START, rsc,
RSC_STARTED,
pe_order_implies_then_printed,
data_set);
pcmk__order_resource_actions(bundle_data->child, RSC_STOP, rsc,
RSC_STOPPED,
pe_order_implies_then_printed,
data_set);
}
}
for (GList *gIter = bundle_data->replicas; gIter != NULL;
gIter = gIter->next) {
pe__bundle_replica_t *replica = gIter->data;
CRM_ASSERT(replica);
CRM_ASSERT(replica->container);
replica->container->cmds->internal_constraints(replica->container,
data_set);
pcmk__order_starts(rsc, replica->container,
pe_order_runnable_left|pe_order_implies_first_printed,
data_set);
if (replica->child) {
pcmk__order_stops(rsc, replica->child,
pe_order_implies_first_printed, data_set);
}
pcmk__order_stops(rsc, replica->container,
pe_order_implies_first_printed, data_set);
pcmk__order_resource_actions(replica->container, RSC_START, rsc,
RSC_STARTED, pe_order_implies_then_printed,
data_set);
pcmk__order_resource_actions(replica->container, RSC_STOP, rsc,
RSC_STOPPED, pe_order_implies_then_printed,
data_set);
if (replica->ip) {
replica->ip->cmds->internal_constraints(replica->ip, data_set);
// Start IP then container
pcmk__order_starts(replica->ip, replica->container,
pe_order_runnable_left|pe_order_preserve,
data_set);
pcmk__order_stops(replica->container, replica->ip,
pe_order_implies_first|pe_order_preserve,
data_set);
pcmk__new_colocation("ip-with-docker", NULL, INFINITY, replica->ip,
replica->container, NULL, NULL, true,
data_set);
}
if (replica->remote) {
/* This handles ordering and colocating remote relative to container
* (via "resource-with-container"). Since IP is also ordered and
* colocated relative to the container, we don't need to do anything
* explicit here with IP.
*/
replica->remote->cmds->internal_constraints(replica->remote,
data_set);
}
if (replica->child) {
CRM_ASSERT(replica->remote);
// "Start remote then child" is implicit in scheduler's remote logic
}
}
if (bundle_data->child) {
bundle_data->child->cmds->internal_constraints(bundle_data->child, data_set);
if (pcmk_is_set(bundle_data->child->flags, pe_rsc_promotable)) {
promote_demote_constraints(rsc, data_set);
/* child demoted before global demoted */
pcmk__order_resource_actions(bundle_data->child, RSC_DEMOTED, rsc,
RSC_DEMOTED,
pe_order_implies_then_printed,
data_set);
/* global demote before child demote */
pcmk__order_resource_actions(rsc, RSC_DEMOTE, bundle_data->child,
RSC_DEMOTE,
pe_order_implies_first_printed,
data_set);
/* child promoted before global promoted */
pcmk__order_resource_actions(bundle_data->child, RSC_PROMOTED, rsc,
RSC_PROMOTED,
pe_order_implies_then_printed,
data_set);
/* global promote before child promote */
pcmk__order_resource_actions(rsc, RSC_PROMOTE, bundle_data->child,
RSC_PROMOTE,
pe_order_implies_first_printed,
data_set);
}
}
}
static pe_resource_t *
compatible_replica_for_node(pe_resource_t *rsc_lh, pe_node_t *candidate,
pe_resource_t *rsc, enum rsc_role_e filter,
gboolean current)
{
pe__bundle_variant_data_t *bundle_data = NULL;
CRM_CHECK(candidate != NULL, return NULL);
get_bundle_variant_data(bundle_data, rsc);
crm_trace("Looking for compatible child from %s for %s on %s",
rsc_lh->id, rsc->id, candidate->details->uname);
for (GList *gIter = bundle_data->replicas; gIter != NULL;
gIter = gIter->next) {
pe__bundle_replica_t *replica = gIter->data;
if (is_child_compatible(replica->container, candidate, filter, current)) {
crm_trace("Pairing %s with %s on %s",
rsc_lh->id, replica->container->id,
candidate->details->uname);
return replica->container;
}
}
crm_trace("Can't pair %s with %s", rsc_lh->id, rsc->id);
return NULL;
}
static pe_resource_t *
compatible_replica(pe_resource_t *rsc_lh, pe_resource_t *rsc,
enum rsc_role_e filter, gboolean current,
pe_working_set_t *data_set)
{
GList *scratch = NULL;
pe_resource_t *pair = NULL;
pe_node_t *active_node_lh = NULL;
active_node_lh = rsc_lh->fns->location(rsc_lh, NULL, current);
if (active_node_lh) {
return compatible_replica_for_node(rsc_lh, active_node_lh, rsc, filter,
current);
}
scratch = g_hash_table_get_values(rsc_lh->allowed_nodes);
scratch = pcmk__sort_nodes(scratch, NULL, data_set);
for (GList *gIter = scratch; gIter != NULL; gIter = gIter->next) {
pe_node_t *node = (pe_node_t *) gIter->data;
pair = compatible_replica_for_node(rsc_lh, node, rsc, filter, current);
if (pair) {
goto done;
}
}
pe_rsc_debug(rsc, "Can't pair %s with %s", rsc_lh->id, (rsc? rsc->id : "none"));
done:
g_list_free(scratch);
return pair;
}
void
pcmk__bundle_rsc_colocation_lh(pe_resource_t *dependent, pe_resource_t *primary,
pcmk__colocation_t *constraint,
pe_working_set_t *data_set)
{
/* -- Never called --
*
* Instead we add the colocation constraints to the child and call from there
*/
CRM_ASSERT(FALSE);
}
int copies_per_node(pe_resource_t * rsc)
{
/* Strictly speaking, there should be a 'copies_per_node' addition
 * to the resource function table, and each case here would be a
 * function. However, that would be serious overkill just to return an
 * int. In fact, both function tables could arguably be replaced by
 * resources.{c,h} full of rsc_{some_operation} functions, each
 * containing a switch like the one below that calls out to
 * {variant}_{some_operation} functions as needed.
 */
switch(rsc->variant) {
case pe_unknown:
return 0;
case pe_native:
case pe_group:
return 1;
case pe_clone:
{
const char *max_clones_node = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INCARNATION_NODEMAX);
if (max_clones_node == NULL) {
return 1;
} else {
int max_i;
pcmk__scan_min_int(max_clones_node, &max_i, 0);
return max_i;
}
}
case pe_container:
{
pe__bundle_variant_data_t *data = NULL;
get_bundle_variant_data(data, rsc);
return data->nreplicas_per_host;
}
}
return 0;
}
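/* Illustrative sketch (not called anywhere): trace the per-node ceiling
 * that copies_per_node() reports for a resource. The message wording is
 * an assumption for illustration only.
 */
static void
trace_per_node_ceiling(pe_resource_t *rsc)
{
    int per_node = copies_per_node(rsc);

    crm_trace("%s allows at most %d cop%s per node",
              rsc->id, per_node, (per_node == 1)? "y" : "ies");
}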
void
pcmk__bundle_rsc_colocation_rh(pe_resource_t *dependent, pe_resource_t *primary,
pcmk__colocation_t *constraint,
pe_working_set_t *data_set)
{
GList *allocated_primaries = NULL;
pe__bundle_variant_data_t *bundle_data = NULL;
CRM_CHECK(constraint != NULL, return);
CRM_CHECK(dependent != NULL,
pe_err("dependent was NULL for %s", constraint->id); return);
CRM_CHECK(primary != NULL,
pe_err("primary was NULL for %s", constraint->id); return);
CRM_ASSERT(dependent->variant == pe_native);
if (pcmk_is_set(primary->flags, pe_rsc_provisional)) {
pe_rsc_trace(primary, "%s is still provisional", primary->id);
return;
} else if(constraint->dependent->variant > pe_group) {
pe_resource_t *primary_replica = compatible_replica(dependent, primary,
RSC_ROLE_UNKNOWN,
FALSE, data_set);
if (primary_replica) {
pe_rsc_debug(primary, "Pairing %s with %s",
dependent->id, primary_replica->id);
dependent->cmds->rsc_colocation_lh(dependent, primary_replica,
constraint, data_set);
} else if (constraint->score >= INFINITY) {
crm_notice("Cannot pair %s with instance of %s",
dependent->id, primary->id);
pcmk__assign_resource(dependent, NULL, true);
} else {
pe_rsc_debug(primary, "Cannot pair %s with instance of %s",
dependent->id, primary->id);
}
return;
}
get_bundle_variant_data(bundle_data, primary);
pe_rsc_trace(primary, "Processing constraint %s: %s -> %s %d",
constraint->id, dependent->id, primary->id, constraint->score);
for (GList *gIter = bundle_data->replicas; gIter != NULL;
gIter = gIter->next) {
pe__bundle_replica_t *replica = gIter->data;
if (constraint->score < INFINITY) {
replica->container->cmds->rsc_colocation_rh(dependent,
replica->container,
constraint, data_set);
} else {
pe_node_t *chosen = replica->container->fns->location(replica->container,
NULL, FALSE);
if ((chosen == NULL)
|| is_set_recursive(replica->container, pe_rsc_block, TRUE)) {
continue;
}
if ((constraint->primary_role >= RSC_ROLE_PROMOTED)
&& (replica->child == NULL)) {
continue;
}
if ((constraint->primary_role >= RSC_ROLE_PROMOTED)
&& (replica->child->next_role < RSC_ROLE_PROMOTED)) {
continue;
}
pe_rsc_trace(primary, "Allowing %s: %s %d",
constraint->id, chosen->details->uname,
chosen->weight);
allocated_primaries = g_list_prepend(allocated_primaries, chosen);
}
}
if (constraint->score >= INFINITY) {
node_list_exclude(dependent->allowed_nodes, allocated_primaries, FALSE);
}
g_list_free(allocated_primaries);
}
enum pe_action_flags
pcmk__bundle_action_flags(pe_action_t *action, pe_node_t *node)
{
GList *containers = NULL;
enum pe_action_flags flags = 0;
pe__bundle_variant_data_t *data = NULL;
get_bundle_variant_data(data, action->rsc);
if(data->child) {
enum action_tasks task = get_complex_task(data->child, action->task, TRUE);
switch(task) {
case no_action:
case action_notify:
case action_notified:
case action_promote:
case action_promoted:
case action_demote:
case action_demoted:
return summary_action_flags(action, data->child->children, node);
default:
break;
}
}
containers = get_container_list(action->rsc);
flags = summary_action_flags(action, containers, node);
g_list_free(containers);
return flags;
}
pe_resource_t *
find_compatible_child_by_node(pe_resource_t * local_child, pe_node_t * local_node, pe_resource_t * rsc,
enum rsc_role_e filter, gboolean current)
{
GList *gIter = NULL;
GList *children = NULL;
if (local_node == NULL) {
crm_err("Can't colocate unrunnable child %s with %s", local_child->id, rsc->id);
return NULL;
}
crm_trace("Looking for compatible child from %s for %s on %s",
local_child->id, rsc->id, local_node->details->uname);
children = get_containers_or_children(rsc);
for (gIter = children; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
if(is_child_compatible(child_rsc, local_node, filter, current)) {
crm_trace("Pairing %s with %s on %s",
local_child->id, child_rsc->id, local_node->details->uname);
return child_rsc;
}
}
crm_trace("Can't pair %s with %s", local_child->id, rsc->id);
if(children != rsc->children) {
g_list_free(children);
}
return NULL;
}
static pe__bundle_replica_t *
replica_for_container(pe_resource_t *rsc, pe_resource_t *container,
pe_node_t *node)
{
if (rsc->variant == pe_container) {
pe__bundle_variant_data_t *data = NULL;
get_bundle_variant_data(data, rsc);
for (GList *gIter = data->replicas; gIter != NULL;
gIter = gIter->next) {
pe__bundle_replica_t *replica = gIter->data;
if (replica->child
&& (container == replica->container)
&& (node->details == replica->node->details)) {
return replica;
}
}
}
return NULL;
}
static enum pe_graph_flags
multi_update_interleave_actions(pe_action_t *first, pe_action_t *then,
pe_node_t *node, enum pe_action_flags flags,
enum pe_action_flags filter,
enum pe_ordering type,
pe_working_set_t *data_set)
{
GList *gIter = NULL;
GList *children = NULL;
gboolean current = FALSE;
enum pe_graph_flags changed = pe_graph_none;
/* FIXME: inferring "current" from the action UUID suffix is a hack */
if (pcmk__ends_with(first->uuid, "_stopped_0")
|| pcmk__ends_with(first->uuid, "_demoted_0")) {
current = TRUE;
}
children = get_containers_or_children(then->rsc);
for (gIter = children; gIter != NULL; gIter = gIter->next) {
pe_resource_t *then_child = gIter->data;
pe_resource_t *first_child = find_compatible_child(then_child,
first->rsc,
RSC_ROLE_UNKNOWN,
current, data_set);
if (first_child == NULL && current) {
crm_trace("Ignore");
} else if (first_child == NULL) {
crm_debug("No match found for %s (%d / %s / %s)", then_child->id, current, first->uuid, then->uuid);
/* An ugly hack, but what else can we do?
 *
 * If nothing is active or about to be active on the same node as
 * then_child, then then_child must not be allowed to start.
 */
if (type & (pe_order_runnable_left | pe_order_implies_then) /* Mandatory */ ) {
pe_rsc_info(then->rsc, "Inhibiting %s from being active", then_child->id);
if (pcmk__assign_resource(then_child, NULL, true)) {
pe__set_graph_flags(changed, first, pe_graph_updated_then);
}
}
} else {
pe_action_t *first_action = NULL;
pe_action_t *then_action = NULL;
enum action_tasks task = clone_child_action(first);
const char *first_task = task2text(task);
pe__bundle_replica_t *first_replica = NULL;
pe__bundle_replica_t *then_replica = NULL;
first_replica = replica_for_container(first->rsc, first_child,
node);
if (strstr(first->task, "stop") && first_replica && first_replica->child) {
/* Except for 'stopped', we should be looking at the
 * in-container resource; actions for the child will
 * happen later and are therefore more likely to align
 * with the user's intent.
 */
first_action = find_first_action(first_replica->child->actions,
NULL, task2text(task), node);
} else {
first_action = find_first_action(first_child->actions, NULL, task2text(task), node);
}
then_replica = replica_for_container(then->rsc, then_child, node);
if (strstr(then->task, "mote")
&& then_replica && then_replica->child) {
/* Promote/demote actions will never be found for the
* container resource, look in the child instead
*
* Alternatively treat:
* 'XXXX then promote YYYY' as 'XXXX then start container for YYYY', and
* 'demote XXXX then stop YYYY' as 'stop container for XXXX then stop YYYY'
*/
then_action = find_first_action(then_replica->child->actions,
NULL, then->task, node);
} else {
then_action = find_first_action(then_child->actions, NULL, then->task, node);
}
if (first_action == NULL) {
if (!pcmk_is_set(first_child->flags, pe_rsc_orphan)
&& !pcmk__str_any_of(first_task, RSC_STOP, RSC_DEMOTE, NULL)) {
crm_err("Internal error: No action found for %s in %s (first)",
first_task, first_child->id);
} else {
crm_trace("No action found for %s in %s%s (first)",
first_task, first_child->id,
pcmk_is_set(first_child->flags, pe_rsc_orphan)? " (ORPHAN)" : "");
}
continue;
}
/* We're only interested if 'then' is neither stopping nor being demoted */
if (then_action == NULL) {
if (!pcmk_is_set(then_child->flags, pe_rsc_orphan)
&& !pcmk__str_any_of(then->task, RSC_STOP, RSC_DEMOTE, NULL)) {
crm_err("Internal error: No action found for %s in %s (then)",
then->task, then_child->id);
} else {
crm_trace("No action found for %s in %s%s (then)",
then->task, then_child->id,
pcmk_is_set(then_child->flags, pe_rsc_orphan)? " (ORPHAN)" : "");
}
continue;
}
if (order_actions(first_action, then_action, type)) {
crm_debug("Created constraint for %s (%d) -> %s (%d) %.6x",
first_action->uuid,
pcmk_is_set(first_action->flags, pe_action_optional),
then_action->uuid,
pcmk_is_set(then_action->flags, pe_action_optional),
type);
pe__set_graph_flags(changed, first,
pe_graph_updated_first|pe_graph_updated_then);
}
if(first_action && then_action) {
changed |= then_child->cmds->update_actions(first_action,
then_action, node,
first_child->cmds->action_flags(first_action, node),
filter, type, data_set);
} else {
crm_err("Nothing found either for %s (%p) or %s (%p) %s",
first_child->id, first_action,
then_child->id, then_action, task2text(task));
}
}
}
if(children != then->rsc->children) {
g_list_free(children);
}
return changed;
}
static bool
can_interleave_actions(pe_action_t *first, pe_action_t *then)
{
bool interleave = FALSE;
pe_resource_t *rsc = NULL;
const char *interleave_s = NULL;
if(first->rsc == NULL || then->rsc == NULL) {
crm_trace("Not interleaving %s with %s (both must be resources)", first->uuid, then->uuid);
return FALSE;
} else if(first->rsc == then->rsc) {
crm_trace("Not interleaving %s with %s (must belong to different resources)", first->uuid, then->uuid);
return FALSE;
} else if(first->rsc->variant < pe_clone || then->rsc->variant < pe_clone) {
crm_trace("Not interleaving %s with %s (both sides must be clones or bundles)", first->uuid, then->uuid);
return FALSE;
}
if (pcmk__ends_with(then->uuid, "_stop_0")
|| pcmk__ends_with(then->uuid, "_demote_0")) {
rsc = first->rsc;
} else {
rsc = then->rsc;
}
interleave_s = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INTERLEAVE);
interleave = crm_is_true(interleave_s);
crm_trace("Interleave %s -> %s: %s (based on %s)",
first->uuid, then->uuid, interleave ? "yes" : "no", rsc->id);
return interleave;
}
enum pe_graph_flags
pcmk__multi_update_actions(pe_action_t *first, pe_action_t *then,
pe_node_t *node, enum pe_action_flags flags,
enum pe_action_flags filter, enum pe_ordering type,
pe_working_set_t *data_set)
{
enum pe_graph_flags changed = pe_graph_none;
crm_trace("%s -> %s", first->uuid, then->uuid);
if(can_interleave_actions(first, then)) {
changed = multi_update_interleave_actions(first, then, node, flags,
filter, type, data_set);
} else if(then->rsc) {
GList *gIter = NULL;
GList *children = NULL;
// Handle the 'primitive' ordering case
changed |= native_update_actions(first, then, node, flags, filter,
type, data_set);
// Now any children (or containers in the case of a bundle)
children = get_containers_or_children(then->rsc);
for (gIter = children; gIter != NULL; gIter = gIter->next) {
pe_resource_t *then_child = (pe_resource_t *) gIter->data;
enum pe_graph_flags then_child_changed = pe_graph_none;
pe_action_t *then_child_action = find_first_action(then_child->actions, NULL, then->task, node);
if (then_child_action) {
enum pe_action_flags then_child_flags = then_child->cmds->action_flags(then_child_action, node);
if (pcmk_is_set(then_child_flags, pe_action_runnable)) {
then_child_changed |= then_child->cmds->update_actions(first,
then_child_action, node, flags, filter, type, data_set);
}
changed |= then_child_changed;
if (then_child_changed & pe_graph_updated_then) {
for (GList *lpc = then_child_action->actions_after; lpc != NULL; lpc = lpc->next) {
pe_action_wrapper_t *next = (pe_action_wrapper_t *) lpc->data;
pcmk__update_action_for_orderings(next->action,
data_set);
}
}
}
}
if(children != then->rsc->children) {
g_list_free(children);
}
}
return changed;
}
void
pcmk__bundle_rsc_location(pe_resource_t *rsc, pe__location_t *constraint)
{
pe__bundle_variant_data_t *bundle_data = NULL;
get_bundle_variant_data(bundle_data, rsc);
pcmk__apply_location(constraint, rsc);
for (GList *gIter = bundle_data->replicas; gIter != NULL;
gIter = gIter->next) {
pe__bundle_replica_t *replica = gIter->data;
if (replica->container) {
replica->container->cmds->rsc_location(replica->container,
constraint);
}
if (replica->ip) {
replica->ip->cmds->rsc_location(replica->ip, constraint);
}
}
if (bundle_data->child
&& ((constraint->role_filter == RSC_ROLE_UNPROMOTED)
|| (constraint->role_filter == RSC_ROLE_PROMOTED))) {
bundle_data->child->cmds->rsc_location(bundle_data->child, constraint);
bundle_data->child->rsc_location = g_list_prepend(bundle_data->child->rsc_location,
constraint);
}
}
void
pcmk__bundle_expand(pe_resource_t *rsc, pe_working_set_t * data_set)
{
pe__bundle_variant_data_t *bundle_data = NULL;
CRM_CHECK(rsc != NULL, return);
get_bundle_variant_data(bundle_data, rsc);
if (bundle_data->child) {
bundle_data->child->cmds->expand(bundle_data->child, data_set);
}
for (GList *gIter = bundle_data->replicas; gIter != NULL;
gIter = gIter->next) {
pe__bundle_replica_t *replica = gIter->data;
CRM_ASSERT(replica);
if (replica->remote && replica->container
&& pe__bundle_needs_remote_name(replica->remote, data_set)) {
/* REMOTE_CONTAINER_HACK: Allow remote nodes to run containers that
* run pacemaker-remoted inside, without needing a separate IP for
* the container. This is done by configuring the inner remote's
* connection host as the magic string "#uname", then
* replacing it with the underlying host when needed.
*/
xmlNode *nvpair = get_xpath_object("//nvpair[@name='" XML_RSC_ATTR_REMOTE_RA_ADDR "']",
replica->remote->xml, LOG_ERR);
const char *calculated_addr = NULL;
// Replace the value in replica->remote->xml (if appropriate)
calculated_addr = pe__add_bundle_remote_name(replica->remote,
data_set,
nvpair, "value");
if (calculated_addr) {
/* Since this is for the bundle as a resource, and not any
* particular action, replace the value in the default
* parameters (not evaluated for node). action2xml() will grab
* it from there to replace it in node-evaluated parameters.
*/
GHashTable *params = pe_rsc_params(replica->remote,
NULL, data_set);
crm_trace("Set address for bundle connection %s to bundle host %s",
replica->remote->id, calculated_addr);
g_hash_table_replace(params,
strdup(XML_RSC_ATTR_REMOTE_RA_ADDR),
strdup(calculated_addr));
} else {
/* The only way to get here is if the remote connection is
* neither currently running nor scheduled to run. That means we
* won't be doing any operations that require addr (only start
* requires it; we additionally use it to compare digests when
* unpacking status, promote, and migrate_from history, but
* that's already happened by this point).
*/
crm_info("Unable to determine address for bundle %s remote connection",
rsc->id);
}
}
if (replica->ip) {
replica->ip->cmds->expand(replica->ip, data_set);
}
if (replica->container) {
replica->container->cmds->expand(replica->container, data_set);
}
if (replica->remote) {
replica->remote->cmds->expand(replica->remote, data_set);
}
}
}
gboolean
pcmk__bundle_create_probe(pe_resource_t *rsc, pe_node_t *node,
pe_action_t *complete, gboolean force,
pe_working_set_t * data_set)
{
bool any_created = FALSE;
pe__bundle_variant_data_t *bundle_data = NULL;
CRM_CHECK(rsc != NULL, return FALSE);
get_bundle_variant_data(bundle_data, rsc);
for (GList *gIter = bundle_data->replicas; gIter != NULL;
gIter = gIter->next) {
pe__bundle_replica_t *replica = gIter->data;
CRM_ASSERT(replica);
if (replica->ip) {
any_created |= replica->ip->cmds->create_probe(replica->ip, node,
complete, force,
data_set);
}
if (replica->child && (node->details == replica->node->details)) {
any_created |= replica->child->cmds->create_probe(replica->child,
node, complete,
force, data_set);
}
if (replica->container) {
bool created = replica->container->cmds->create_probe(replica->container,
node, complete,
force, data_set);
if(created) {
any_created = TRUE;
/* If we're limited to one replica per host (probably due to
 * the lack of an IP range), then we don't want any of our
 * peer containers starting until we've established that no
 * other copies are already running.
 *
 * Partly this is to ensure that nreplicas_per_host is
 * observed, but also to ensure that the containers
 * don't fail to start because the necessary port
 * mappings (which won't include an IP for uniqueness)
 * are already taken.
 */
for (GList *tIter = bundle_data->replicas;
tIter && (bundle_data->nreplicas_per_host == 1);
tIter = tIter->next) {
pe__bundle_replica_t *other = tIter->data;
if ((other != replica) && (other != NULL)
&& (other->container != NULL)) {
pcmk__new_ordering(replica->container,
pcmk__op_key(replica->container->id, RSC_STATUS, 0),
NULL, other->container,
pcmk__op_key(other->container->id, RSC_START, 0),
NULL,
pe_order_optional|pe_order_same_node,
data_set);
}
}
}
}
if (replica->container && replica->remote
&& replica->remote->cmds->create_probe(replica->remote, node,
complete, force,
data_set)) {
/* Do not probe the remote resource until we know where the
* container is running. This is required for REMOTE_CONTAINER_HACK
* to correctly probe remote resources.
*/
char *probe_uuid = pcmk__op_key(replica->remote->id, RSC_STATUS,
0);
pe_action_t *probe = find_first_action(replica->remote->actions,
probe_uuid, NULL, node);
free(probe_uuid);
if (probe) {
any_created = TRUE;
crm_trace("Ordering %s probe on %s",
replica->remote->id, node->details->uname);
pcmk__new_ordering(replica->container,
pcmk__op_key(replica->container->id, RSC_START, 0),
NULL, replica->remote, NULL, probe,
pe_order_probe, data_set);
}
}
}
return any_created;
}
void
pcmk__bundle_append_meta(pe_resource_t *rsc, xmlNode *xml)
{
}
void
pcmk__output_bundle_actions(pe_resource_t *rsc)
{
pe__bundle_variant_data_t *bundle_data = NULL;
CRM_CHECK(rsc != NULL, return);
get_bundle_variant_data(bundle_data, rsc);
for (GList *gIter = bundle_data->replicas; gIter != NULL;
gIter = gIter->next) {
pe__bundle_replica_t *replica = gIter->data;
CRM_ASSERT(replica);
if (replica->ip != NULL) {
replica->ip->cmds->output_actions(replica->ip);
}
if (replica->container != NULL) {
replica->container->cmds->output_actions(replica->container);
}
if (replica->remote != NULL) {
replica->remote->cmds->output_actions(replica->remote);
}
if (replica->child != NULL) {
replica->child->cmds->output_actions(replica->child);
}
}
}
// Bundle implementation of resource_alloc_functions_t:add_utilization()
void
pcmk__bundle_add_utilization(pe_resource_t *rsc, pe_resource_t *orig_rsc,
GList *all_rscs, GHashTable *utilization)
{
pe__bundle_variant_data_t *bundle_data = NULL;
pe__bundle_replica_t *replica = NULL;
if (!pcmk_is_set(rsc->flags, pe_rsc_provisional)) {
return;
}
get_bundle_variant_data(bundle_data, rsc);
if (bundle_data->replicas == NULL) {
return;
}
/* All bundle replicas are identical, so using the utilization of the first
* is sufficient for any. Only the implicit container resource can have
* utilization values.
*/
replica = (pe__bundle_replica_t *) bundle_data->replicas->data;
if (replica->container != NULL) {
replica->container->cmds->add_utilization(replica->container, orig_rsc,
all_rscs, utilization);
}
}
+
+// Bundle implementation of resource_alloc_functions_t:shutdown_lock()
+void
+pcmk__bundle_shutdown_lock(pe_resource_t *rsc)
+{
+ return; // Bundles currently don't support shutdown locks
+}
diff --git a/lib/pacemaker/pcmk_sched_clone.c b/lib/pacemaker/pcmk_sched_clone.c
index a09862ad59..a621c5ba59 100644
--- a/lib/pacemaker/pcmk_sched_clone.c
+++ b/lib/pacemaker/pcmk_sched_clone.c
@@ -1,1562 +1,1569 @@
/*
* Copyright 2004-2022 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#include <crm/msg_xml.h>
#include <pacemaker-internal.h>
#include "libpacemaker_private.h"
#define VARIANT_CLONE 1
#include <lib/pengine/variant.h>
gint sort_clone_instance(gconstpointer a, gconstpointer b, gpointer data_set);
static void append_parent_colocation(pe_resource_t * rsc, pe_resource_t * child, gboolean all);
static gint
sort_rsc_id(gconstpointer a, gconstpointer b)
{
const pe_resource_t *resource1 = (const pe_resource_t *)a;
const pe_resource_t *resource2 = (const pe_resource_t *)b;
long num1, num2;
CRM_ASSERT(resource1 != NULL);
CRM_ASSERT(resource2 != NULL);
/*
* Sort clone instances numerically by instance number, so instance :10
* comes after :9.
*/
num1 = strtol(strrchr(resource1->id, ':') + 1, NULL, 10);
num2 = strtol(strrchr(resource2->id, ':') + 1, NULL, 10);
if (num1 < num2) {
return -1;
} else if (num1 > num2) {
return 1;
}
return 0;
}
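/* Illustration (assumed IDs, not from the source): a plain strcmp() would
 * put "rsc:10" before "rsc:9", while sort_rsc_id() parses the trailing
 * instance number so that "rsc:9" sorts first.
 */
static GList *
sort_instances_numerically(GList *instances) // list of pe_resource_t *
{
    return g_list_sort(instances, sort_rsc_id);
}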
static pe_node_t *
parent_node_instance(const pe_resource_t * rsc, pe_node_t * node)
{
pe_node_t *ret = NULL;
if (node != NULL && rsc->parent) {
ret = pe_hash_table_lookup(rsc->parent->allowed_nodes, node->details->id);
} else if(node != NULL) {
ret = pe_hash_table_lookup(rsc->allowed_nodes, node->details->id);
}
return ret;
}
static gboolean
did_fail(const pe_resource_t * rsc)
{
GList *gIter = rsc->children;
if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
return TRUE;
}
for (; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
if (did_fail(child_rsc)) {
return TRUE;
}
}
return FALSE;
}
/*!
* \internal
* \brief Compare instances based on colocation scores.
*
* Determines the relative order in which \c rsc1 and \c rsc2 should be
* allocated. If one resource compares less than the other, then it
* should be allocated first.
*
* \param[in] rsc1 The first instance to compare.
* \param[in] rsc2 The second instance to compare.
* \param[in] data_set Cluster working set.
*
* \return -1 if `rsc1 < rsc2`,
* 0 if `rsc1 == rsc2`, or
* 1 if `rsc1 > rsc2`
*/
static int
order_instance_by_colocation(const pe_resource_t *rsc1,
const pe_resource_t *rsc2,
pe_working_set_t *data_set)
{
int rc = 0;
pe_node_t *n = NULL;
pe_node_t *node1 = NULL;
pe_node_t *node2 = NULL;
pe_node_t *current_node1 = pe__current_node(rsc1);
pe_node_t *current_node2 = pe__current_node(rsc2);
GList *list1 = NULL;
GList *list2 = NULL;
GHashTable *hash1 = pcmk__strkey_table(NULL, free);
GHashTable *hash2 = pcmk__strkey_table(NULL, free);
/* Clone instances must have parents */
CRM_ASSERT(rsc1->parent != NULL);
CRM_ASSERT(rsc2->parent != NULL);
n = pe__copy_node(current_node1);
g_hash_table_insert(hash1, (gpointer) n->details->id, n);
n = pe__copy_node(current_node2);
g_hash_table_insert(hash2, (gpointer) n->details->id, n);
/* Apply rsc1's parental colocations */
for (GList *gIter = rsc1->parent->rsc_cons; gIter != NULL;
gIter = gIter->next) {
pcmk__colocation_t *constraint = (pcmk__colocation_t *) gIter->data;
crm_trace("Applying %s to %s", constraint->id, rsc1->id);
hash1 = pcmk__native_merge_weights(constraint->primary, rsc1->id, hash1,
constraint->node_attribute,
constraint->score / (float) INFINITY,
0);
}
for (GList *gIter = rsc1->parent->rsc_cons_lhs; gIter != NULL;
gIter = gIter->next) {
pcmk__colocation_t *constraint = (pcmk__colocation_t *) gIter->data;
if (!pcmk__colocation_has_influence(constraint, rsc1)) {
continue;
}
crm_trace("Applying %s to %s", constraint->id, rsc1->id);
hash1 = pcmk__native_merge_weights(constraint->dependent, rsc1->id,
hash1, constraint->node_attribute,
constraint->score / (float) INFINITY,
pe_weights_positive);
}
/* Apply rsc2's parental colocations */
for (GList *gIter = rsc2->parent->rsc_cons; gIter != NULL;
gIter = gIter->next) {
pcmk__colocation_t *constraint = (pcmk__colocation_t *) gIter->data;
crm_trace("Applying %s to %s", constraint->id, rsc2->id);
hash2 = pcmk__native_merge_weights(constraint->primary, rsc2->id, hash2,
constraint->node_attribute,
constraint->score / (float) INFINITY,
0);
}
for (GList *gIter = rsc2->parent->rsc_cons_lhs; gIter;
gIter = gIter->next) {
pcmk__colocation_t *constraint = (pcmk__colocation_t *) gIter->data;
if (!pcmk__colocation_has_influence(constraint, rsc2)) {
continue;
}
crm_trace("Applying %s to %s", constraint->id, rsc2->id);
hash2 = pcmk__native_merge_weights(constraint->dependent, rsc2->id,
hash2, constraint->node_attribute,
constraint->score / (float) INFINITY,
pe_weights_positive);
}
/* Current location score */
node1 = g_hash_table_lookup(hash1, current_node1->details->id);
node2 = g_hash_table_lookup(hash2, current_node2->details->id);
if (node1->weight < node2->weight) {
if (node1->weight < 0) {
crm_trace("%s > %s: current score: %d %d",
rsc1->id, rsc2->id, node1->weight, node2->weight);
rc = -1;
goto out;
} else {
crm_trace("%s < %s: current score: %d %d",
rsc1->id, rsc2->id, node1->weight, node2->weight);
rc = 1;
goto out;
}
} else if (node1->weight > node2->weight) {
crm_trace("%s > %s: current score: %d %d",
rsc1->id, rsc2->id, node1->weight, node2->weight);
rc = -1;
goto out;
}
/* All location scores */
list1 = g_hash_table_get_values(hash1);
list2 = g_hash_table_get_values(hash2);
list1 = pcmk__sort_nodes(list1, current_node1, data_set);
list2 = pcmk__sort_nodes(list2, current_node2, data_set);
for (GList *gIter1 = list1, *gIter2 = list2;
(gIter1 != NULL) && (gIter2 != NULL);
gIter1 = gIter1->next, gIter2 = gIter2->next) {
node1 = (pe_node_t *) gIter1->data;
node2 = (pe_node_t *) gIter2->data;
if (node1 == NULL) {
crm_trace("%s < %s: colocated score NULL", rsc1->id, rsc2->id);
rc = 1;
break;
} else if (node2 == NULL) {
crm_trace("%s > %s: colocated score NULL", rsc1->id, rsc2->id);
rc = -1;
break;
}
if (node1->weight < node2->weight) {
crm_trace("%s < %s: colocated score", rsc1->id, rsc2->id);
rc = 1;
break;
} else if (node1->weight > node2->weight) {
crm_trace("%s > %s: colocated score", rsc1->id, rsc2->id);
rc = -1;
break;
}
}
out:
g_hash_table_destroy(hash1);
g_hash_table_destroy(hash2);
g_list_free(list1);
g_list_free(list2);
return rc;
}
gint
sort_clone_instance(gconstpointer a, gconstpointer b, gpointer data_set)
{
int rc = 0;
pe_node_t *node1 = NULL;
pe_node_t *node2 = NULL;
pe_node_t *current_node1 = NULL;
pe_node_t *current_node2 = NULL;
unsigned int nnodes1 = 0;
unsigned int nnodes2 = 0;
gboolean can1 = TRUE;
gboolean can2 = TRUE;
const pe_resource_t *resource1 = (const pe_resource_t *)a;
const pe_resource_t *resource2 = (const pe_resource_t *)b;
CRM_ASSERT(resource1 != NULL);
CRM_ASSERT(resource2 != NULL);
/* allocation order:
* - active instances
* - instances running on nodes with the least copies
* - active instances on nodes that can't support them or are to be fenced
* - failed instances
* - inactive instances
*/
current_node1 = pe__find_active_on(resource1, &nnodes1, NULL);
current_node2 = pe__find_active_on(resource2, &nnodes2, NULL);
/* If both instances are running and at least one is multiply
* active, give precedence to the one that's running on fewer nodes.
*/
if ((nnodes1 > 0) && (nnodes2 > 0)) {
if (nnodes1 < nnodes2) {
crm_trace("%s < %s: running_on", resource1->id, resource2->id);
return -1;
} else if (nnodes1 > nnodes2) {
crm_trace("%s > %s: running_on", resource1->id, resource2->id);
return 1;
}
}
/* Instance whose current location is available sorts first */
node1 = current_node1;
node2 = current_node2;
if (node1 != NULL) {
pe_node_t *match = pe_hash_table_lookup(resource1->allowed_nodes, node1->details->id);
if (match == NULL || match->weight < 0) {
crm_trace("%s: current location is unavailable", resource1->id);
node1 = NULL;
can1 = FALSE;
}
}
if (node2 != NULL) {
pe_node_t *match = pe_hash_table_lookup(resource2->allowed_nodes, node2->details->id);
if (match == NULL || match->weight < 0) {
crm_trace("%s: current location is unavailable", resource2->id);
node2 = NULL;
can2 = FALSE;
}
}
if (can1 && !can2) {
crm_trace("%s < %s: availability of current location", resource1->id,
resource2->id);
return -1;
} else if (!can1 && can2) {
crm_trace("%s > %s: availability of current location", resource1->id,
resource2->id);
return 1;
}
/* Higher-priority instance sorts first */
if (resource1->priority > resource2->priority) {
crm_trace("%s < %s: priority", resource1->id, resource2->id);
return -1;
} else if (resource1->priority < resource2->priority) {
crm_trace("%s > %s: priority", resource1->id, resource2->id);
return 1;
}
/* Active instance sorts first */
if (node1 == NULL && node2 == NULL) {
crm_trace("%s == %s: not active", resource1->id, resource2->id);
return 0;
} else if (node1 == NULL) {
crm_trace("%s > %s: active", resource1->id, resource2->id);
return 1;
} else if (node2 == NULL) {
crm_trace("%s < %s: active", resource1->id, resource2->id);
return -1;
}
/* Instance whose current node can run resources sorts first */
can1 = pcmk__node_available(node1);
can2 = pcmk__node_available(node2);
if (can1 && !can2) {
crm_trace("%s < %s: can", resource1->id, resource2->id);
return -1;
} else if (!can1 && can2) {
crm_trace("%s > %s: can", resource1->id, resource2->id);
return 1;
}
/* Is the parent allowed to run on the instance's current node?
* Instance with parent allowed sorts first.
*/
node1 = parent_node_instance(resource1, node1);
node2 = parent_node_instance(resource2, node2);
if (node1 == NULL && node2 == NULL) {
crm_trace("%s == %s: not allowed", resource1->id, resource2->id);
return 0;
} else if (node1 == NULL) {
crm_trace("%s > %s: not allowed", resource1->id, resource2->id);
return 1;
} else if (node2 == NULL) {
crm_trace("%s < %s: not allowed", resource1->id, resource2->id);
return -1;
}
/* Does one node have more instances allocated?
* Instance whose current node has fewer instances sorts first.
*/
if (node1->count < node2->count) {
crm_trace("%s < %s: count", resource1->id, resource2->id);
return -1;
} else if (node1->count > node2->count) {
crm_trace("%s > %s: count", resource1->id, resource2->id);
return 1;
}
/* Failed instance sorts first */
can1 = did_fail(resource1);
can2 = did_fail(resource2);
if (can1 && !can2) {
crm_trace("%s > %s: failed", resource1->id, resource2->id);
return 1;
} else if (!can1 && can2) {
crm_trace("%s < %s: failed", resource1->id, resource2->id);
return -1;
}
rc = order_instance_by_colocation(resource1, resource2, data_set);
if (rc != 0) {
return rc;
}
/* Default to lexicographic order by ID */
rc = strcmp(resource1->id, resource2->id);
crm_trace("%s %c %s: default", resource1->id, rc < 0 ? '<' : '>', resource2->id);
return rc;
}
static pe_node_t *
can_run_instance(pe_resource_t * rsc, pe_node_t * node, int limit)
{
pe_node_t *local_node = NULL;
if (node == NULL && rsc->allowed_nodes) {
GHashTableIter iter;
g_hash_table_iter_init(&iter, rsc->allowed_nodes);
while (g_hash_table_iter_next(&iter, NULL, (void **)&local_node)) {
can_run_instance(rsc, local_node, limit);
}
return NULL;
}
if (!node) {
/* make clang analyzer happy */
goto bail;
} else if (!pcmk__node_available(node)) {
goto bail;
} else if (pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
goto bail;
}
local_node = parent_node_instance(rsc, node);
if (local_node == NULL) {
crm_warn("%s cannot run on %s: node not allowed", rsc->id, node->details->uname);
goto bail;
} else if (local_node->weight < 0) {
common_update_score(rsc, node->details->id, local_node->weight);
pe_rsc_trace(rsc, "%s cannot run on %s: Parent node weight doesn't allow it.",
rsc->id, node->details->uname);
} else if (local_node->count < limit) {
pe_rsc_trace(rsc, "%s can run on %s (already running %d)",
rsc->id, node->details->uname, local_node->count);
return local_node;
} else {
pe_rsc_trace(rsc, "%s cannot run on %s: node full (%d >= %d)",
rsc->id, node->details->uname, local_node->count, limit);
}
bail:
if (node) {
common_update_score(rsc, node->details->id, -INFINITY);
}
return NULL;
}
static pe_node_t *
allocate_instance(pe_resource_t *rsc, pe_node_t *prefer, gboolean all_coloc,
int limit, pe_working_set_t *data_set)
{
pe_node_t *chosen = NULL;
GHashTable *backup = NULL;
CRM_ASSERT(rsc);
pe_rsc_trace(rsc, "Checking allocation of %s (preferring %s, using %s parent colocations)",
rsc->id, (prefer? prefer->details->uname: "none"),
(all_coloc? "all" : "some"));
if (!pcmk_is_set(rsc->flags, pe_rsc_provisional)) {
return rsc->fns->location(rsc, NULL, FALSE);
} else if (pcmk_is_set(rsc->flags, pe_rsc_allocating)) {
pe_rsc_debug(rsc, "Dependency loop detected involving %s", rsc->id);
return NULL;
}
/* Only include positive colocation preferences of dependent resources
* if not every node will get a copy of the clone
*/
append_parent_colocation(rsc->parent, rsc, all_coloc);
if (prefer) {
pe_node_t *local_prefer = g_hash_table_lookup(rsc->allowed_nodes, prefer->details->id);
if (local_prefer == NULL || local_prefer->weight < 0) {
pe_rsc_trace(rsc, "Not pre-allocating %s to %s - unavailable", rsc->id,
prefer->details->uname);
return NULL;
}
}
can_run_instance(rsc, NULL, limit);
backup = pcmk__copy_node_table(rsc->allowed_nodes);
pe_rsc_trace(rsc, "Allocating instance %s", rsc->id);
chosen = rsc->cmds->allocate(rsc, prefer, data_set);
if (chosen && prefer && (chosen->details != prefer->details)) {
crm_info("Not pre-allocating %s to %s because %s is better",
rsc->id, prefer->details->uname, chosen->details->uname);
g_hash_table_destroy(rsc->allowed_nodes);
rsc->allowed_nodes = backup;
pcmk__unassign_resource(rsc);
chosen = NULL;
backup = NULL;
}
if (chosen) {
pe_node_t *local_node = parent_node_instance(rsc, chosen);
if (local_node) {
local_node->count++;
} else if (pcmk_is_set(rsc->flags, pe_rsc_managed)) {
/* what to do? we can't enforce per-node limits in this case */
pcmk__config_err("%s not found in %s (list of %d)",
chosen->details->id, rsc->parent->id,
g_hash_table_size(rsc->parent->allowed_nodes));
}
}
if(backup) {
g_hash_table_destroy(backup);
}
return chosen;
}
static void
append_parent_colocation(pe_resource_t * rsc, pe_resource_t * child, gboolean all)
{
GList *gIter = NULL;
gIter = rsc->rsc_cons;
for (; gIter != NULL; gIter = gIter->next) {
pcmk__colocation_t *cons = (pcmk__colocation_t *) gIter->data;
if (all || cons->score < 0 || cons->score == INFINITY) {
child->rsc_cons = g_list_prepend(child->rsc_cons, cons);
}
}
gIter = rsc->rsc_cons_lhs;
for (; gIter != NULL; gIter = gIter->next) {
pcmk__colocation_t *cons = (pcmk__colocation_t *) gIter->data;
if (!pcmk__colocation_has_influence(cons, child)) {
continue;
}
if (all || cons->score < 0) {
child->rsc_cons_lhs = g_list_prepend(child->rsc_cons_lhs, cons);
}
}
}
void
distribute_children(pe_resource_t *rsc, GList *children, GList *nodes,
int max, int per_host_max, pe_working_set_t * data_set);
void
distribute_children(pe_resource_t *rsc, GList *children, GList *nodes,
int max, int per_host_max, pe_working_set_t * data_set)
{
int loop_max = 0;
int allocated = 0;
int available_nodes = 0;
bool all_coloc = false;
/* count now tracks the number of clones currently allocated */
for(GList *nIter = nodes; nIter != NULL; nIter = nIter->next) {
pe_node_t *node = nIter->data;
node->count = 0;
if (pcmk__node_available(node)) {
available_nodes++;
}
}
all_coloc = (max < available_nodes);
if(available_nodes) {
loop_max = max / available_nodes;
}
if (loop_max < 1) {
loop_max = 1;
}
pe_rsc_debug(rsc, "Allocating up to %d %s instances to a possible %d nodes (at most %d per host, %d optimal)",
max, rsc->id, available_nodes, per_host_max, loop_max);
/* Pre-allocate as many instances as we can to their current location */
for (GList *gIter = children; gIter != NULL && allocated < max; gIter = gIter->next) {
pe_resource_t *child = (pe_resource_t *) gIter->data;
pe_node_t *child_node = NULL;
pe_node_t *local_node = NULL;
if ((child->running_on == NULL)
|| !pcmk_is_set(child->flags, pe_rsc_provisional)
|| pcmk_is_set(child->flags, pe_rsc_failed)) {
continue;
}
child_node = pe__current_node(child);
local_node = parent_node_instance(child, child_node);
pe_rsc_trace(rsc,
"Checking pre-allocation of %s to %s (%d remaining of %d)",
child->id, child_node->details->uname, max - allocated,
max);
if (!pcmk__node_available(child_node) || (child_node->weight < 0)) {
pe_rsc_trace(rsc, "Not pre-allocating because %s can not run %s",
child_node->details->uname, child->id);
continue;
}
if ((local_node != NULL) && (local_node->count >= loop_max)) {
pe_rsc_trace(rsc,
"Not pre-allocating because %s already allocated "
"optimal instances", child_node->details->uname);
continue;
}
if (allocate_instance(child, child_node, all_coloc, per_host_max,
data_set)) {
pe_rsc_trace(rsc, "Pre-allocated %s to %s", child->id,
child_node->details->uname);
allocated++;
}
}
pe_rsc_trace(rsc, "Done pre-allocating (%d of %d)", allocated, max);
for (GList *gIter = children; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child = (pe_resource_t *) gIter->data;
if (child->running_on != NULL) {
pe_node_t *child_node = pe__current_node(child);
pe_node_t *local_node = parent_node_instance(child, child_node);
if (local_node == NULL) {
crm_err("%s is running on %s which isn't allowed",
child->id, child_node->details->uname);
}
}
if (!pcmk_is_set(child->flags, pe_rsc_provisional)) {
} else if (allocated >= max) {
pe_rsc_debug(rsc, "Child %s not allocated - limit reached %d %d", child->id, allocated, max);
resource_location(child, NULL, -INFINITY, "clone:limit_reached", data_set);
} else {
if (allocate_instance(child, NULL, all_coloc, per_host_max,
data_set)) {
allocated++;
}
}
}
pe_rsc_debug(rsc, "Allocated %d %s instances of a possible %d",
allocated, rsc->id, max);
}
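/* Illustrative sketch (not part of this patch): how distribute_children()
 * derives its per-node "optimal" count. With clone-max=8 across 3
 * available nodes, loop_max is 2 (integer division, floored at 1), so the
 * pre-allocation pass leaves at most 2 instances on any one node before
 * the general allocation pass runs.
 */
static int
example_loop_max(int max, int available_nodes)
{
    int loop_max = 0;

    if (available_nodes > 0) {
        loop_max = max / available_nodes;
    }
    if (loop_max < 1) {
        loop_max = 1;
    }
    return loop_max;
}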
pe_node_t *
pcmk__clone_allocate(pe_resource_t *rsc, pe_node_t *prefer,
pe_working_set_t *data_set)
{
GList *nodes = NULL;
clone_variant_data_t *clone_data = NULL;
get_clone_variant_data(clone_data, rsc);
if (!pcmk_is_set(rsc->flags, pe_rsc_provisional)) {
return NULL;
} else if (pcmk_is_set(rsc->flags, pe_rsc_allocating)) {
pe_rsc_debug(rsc, "Dependency loop detected involving %s", rsc->id);
return NULL;
}
if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) {
pcmk__add_promotion_scores(rsc);
}
pe__set_resource_flags(rsc, pe_rsc_allocating);
/* this information is used by sort_clone_instance() when deciding in which
* order to allocate clone instances
*/
for (GList *gIter = rsc->rsc_cons; gIter != NULL; gIter = gIter->next) {
pcmk__colocation_t *constraint = (pcmk__colocation_t *) gIter->data;
pe_rsc_trace(rsc, "%s: Allocating %s first",
rsc->id, constraint->primary->id);
constraint->primary->cmds->allocate(constraint->primary, prefer,
data_set);
}
for (GList *gIter = rsc->rsc_cons_lhs; gIter != NULL; gIter = gIter->next) {
pcmk__colocation_t *constraint = (pcmk__colocation_t *) gIter->data;
if (!pcmk__colocation_has_influence(constraint, NULL)) {
continue;
}
rsc->allowed_nodes = constraint->dependent->cmds->merge_weights(
constraint->dependent, rsc->id, rsc->allowed_nodes,
constraint->node_attribute, (float)constraint->score / INFINITY,
(pe_weights_rollback | pe_weights_positive));
}
pe__show_node_weights(!pcmk_is_set(data_set->flags, pe_flag_show_scores),
rsc, __func__, rsc->allowed_nodes, data_set);
nodes = g_hash_table_get_values(rsc->allowed_nodes);
nodes = pcmk__sort_nodes(nodes, NULL, data_set);
rsc->children = g_list_sort_with_data(rsc->children, sort_clone_instance, data_set);
distribute_children(rsc, rsc->children, nodes, clone_data->clone_max, clone_data->clone_node_max, data_set);
g_list_free(nodes);
if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) {
pcmk__set_instance_roles(rsc, data_set);
}
pe__clear_resource_flags(rsc, pe_rsc_provisional|pe_rsc_allocating);
pe_rsc_trace(rsc, "Done allocating %s", rsc->id);
return NULL;
}
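/* Illustrative sketch (not part of this patch): how a colocation score is
 * normalized into the merge factor passed to merge_weights() above. In
 * Pacemaker, INFINITY is the score ceiling, so a colocation score of half
 * of INFINITY folds the dependent's node weights in at half strength,
 * while a mandatory (INFINITY) colocation folds them in at full strength.
 */
static float
example_merge_factor(int colocation_score)
{
    return (float) colocation_score / INFINITY;
}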
static void
clone_update_pseudo_status(pe_resource_t * rsc, gboolean * stopping, gboolean * starting,
gboolean * active)
{
GList *gIter = NULL;
if (rsc->children) {
gIter = rsc->children;
for (; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child = (pe_resource_t *) gIter->data;
clone_update_pseudo_status(child, stopping, starting, active);
}
return;
}
CRM_ASSERT(active != NULL);
CRM_ASSERT(starting != NULL);
CRM_ASSERT(stopping != NULL);
if (rsc->running_on) {
*active = TRUE;
}
gIter = rsc->actions;
for (; gIter != NULL; gIter = gIter->next) {
pe_action_t *action = (pe_action_t *) gIter->data;
if (*starting && *stopping) {
return;
} else if (pcmk_is_set(action->flags, pe_action_optional)) {
pe_rsc_trace(rsc, "Skipping optional: %s", action->uuid);
continue;
} else if (!pcmk_any_flags_set(action->flags,
pe_action_pseudo|pe_action_runnable)) {
pe_rsc_trace(rsc, "Skipping unrunnable: %s", action->uuid);
continue;
} else if (pcmk__str_eq(RSC_STOP, action->task, pcmk__str_casei)) {
pe_rsc_trace(rsc, "Stopping due to: %s", action->uuid);
*stopping = TRUE;
} else if (pcmk__str_eq(RSC_START, action->task, pcmk__str_casei)) {
if (!pcmk_is_set(action->flags, pe_action_runnable)) {
pe_rsc_trace(rsc, "Skipping pseudo-op: %s run=%d, pseudo=%d",
action->uuid,
pcmk_is_set(action->flags, pe_action_runnable),
pcmk_is_set(action->flags, pe_action_pseudo));
} else {
pe_rsc_trace(rsc, "Starting due to: %s", action->uuid);
pe_rsc_trace(rsc, "%s run=%d, pseudo=%d",
action->uuid,
pcmk_is_set(action->flags, pe_action_runnable),
pcmk_is_set(action->flags, pe_action_pseudo));
*starting = TRUE;
}
}
}
}
static pe_action_t *
find_rsc_action(pe_resource_t *rsc, const char *task)
{
pe_action_t *match = NULL;
GList *actions = pe__resource_actions(rsc, NULL, task, FALSE);
for (GList *item = actions; item != NULL; item = item->next) {
pe_action_t *op = (pe_action_t *) item->data;
if (!pcmk_is_set(op->flags, pe_action_optional)) {
if (match != NULL) {
// More than one match, don't return any
match = NULL;
break;
}
match = op;
}
}
g_list_free(actions);
return match;
}
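/* Illustrative sketch (not part of this patch): the "exactly one match"
 * rule find_rsc_action() applies, reduced to an array search. Zero matches
 * and multiple matches both yield "no answer", because an ordering
 * constraint built on an ambiguous action would be arbitrary. Hypothetical
 * helper for clarity only.
 */
static int
example_find_unique(const int *vals, int len, gboolean (*pred)(int))
{
    int found = -1;

    for (int i = 0; i < len; i++) {
        if (pred(vals[i])) {
            if (found >= 0) {
                return -1;      // second match: refuse to choose
            }
            found = i;
        }
    }
    return found;               // -1 if nothing matched
}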
static void
child_ordering_constraints(pe_resource_t * rsc, pe_working_set_t * data_set)
{
pe_action_t *stop = NULL;
pe_action_t *start = NULL;
pe_action_t *last_stop = NULL;
pe_action_t *last_start = NULL;
GList *gIter = NULL;
clone_variant_data_t *clone_data = NULL;
get_clone_variant_data(clone_data, rsc);
if (clone_data->ordered == FALSE) {
return;
}
/* we have to maintain a consistent sorted child list when building order constraints */
rsc->children = g_list_sort(rsc->children, sort_rsc_id);
for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child = (pe_resource_t *) gIter->data;
stop = find_rsc_action(child, RSC_STOP);
if (stop) {
if (last_stop) {
/* child/child relative stop */
order_actions(stop, last_stop, pe_order_optional);
}
last_stop = stop;
}
start = find_rsc_action(child, RSC_START);
if (start) {
if (last_start) {
/* child/child relative start */
order_actions(last_start, start, pe_order_optional);
}
last_start = start;
}
}
}
void
clone_create_actions(pe_resource_t *rsc, pe_working_set_t *data_set)
{
clone_variant_data_t *clone_data = NULL;
get_clone_variant_data(clone_data, rsc);
clone_create_pseudo_actions(rsc, rsc->children, &clone_data->start_notify, &clone_data->stop_notify,data_set);
child_ordering_constraints(rsc, data_set);
if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) {
create_promotable_actions(rsc, data_set);
}
}
void
clone_create_pseudo_actions(
pe_resource_t * rsc, GList *children, notify_data_t **start_notify, notify_data_t **stop_notify, pe_working_set_t * data_set)
{
gboolean child_active = FALSE;
gboolean child_starting = FALSE;
gboolean child_stopping = FALSE;
gboolean allow_dependent_migrations = TRUE;
pe_action_t *stop = NULL;
pe_action_t *stopped = NULL;
pe_action_t *start = NULL;
pe_action_t *started = NULL;
pe_rsc_trace(rsc, "Creating actions for %s", rsc->id);
for (GList *gIter = children; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
gboolean starting = FALSE;
gboolean stopping = FALSE;
child_rsc->cmds->create_actions(child_rsc, data_set);
clone_update_pseudo_status(child_rsc, &stopping, &starting, &child_active);
if (stopping && starting) {
allow_dependent_migrations = FALSE;
}
child_stopping |= stopping;
child_starting |= starting;
}
/* start */
start = pcmk__new_rsc_pseudo_action(rsc, RSC_START, !child_starting, true);
started = pcmk__new_rsc_pseudo_action(rsc, RSC_STARTED, !child_starting,
false);
started->priority = INFINITY;
if (child_active || child_starting) {
pe__set_action_flags(started, pe_action_runnable);
}
if (start_notify != NULL && *start_notify == NULL) {
*start_notify = pcmk__clone_notif_pseudo_ops(rsc, RSC_START, start,
started);
}
/* stop */
stop = pcmk__new_rsc_pseudo_action(rsc, RSC_STOP, !child_stopping, true);
stopped = pcmk__new_rsc_pseudo_action(rsc, RSC_STOPPED, !child_stopping,
true);
stopped->priority = INFINITY;
if (allow_dependent_migrations) {
pe__set_action_flags(stop, pe_action_migrate_runnable);
}
if (stop_notify != NULL && *stop_notify == NULL) {
*stop_notify = pcmk__clone_notif_pseudo_ops(rsc, RSC_STOP, stop,
stopped);
if (start_notify && *start_notify && *stop_notify) {
order_actions((*stop_notify)->post_done, (*start_notify)->pre, pe_order_optional);
}
}
}
void
clone_internal_constraints(pe_resource_t *rsc, pe_working_set_t *data_set)
{
pe_resource_t *last_rsc = NULL;
GList *gIter;
clone_variant_data_t *clone_data = NULL;
get_clone_variant_data(clone_data, rsc);
pe_rsc_trace(rsc, "Internal constraints for %s", rsc->id);
pcmk__order_resource_actions(rsc, RSC_STOPPED, rsc, RSC_START,
pe_order_optional, data_set);
pcmk__order_resource_actions(rsc, RSC_START, rsc, RSC_STARTED,
pe_order_runnable_left, data_set);
pcmk__order_resource_actions(rsc, RSC_STOP, rsc, RSC_STOPPED,
pe_order_runnable_left, data_set);
if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) {
pcmk__order_resource_actions(rsc, RSC_DEMOTED, rsc, RSC_STOP,
pe_order_optional, data_set);
pcmk__order_resource_actions(rsc, RSC_STARTED, rsc, RSC_PROMOTE,
pe_order_runnable_left, data_set);
}
if (clone_data->ordered) {
/* we have to maintain a consistent sorted child list when building order constraints */
rsc->children = g_list_sort(rsc->children, sort_rsc_id);
}
for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
child_rsc->cmds->internal_constraints(child_rsc, data_set);
pcmk__order_starts(rsc, child_rsc,
pe_order_runnable_left|pe_order_implies_first_printed,
data_set);
pcmk__order_resource_actions(child_rsc, RSC_START, rsc, RSC_STARTED,
pe_order_implies_then_printed, data_set);
if (clone_data->ordered && last_rsc) {
pcmk__order_starts(last_rsc, child_rsc, pe_order_optional,
data_set);
}
pcmk__order_stops(rsc, child_rsc, pe_order_implies_first_printed,
data_set);
pcmk__order_resource_actions(child_rsc, RSC_STOP, rsc, RSC_STOPPED,
pe_order_implies_then_printed, data_set);
if (clone_data->ordered && last_rsc) {
pcmk__order_stops(child_rsc, last_rsc, pe_order_optional, data_set);
}
last_rsc = child_rsc;
}
if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) {
promotable_constraints(rsc, data_set);
}
}
gboolean
is_child_compatible(pe_resource_t *child_rsc, pe_node_t * local_node, enum rsc_role_e filter, gboolean current)
{
pe_node_t *node = NULL;
enum rsc_role_e next_role = child_rsc->fns->state(child_rsc, current);
CRM_CHECK(child_rsc && local_node, return FALSE);
if (is_set_recursive(child_rsc, pe_rsc_block, TRUE) == FALSE) {
/* We only want instances that aren't blocked (e.g. by failure) */
node = child_rsc->fns->location(child_rsc, NULL, current);
}
if (filter != RSC_ROLE_UNKNOWN && next_role != filter) {
crm_trace("Filtered %s", child_rsc->id);
return FALSE;
}
if (node && (node->details == local_node->details)) {
return TRUE;
} else if (node) {
crm_trace("%s - %s vs %s", child_rsc->id, node->details->uname,
local_node->details->uname);
} else {
crm_trace("%s - not allocated %d", child_rsc->id, current);
}
return FALSE;
}
pe_resource_t *
find_compatible_child(pe_resource_t *local_child, pe_resource_t *rsc,
enum rsc_role_e filter, gboolean current,
pe_working_set_t *data_set)
{
pe_resource_t *pair = NULL;
GList *gIter = NULL;
GList *scratch = NULL;
pe_node_t *local_node = NULL;
local_node = local_child->fns->location(local_child, NULL, current);
if (local_node) {
return find_compatible_child_by_node(local_child, local_node, rsc, filter, current);
}
scratch = g_hash_table_get_values(local_child->allowed_nodes);
scratch = pcmk__sort_nodes(scratch, NULL, data_set);
gIter = scratch;
for (; gIter != NULL; gIter = gIter->next) {
pe_node_t *node = (pe_node_t *) gIter->data;
pair = find_compatible_child_by_node(local_child, node, rsc, filter, current);
if (pair) {
goto done;
}
}
pe_rsc_debug(rsc, "Can't pair %s with %s", local_child->id, rsc->id);
done:
g_list_free(scratch);
return pair;
}
void
clone_rsc_colocation_lh(pe_resource_t *dependent, pe_resource_t *primary,
pcmk__colocation_t *constraint,
pe_working_set_t *data_set)
{
/* -- Never called --
*
* Instead we add the colocation constraints to the child and call from there
*/
CRM_ASSERT(FALSE);
}
void
clone_rsc_colocation_rh(pe_resource_t *dependent, pe_resource_t *primary,
pcmk__colocation_t *constraint,
pe_working_set_t *data_set)
{
GList *gIter = NULL;
gboolean do_interleave = FALSE;
const char *interleave_s = NULL;
CRM_CHECK(constraint != NULL, return);
CRM_CHECK(dependent != NULL,
pe_err("dependent was NULL for %s", constraint->id); return);
CRM_CHECK(primary != NULL,
pe_err("primary was NULL for %s", constraint->id); return);
CRM_CHECK(dependent->variant == pe_native, return);
pe_rsc_trace(primary, "Processing constraint %s: %s -> %s %d",
constraint->id, dependent->id, primary->id, constraint->score);
if (pcmk_is_set(primary->flags, pe_rsc_promotable)) {
if (pcmk_is_set(primary->flags, pe_rsc_provisional)) {
pe_rsc_trace(primary, "%s is still provisional", primary->id);
return;
} else if (constraint->primary_role == RSC_ROLE_UNKNOWN) {
pe_rsc_trace(primary, "Handling %s as a clone colocation",
constraint->id);
} else {
promotable_colocation_rh(dependent, primary, constraint, data_set);
return;
}
}
/* Only the dependent (LHS) side needs to be labeled as interleave */
interleave_s = g_hash_table_lookup(constraint->dependent->meta,
XML_RSC_ATTR_INTERLEAVE);
if (crm_is_true(interleave_s)
&& (constraint->dependent->variant > pe_group)) {
/* @TODO Do we actually care about multiple primary copies sharing a
* dependent copy anymore?
*/
if (copies_per_node(constraint->dependent) != copies_per_node(constraint->primary)) {
pcmk__config_err("Cannot interleave %s and %s because they do not "
"support the same number of instances per node",
constraint->dependent->id,
constraint->primary->id);
} else {
do_interleave = TRUE;
}
}
if (pcmk_is_set(primary->flags, pe_rsc_provisional)) {
pe_rsc_trace(primary, "%s is still provisional", primary->id);
return;
} else if (do_interleave) {
pe_resource_t *primary_instance = NULL;
primary_instance = find_compatible_child(dependent, primary,
RSC_ROLE_UNKNOWN, FALSE,
data_set);
if (primary_instance != NULL) {
pe_rsc_debug(primary, "Pairing %s with %s",
dependent->id, primary_instance->id);
dependent->cmds->rsc_colocation_lh(dependent, primary_instance,
constraint, data_set);
} else if (constraint->score >= INFINITY) {
crm_notice("Cannot pair %s with instance of %s",
dependent->id, primary->id);
pcmk__assign_resource(dependent, NULL, true);
} else {
pe_rsc_debug(primary, "Cannot pair %s with instance of %s",
dependent->id, primary->id);
}
return;
} else if (constraint->score >= INFINITY) {
GList *affected_nodes = NULL;
gIter = primary->children;
for (; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
pe_node_t *chosen = child_rsc->fns->location(child_rsc, NULL, FALSE);
if (chosen != NULL && is_set_recursive(child_rsc, pe_rsc_block, TRUE) == FALSE) {
pe_rsc_trace(primary, "Allowing %s: %s %d",
constraint->id, chosen->details->uname,
chosen->weight);
affected_nodes = g_list_prepend(affected_nodes, chosen);
}
}
node_list_exclude(dependent->allowed_nodes, affected_nodes, FALSE);
g_list_free(affected_nodes);
return;
}
gIter = primary->children;
for (; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
child_rsc->cmds->rsc_colocation_rh(dependent, child_rsc, constraint,
data_set);
}
}
enum action_tasks
clone_child_action(pe_action_t * action)
{
enum action_tasks result = no_action;
pe_resource_t *child = (pe_resource_t *) action->rsc->children->data;
if (pcmk__strcase_any_of(action->task, "notify", "notified", NULL)) {
/* Find the action we're notifying about instead */
int stop = 0;
char *key = action->uuid;
int lpc = strlen(key);
for (; lpc > 0; lpc--) {
if (key[lpc] == '_' && stop == 0) {
stop = lpc;
} else if (key[lpc] == '_') {
char *task_mutable = NULL;
lpc++;
task_mutable = strdup(key + lpc);
task_mutable[stop - lpc] = 0;
crm_trace("Extracted action '%s' from '%s'", task_mutable, key);
result = get_complex_task(child, task_mutable, TRUE);
free(task_mutable);
break;
}
}
} else {
result = get_complex_task(child, action->task, TRUE);
}
return result;
}
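/* Illustrative sketch (not part of this patch): the key parsing performed
 * above for notify actions, on keys of the form "<rsc>_<task>_<interval>".
 * For "vip_start_0" this returns "start". The helper is hypothetical and
 * uses POSIX strndup(); the real code scans with explicit indexes instead.
 */
static char *
example_task_from_key(const char *key)
{
    const char *end = strrchr(key, '_');    // '_' before the interval

    if ((end == NULL) || (end == key)) {
        return NULL;
    }

    const char *begin = end - 1;

    while ((begin > key) && (*begin != '_')) {
        begin--;                            // find the '_' before the task
    }
    if (*begin != '_') {
        return NULL;
    }
    return strndup(begin + 1, end - (begin + 1));
}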
#define pe__clear_action_summary_flags(flags, action, flag) do { \
flags = pcmk__clear_flags_as(__func__, __LINE__, LOG_TRACE, \
"Action summary", action->rsc->id, \
flags, flag, #flag); \
} while (0)
enum pe_action_flags
summary_action_flags(pe_action_t * action, GList *children, pe_node_t * node)
{
GList *gIter = NULL;
gboolean any_runnable = FALSE;
gboolean check_runnable = TRUE;
enum action_tasks task = clone_child_action(action);
enum pe_action_flags flags = (pe_action_optional | pe_action_runnable | pe_action_pseudo);
const char *task_s = task2text(task);
for (gIter = children; gIter != NULL; gIter = gIter->next) {
pe_action_t *child_action = NULL;
pe_resource_t *child = (pe_resource_t *) gIter->data;
child_action = find_first_action(child->actions, NULL, task_s, child->children ? NULL : node);
pe_rsc_trace(action->rsc, "Checking for %s in %s on %s (%s)", task_s, child->id,
node ? node->details->uname : "none", child_action?child_action->uuid:"NA");
if (child_action) {
enum pe_action_flags child_flags = child->cmds->action_flags(child_action, node);
if (pcmk_is_set(flags, pe_action_optional)
&& !pcmk_is_set(child_flags, pe_action_optional)) {
pe_rsc_trace(child, "%s is mandatory because of %s", action->uuid,
child_action->uuid);
pe__clear_action_summary_flags(flags, action, pe_action_optional);
pe__clear_action_flags(action, pe_action_optional);
}
if (pcmk_is_set(child_flags, pe_action_runnable)) {
any_runnable = TRUE;
}
}
}
if (check_runnable && any_runnable == FALSE) {
pe_rsc_trace(action->rsc, "%s is not runnable because no children are", action->uuid);
pe__clear_action_summary_flags(flags, action, pe_action_runnable);
if (node == NULL) {
pe__clear_action_flags(action, pe_action_runnable);
}
}
return flags;
}
enum pe_action_flags
clone_action_flags(pe_action_t * action, pe_node_t * node)
{
return summary_action_flags(action, action->rsc->children, node);
}
void
clone_rsc_location(pe_resource_t *rsc, pe__location_t *constraint)
{
GList *gIter = rsc->children;
pe_rsc_trace(rsc, "Processing location constraint %s for %s", constraint->id, rsc->id);
pcmk__apply_location(constraint, rsc);
for (; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
child_rsc->cmds->rsc_location(child_rsc, constraint);
}
}
void
clone_expand(pe_resource_t * rsc, pe_working_set_t * data_set)
{
GList *gIter = NULL;
clone_variant_data_t *clone_data = NULL;
get_clone_variant_data(clone_data, rsc);
g_list_foreach(rsc->actions, (GFunc) rsc->cmds->action_flags, NULL);
pcmk__create_notifications(rsc, clone_data->start_notify);
pcmk__create_notifications(rsc, clone_data->stop_notify);
pcmk__create_notifications(rsc, clone_data->promote_notify);
pcmk__create_notifications(rsc, clone_data->demote_notify);
/* Now that the notifications have been created, we can expand the children */
gIter = rsc->children;
for (; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
child_rsc->cmds->expand(child_rsc, data_set);
}
native_expand(rsc, data_set);
/* The notifications are in the graph now, we can destroy the notify_data */
pcmk__free_notification_data(clone_data->demote_notify);
clone_data->demote_notify = NULL;
pcmk__free_notification_data(clone_data->stop_notify);
clone_data->stop_notify = NULL;
pcmk__free_notification_data(clone_data->start_notify);
clone_data->start_notify = NULL;
pcmk__free_notification_data(clone_data->promote_notify);
clone_data->promote_notify = NULL;
}
// Check whether a resource or any of its children is known on node
static bool
rsc_known_on(const pe_resource_t *rsc, const pe_node_t *node)
{
if (rsc->children) {
for (GList *child_iter = rsc->children; child_iter != NULL;
child_iter = child_iter->next) {
pe_resource_t *child = (pe_resource_t *) child_iter->data;
if (rsc_known_on(child, node)) {
return TRUE;
}
}
} else if (rsc->known_on) {
GHashTableIter iter;
pe_node_t *known_node = NULL;
g_hash_table_iter_init(&iter, rsc->known_on);
while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &known_node)) {
if (node->details == known_node->details) {
return TRUE;
}
}
}
return FALSE;
}
// Look for an instance of clone that is known on node
static pe_resource_t *
find_instance_on(const pe_resource_t *clone, const pe_node_t *node)
{
for (GList *gIter = clone->children; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child = (pe_resource_t *) gIter->data;
if (rsc_known_on(child, node)) {
return child;
}
}
return NULL;
}
// For unique clones, probe each instance separately
static gboolean
probe_unique_clone(pe_resource_t *rsc, pe_node_t *node, pe_action_t *complete,
gboolean force, pe_working_set_t *data_set)
{
gboolean any_created = FALSE;
for (GList *child_iter = rsc->children; child_iter != NULL;
child_iter = child_iter->next) {
pe_resource_t *child = (pe_resource_t *) child_iter->data;
any_created |= child->cmds->create_probe(child, node, complete, force,
data_set);
}
return any_created;
}
// For anonymous clones, only a single instance needs to be probed
static gboolean
probe_anonymous_clone(pe_resource_t *rsc, pe_node_t *node,
pe_action_t *complete, gboolean force,
pe_working_set_t *data_set)
{
// First, check if we probed an instance on this node last time
pe_resource_t *child = find_instance_on(rsc, node);
// Otherwise, check if we plan to start an instance on this node
if (child == NULL) {
for (GList *child_iter = rsc->children; child_iter && !child;
child_iter = child_iter->next) {
pe_node_t *local_node = NULL;
pe_resource_t *child_rsc = (pe_resource_t *) child_iter->data;
if (child_rsc) { /* make clang analyzer happy */
local_node = child_rsc->fns->location(child_rsc, NULL, FALSE);
if (local_node && (local_node->details == node->details)) {
child = child_rsc;
}
}
}
}
// Otherwise, use the first clone instance
if (child == NULL) {
child = rsc->children->data;
}
CRM_ASSERT(child);
return child->cmds->create_probe(child, node, complete, force, data_set);
}
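/* Illustrative sketch (not part of this patch): probe_anonymous_clone()'s
 * fallback chain, written as a generic first-non-NULL selection. Each
 * chooser may return NULL; the last is assumed to always succeed, just as
 * rsc->children->data does above. Hypothetical helper for clarity only.
 */
static void *
example_probe_target(void *(*known_here)(void),
                     void *(*planned_here)(void),
                     void *(*any_instance)(void))
{
    void *child = known_here();     // 1. instance probed here before?

    if (child == NULL) {
        child = planned_here();     // 2. instance expected to start here?
    }
    if (child == NULL) {
        child = any_instance();     // 3. otherwise, the first instance
    }
    return child;
}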
gboolean
clone_create_probe(pe_resource_t * rsc, pe_node_t * node, pe_action_t * complete,
gboolean force, pe_working_set_t * data_set)
{
gboolean any_created = FALSE;
CRM_ASSERT(rsc);
rsc->children = g_list_sort(rsc->children, sort_rsc_id);
if (rsc->children == NULL) {
pe_warn("Clone %s has no children", rsc->id);
return FALSE;
}
if (rsc->exclusive_discover) {
pe_node_t *allowed = g_hash_table_lookup(rsc->allowed_nodes, node->details->id);
if (allowed && allowed->rsc_discover_mode != pe_discover_exclusive) {
/* Exclusive discovery is enabled, and this node is not marked
 * as a node on which this resource should be discovered.
 *
 * Remove the node from allowed_nodes so that notifications
 * contain only nodes on which the resource might ever run.
 */
g_hash_table_remove(rsc->allowed_nodes, node->details->id);
/* Bit of a shortcut - might as well take it */
return FALSE;
}
}
if (pcmk_is_set(rsc->flags, pe_rsc_unique)) {
any_created = probe_unique_clone(rsc, node, complete, force, data_set);
} else {
any_created = probe_anonymous_clone(rsc, node, complete, force,
data_set);
}
return any_created;
}
void
clone_append_meta(pe_resource_t * rsc, xmlNode * xml)
{
char *name = NULL;
clone_variant_data_t *clone_data = NULL;
get_clone_variant_data(clone_data, rsc);
name = crm_meta_name(XML_RSC_ATTR_UNIQUE);
crm_xml_add(xml, name, pe__rsc_bool_str(rsc, pe_rsc_unique));
free(name);
name = crm_meta_name(XML_RSC_ATTR_NOTIFY);
crm_xml_add(xml, name, pe__rsc_bool_str(rsc, pe_rsc_notify));
free(name);
name = crm_meta_name(XML_RSC_ATTR_INCARNATION_MAX);
crm_xml_add_int(xml, name, clone_data->clone_max);
free(name);
name = crm_meta_name(XML_RSC_ATTR_INCARNATION_NODEMAX);
crm_xml_add_int(xml, name, clone_data->clone_node_max);
free(name);
if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) {
name = crm_meta_name(XML_RSC_ATTR_PROMOTED_MAX);
crm_xml_add_int(xml, name, clone_data->promoted_max);
free(name);
name = crm_meta_name(XML_RSC_ATTR_PROMOTED_NODEMAX);
crm_xml_add_int(xml, name, clone_data->promoted_node_max);
free(name);
/* @COMPAT Maintain backward compatibility with resource agents that
* expect the old names (deprecated since 2.0.0).
*/
name = crm_meta_name(PCMK_XE_PROMOTED_MAX_LEGACY);
crm_xml_add_int(xml, name, clone_data->promoted_max);
free(name);
name = crm_meta_name(PCMK_XE_PROMOTED_NODE_MAX_LEGACY);
crm_xml_add_int(xml, name, clone_data->promoted_node_max);
free(name);
}
}
// Clone implementation of resource_alloc_functions_t:add_utilization()
void
pcmk__clone_add_utilization(pe_resource_t *rsc, pe_resource_t *orig_rsc,
GList *all_rscs, GHashTable *utilization)
{
bool existing = false;
pe_resource_t *child = NULL;
if (!pcmk_is_set(rsc->flags, pe_rsc_provisional)) {
return;
}
// Look for any child already existing in the list
for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
child = (pe_resource_t *) iter->data;
if (g_list_find(all_rscs, child)) {
existing = true; // Keep checking remaining children
} else {
// If this is a clone of a group, look for group's members
for (GList *member_iter = child->children; member_iter != NULL;
member_iter = member_iter->next) {
pe_resource_t *member = (pe_resource_t *) member_iter->data;
if (g_list_find(all_rscs, member) != NULL) {
// Add *child's* utilization, not group member's
child->cmds->add_utilization(child, orig_rsc, all_rscs,
utilization);
existing = true;
break;
}
}
}
}
if (!existing && (rsc->children != NULL)) {
// If nothing was found, still add first child's utilization
child = (pe_resource_t *) rsc->children->data;
child->cmds->add_utilization(child, orig_rsc, all_rscs, utilization);
}
}
+
+// Clone implementation of resource_alloc_functions_t:shutdown_lock()
+void
+pcmk__clone_shutdown_lock(pe_resource_t *rsc)
+{
+ return; // Clones currently don't support shutdown locks
+}
diff --git a/lib/pacemaker/pcmk_sched_group.c b/lib/pacemaker/pcmk_sched_group.c
index cedb66879c..bbda9783d3 100644
--- a/lib/pacemaker/pcmk_sched_group.c
+++ b/lib/pacemaker/pcmk_sched_group.c
@@ -1,683 +1,694 @@
/*
* Copyright 2004-2021 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include
#include
#include
#include
#include "libpacemaker_private.h"
#define VARIANT_GROUP 1
#include
/*!
* \internal
* \brief Expand a group's colocations to its members
*
* \param[in,out] rsc Group resource
*/
static void
expand_group_colocations(pe_resource_t *rsc)
{
group_variant_data_t *group_data = NULL;
pe_resource_t *member = NULL;
bool any_unmanaged = false;
get_group_variant_data(group_data, rsc);
// Treat "group with R" colocations as "first member with R"
member = group_data->first_child;
member->rsc_cons = g_list_concat(member->rsc_cons, rsc->rsc_cons);
/* The above works for the whole group because each group member is
* colocated with the previous one.
*
* However, there is a special case when a group has a mandatory colocation
* with a resource that can't start. In that case,
* pcmk__block_colocated_starts() will ensure that dependent resources in
* mandatory colocations (i.e. the first member for groups) can't start
* either. But if any group member is unmanaged and already started, the
* internal group colocations are no longer sufficient to make that apply to
* later members.
*
* To handle that case, add mandatory colocations to each member after the
* first.
*/
any_unmanaged = !pcmk_is_set(member->flags, pe_rsc_managed);
for (GList *item = rsc->children->next; item != NULL; item = item->next) {
member = item->data;
if (any_unmanaged) {
for (GList *cons_iter = rsc->rsc_cons; cons_iter != NULL;
cons_iter = cons_iter->next) {
pcmk__colocation_t *constraint = (pcmk__colocation_t *) cons_iter->data;
if (constraint->score == INFINITY) {
member->rsc_cons = g_list_prepend(member->rsc_cons, constraint);
}
}
} else if (!pcmk_is_set(member->flags, pe_rsc_managed)) {
any_unmanaged = true;
}
}
rsc->rsc_cons = NULL;
// Treat "R with group" colocations as "R with last member"
member = group_data->last_child;
member->rsc_cons_lhs = g_list_concat(member->rsc_cons_lhs,
rsc->rsc_cons_lhs);
rsc->rsc_cons_lhs = NULL;
}
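/* Illustrative sketch (not part of this patch): the core list moves that
 * expand_group_colocations() performs, minus the unmanaged-member special
 * case. For a colocated group (A -> B -> C), "group with R" lands on A and
 * "R with group" lands on C, since internal colocations already chain each
 * member to its predecessor.
 */
static void
example_expand(pe_resource_t *group, pe_resource_t *first_member,
               pe_resource_t *last_member)
{
    // "group with R" becomes "first member with R"
    first_member->rsc_cons = g_list_concat(first_member->rsc_cons,
                                           group->rsc_cons);
    group->rsc_cons = NULL;

    // "R with group" becomes "R with last member"
    last_member->rsc_cons_lhs = g_list_concat(last_member->rsc_cons_lhs,
                                              group->rsc_cons_lhs);
    group->rsc_cons_lhs = NULL;
}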
pe_node_t *
pcmk__group_allocate(pe_resource_t *rsc, pe_node_t *prefer,
pe_working_set_t *data_set)
{
pe_node_t *node = NULL;
pe_node_t *group_node = NULL;
GList *gIter = NULL;
group_variant_data_t *group_data = NULL;
get_group_variant_data(group_data, rsc);
if (!pcmk_is_set(rsc->flags, pe_rsc_provisional)) {
return rsc->allocated_to;
}
if (pcmk_is_set(rsc->flags, pe_rsc_allocating)) {
pe_rsc_debug(rsc, "Dependency loop detected involving %s", rsc->id);
return NULL;
}
if (group_data->first_child == NULL) {
// Nothing to allocate
pe__clear_resource_flags(rsc, pe_rsc_provisional);
return NULL;
}
pe__set_resource_flags(rsc, pe_rsc_allocating);
rsc->role = group_data->first_child->role;
expand_group_colocations(rsc);
pe__show_node_weights(!pcmk_is_set(data_set->flags, pe_flag_show_scores),
rsc, __func__, rsc->allowed_nodes, data_set);
gIter = rsc->children;
for (; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
pe_rsc_trace(rsc, "Allocating group %s member %s",
rsc->id, child_rsc->id);
node = child_rsc->cmds->allocate(child_rsc, prefer, data_set);
if (group_node == NULL) {
group_node = node;
}
}
pe__set_next_role(rsc, group_data->first_child->next_role,
"first group member");
pe__clear_resource_flags(rsc, pe_rsc_allocating|pe_rsc_provisional);
if (group_data->colocated) {
return group_node;
}
return NULL;
}
void group_update_pseudo_status(pe_resource_t * parent, pe_resource_t * child);
void
group_create_actions(pe_resource_t * rsc, pe_working_set_t * data_set)
{
pe_action_t *op = NULL;
const char *value = NULL;
GList *gIter = rsc->children;
pe_rsc_trace(rsc, "Creating actions for %s", rsc->id);
for (; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
child_rsc->cmds->create_actions(child_rsc, data_set);
group_update_pseudo_status(rsc, child_rsc);
}
op = start_action(rsc, NULL, TRUE /* !group_data->child_starting */ );
pe__set_action_flags(op, pe_action_pseudo|pe_action_runnable);
op = custom_action(rsc, started_key(rsc),
RSC_STARTED, NULL, TRUE /* !group_data->child_starting */ , TRUE, data_set);
pe__set_action_flags(op, pe_action_pseudo|pe_action_runnable);
op = stop_action(rsc, NULL, TRUE /* !group_data->child_stopping */ );
pe__set_action_flags(op, pe_action_pseudo|pe_action_runnable);
op = custom_action(rsc, stopped_key(rsc),
RSC_STOPPED, NULL, TRUE /* !group_data->child_stopping */ , TRUE, data_set);
pe__set_action_flags(op, pe_action_pseudo|pe_action_runnable);
value = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_PROMOTABLE);
if (crm_is_true(value)) {
op = custom_action(rsc, demote_key(rsc), RSC_DEMOTE, NULL, TRUE, TRUE, data_set);
pe__set_action_flags(op, pe_action_pseudo|pe_action_runnable);
op = custom_action(rsc, demoted_key(rsc), RSC_DEMOTED, NULL, TRUE, TRUE, data_set);
pe__set_action_flags(op, pe_action_pseudo|pe_action_runnable);
op = custom_action(rsc, promote_key(rsc), RSC_PROMOTE, NULL, TRUE, TRUE, data_set);
pe__set_action_flags(op, pe_action_pseudo|pe_action_runnable);
op = custom_action(rsc, promoted_key(rsc), RSC_PROMOTED, NULL, TRUE, TRUE, data_set);
pe__set_action_flags(op, pe_action_pseudo|pe_action_runnable);
}
}
void
group_update_pseudo_status(pe_resource_t * parent, pe_resource_t * child)
{
GList *gIter = child->actions;
group_variant_data_t *group_data = NULL;
get_group_variant_data(group_data, parent);
if (group_data->ordered == FALSE) {
/* If this group is not ordered, then leave the meta-actions as optional */
return;
}
if (group_data->child_stopping && group_data->child_starting) {
return;
}
for (; gIter != NULL; gIter = gIter->next) {
pe_action_t *action = (pe_action_t *) gIter->data;
if (pcmk_is_set(action->flags, pe_action_optional)) {
continue;
}
if (pcmk__str_eq(RSC_STOP, action->task, pcmk__str_casei)
&& pcmk_is_set(action->flags, pe_action_runnable)) {
group_data->child_stopping = TRUE;
pe_rsc_trace(action->rsc, "Based on %s the group is stopping", action->uuid);
} else if (pcmk__str_eq(RSC_START, action->task, pcmk__str_casei)
&& pcmk_is_set(action->flags, pe_action_runnable)) {
group_data->child_starting = TRUE;
pe_rsc_trace(action->rsc, "Based on %s the group is starting", action->uuid);
}
}
}
void
group_internal_constraints(pe_resource_t * rsc, pe_working_set_t * data_set)
{
GList *gIter = rsc->children;
pe_resource_t *last_rsc = NULL;
pe_resource_t *last_active = NULL;
pe_resource_t *top = uber_parent(rsc);
group_variant_data_t *group_data = NULL;
get_group_variant_data(group_data, rsc);
pcmk__order_resource_actions(rsc, RSC_STOPPED, rsc, RSC_START,
pe_order_optional, data_set);
pcmk__order_resource_actions(rsc, RSC_START, rsc, RSC_STARTED,
pe_order_runnable_left, data_set);
pcmk__order_resource_actions(rsc, RSC_STOP, rsc, RSC_STOPPED,
pe_order_runnable_left, data_set);
for (; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
int stop = pe_order_none;
int stopped = pe_order_implies_then_printed;
int start = pe_order_implies_then | pe_order_runnable_left;
int started =
pe_order_runnable_left | pe_order_implies_then | pe_order_implies_then_printed;
child_rsc->cmds->internal_constraints(child_rsc, data_set);
if (last_rsc == NULL) {
if (group_data->ordered) {
pe__set_order_flags(stop, pe_order_optional);
stopped = pe_order_implies_then;
}
} else if (group_data->colocated) {
pcmk__new_colocation("group:internal_colocation", NULL, INFINITY,
child_rsc, last_rsc, NULL, NULL,
pcmk_is_set(child_rsc->flags, pe_rsc_critical),
data_set);
}
if (pcmk_is_set(top->flags, pe_rsc_promotable)) {
pcmk__order_resource_actions(rsc, RSC_DEMOTE, child_rsc, RSC_DEMOTE,
stop|pe_order_implies_first_printed,
data_set);
pcmk__order_resource_actions(child_rsc, RSC_DEMOTE, rsc,
RSC_DEMOTED, stopped, data_set);
pcmk__order_resource_actions(child_rsc, RSC_PROMOTE, rsc,
RSC_PROMOTED, started, data_set);
pcmk__order_resource_actions(rsc, RSC_PROMOTE, child_rsc,
RSC_PROMOTE,
pe_order_implies_first_printed,
data_set);
}
pcmk__order_starts(rsc, child_rsc, pe_order_implies_first_printed,
data_set);
pcmk__order_stops(rsc, child_rsc,
stop|pe_order_implies_first_printed, data_set);
pcmk__order_resource_actions(child_rsc, RSC_STOP, rsc, RSC_STOPPED,
stopped, data_set);
pcmk__order_resource_actions(child_rsc, RSC_START, rsc, RSC_STARTED,
started, data_set);
if (group_data->ordered == FALSE) {
pcmk__order_starts(rsc, child_rsc,
start|pe_order_implies_first_printed, data_set);
if (pcmk_is_set(top->flags, pe_rsc_promotable)) {
pcmk__order_resource_actions(rsc, RSC_PROMOTE, child_rsc,
RSC_PROMOTE,
start|pe_order_implies_first_printed,
data_set);
}
} else if (last_rsc != NULL) {
pcmk__order_starts(last_rsc, child_rsc, start, data_set);
pcmk__order_stops(child_rsc, last_rsc,
pe_order_optional|pe_order_restart, data_set);
if (pcmk_is_set(top->flags, pe_rsc_promotable)) {
pcmk__order_resource_actions(last_rsc, RSC_PROMOTE, child_rsc,
RSC_PROMOTE, start, data_set);
pcmk__order_resource_actions(child_rsc, RSC_DEMOTE, last_rsc,
RSC_DEMOTE, pe_order_optional,
data_set);
}
} else {
pcmk__order_starts(rsc, child_rsc, pe_order_none, data_set);
if (pcmk_is_set(top->flags, pe_rsc_promotable)) {
pcmk__order_resource_actions(rsc, RSC_PROMOTE, child_rsc,
RSC_PROMOTE, pe_order_none,
data_set);
}
}
/* Look for partially active groups
* Make sure they still shut down in sequence
*/
if (child_rsc->running_on) {
if (group_data->ordered
&& last_rsc
&& last_rsc->running_on == NULL && last_active && last_active->running_on) {
pcmk__order_stops(child_rsc, last_active, pe_order_optional,
data_set);
}
last_active = child_rsc;
}
last_rsc = child_rsc;
}
if (group_data->ordered && last_rsc != NULL) {
int stop_stop_flags = pe_order_implies_then;
int stop_stopped_flags = pe_order_optional;
pcmk__order_stops(rsc, last_rsc, stop_stop_flags, data_set);
pcmk__order_resource_actions(last_rsc, RSC_STOP, rsc, RSC_STOPPED,
stop_stopped_flags, data_set);
if (pcmk_is_set(top->flags, pe_rsc_promotable)) {
pcmk__order_resource_actions(rsc, RSC_DEMOTE, last_rsc, RSC_DEMOTE,
stop_stop_flags, data_set);
pcmk__order_resource_actions(last_rsc, RSC_DEMOTE, rsc, RSC_DEMOTED,
stop_stopped_flags, data_set);
}
}
}
void
group_rsc_colocation_lh(pe_resource_t *dependent, pe_resource_t *primary,
pcmk__colocation_t *constraint,
pe_working_set_t *data_set)
{
GList *gIter = NULL;
group_variant_data_t *group_data = NULL;
if (dependent == NULL) {
pe_err("dependent was NULL for %s", constraint->id);
return;
} else if (primary == NULL) {
pe_err("primary was NULL for %s", constraint->id);
return;
}
gIter = dependent->children;
pe_rsc_trace(dependent, "Processing constraints from %s", dependent->id);
get_group_variant_data(group_data, dependent);
if (group_data->colocated) {
group_data->first_child->cmds->rsc_colocation_lh(group_data->first_child,
primary, constraint,
data_set);
return;
} else if (constraint->score >= INFINITY) {
pcmk__config_err("%s: Cannot perform mandatory colocation "
"between non-colocated group and %s",
dependent->id, primary->id);
return;
}
for (; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
child_rsc->cmds->rsc_colocation_lh(child_rsc, primary, constraint,
data_set);
}
}
void
group_rsc_colocation_rh(pe_resource_t *dependent, pe_resource_t *primary,
pcmk__colocation_t *constraint,
pe_working_set_t *data_set)
{
GList *gIter = primary->children;
group_variant_data_t *group_data = NULL;
get_group_variant_data(group_data, primary);
CRM_CHECK(dependent->variant == pe_native, return);
pe_rsc_trace(primary, "Processing RH %s of constraint %s (LH is %s)",
primary->id, constraint->id, dependent->id);
if (pcmk_is_set(primary->flags, pe_rsc_provisional)) {
return;
} else if (group_data->colocated && group_data->first_child) {
if (constraint->score >= INFINITY) {
/* Ensure the RHS is _fully_ up before we can start the LHS */
group_data->last_child->cmds->rsc_colocation_rh(dependent,
group_data->last_child,
constraint,
data_set);
} else {
/* A partially active RHS is fine */
group_data->first_child->cmds->rsc_colocation_rh(dependent,
group_data->first_child,
constraint,
data_set);
}
return;
} else if (constraint->score >= INFINITY) {
pcmk__config_err("%s: Cannot perform mandatory colocation with"
" non-colocated group %s", dependent->id, primary->id);
return;
}
for (; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
child_rsc->cmds->rsc_colocation_rh(dependent, child_rsc, constraint,
data_set);
}
}
enum pe_action_flags
group_action_flags(pe_action_t * action, pe_node_t * node)
{
GList *gIter = NULL;
enum pe_action_flags flags = (pe_action_optional | pe_action_runnable | pe_action_pseudo);
for (gIter = action->rsc->children; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child = (pe_resource_t *) gIter->data;
enum action_tasks task = get_complex_task(child, action->task, TRUE);
const char *task_s = task2text(task);
pe_action_t *child_action = find_first_action(child->actions, NULL, task_s, node);
if (child_action) {
enum pe_action_flags child_flags = child->cmds->action_flags(child_action, node);
if (pcmk_is_set(flags, pe_action_optional)
&& !pcmk_is_set(child_flags, pe_action_optional)) {
pe_rsc_trace(action->rsc, "%s is mandatory because of %s", action->uuid,
child_action->uuid);
pe__clear_raw_action_flags(flags, "group action",
pe_action_optional);
pe__clear_action_flags(action, pe_action_optional);
}
if (!pcmk__str_eq(task_s, action->task, pcmk__str_casei)
&& pcmk_is_set(flags, pe_action_runnable)
&& !pcmk_is_set(child_flags, pe_action_runnable)) {
pe_rsc_trace(action->rsc, "%s is not runnable because of %s", action->uuid,
child_action->uuid);
pe__clear_raw_action_flags(flags, "group action",
pe_action_runnable);
pe__clear_action_flags(action, pe_action_runnable);
}
} else if (task != stop_rsc && task != action_demote) {
pe_rsc_trace(action->rsc, "%s is not runnable because of %s (not found in %s)",
action->uuid, task_s, child->id);
pe__clear_raw_action_flags(flags, "group action",
pe_action_runnable);
}
}
return flags;
}
enum pe_graph_flags
group_update_actions(pe_action_t *first, pe_action_t *then, pe_node_t *node,
enum pe_action_flags flags, enum pe_action_flags filter,
enum pe_ordering type, pe_working_set_t *data_set)
{
GList *gIter = then->rsc->children;
enum pe_graph_flags changed = pe_graph_none;
CRM_ASSERT(then->rsc != NULL);
changed |= native_update_actions(first, then, node, flags, filter, type,
data_set);
for (; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child = (pe_resource_t *) gIter->data;
pe_action_t *child_action = find_first_action(child->actions, NULL, then->task, node);
if (child_action) {
changed |= child->cmds->update_actions(first, child_action, node,
flags, filter, type,
data_set);
}
}
return changed;
}
void
group_rsc_location(pe_resource_t *rsc, pe__location_t *constraint)
{
GList *gIter = rsc->children;
GList *saved = constraint->node_list_rh;
GList *zero = pcmk__copy_node_list(constraint->node_list_rh, true);
gboolean reset_scores = TRUE;
group_variant_data_t *group_data = NULL;
get_group_variant_data(group_data, rsc);
pe_rsc_debug(rsc, "Processing rsc_location %s for %s", constraint->id, rsc->id);
pcmk__apply_location(constraint, rsc);
for (; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
child_rsc->cmds->rsc_location(child_rsc, constraint);
if (group_data->colocated && reset_scores) {
reset_scores = FALSE;
constraint->node_list_rh = zero;
}
}
constraint->node_list_rh = saved;
g_list_free_full(zero, free);
}
void
group_expand(pe_resource_t * rsc, pe_working_set_t * data_set)
{
CRM_CHECK(rsc != NULL, return);
pe_rsc_trace(rsc, "Processing actions from %s", rsc->id);
native_expand(rsc, data_set);
for (GList *gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
child_rsc->cmds->expand(child_rsc, data_set);
}
}
GHashTable *
pcmk__group_merge_weights(pe_resource_t *rsc, const char *primary_id,
GHashTable *nodes, const char *attr, float factor,
uint32_t flags)
{
GList *gIter = rsc->rsc_cons_lhs;
group_variant_data_t *group_data = NULL;
get_group_variant_data(group_data, rsc);
if (pcmk_is_set(rsc->flags, pe_rsc_merging)) {
pe_rsc_info(rsc, "Breaking dependency loop with %s at %s",
rsc->id, primary_id);
return nodes;
}
pe__set_resource_flags(rsc, pe_rsc_merging);
nodes = group_data->first_child->cmds->merge_weights(group_data->first_child,
primary_id, nodes,
attr, factor, flags);
for (; gIter != NULL; gIter = gIter->next) {
pcmk__colocation_t *constraint = (pcmk__colocation_t *) gIter->data;
nodes = pcmk__native_merge_weights(constraint->dependent, rsc->id,
nodes, constraint->node_attribute,
constraint->score / (float) INFINITY,
flags);
}
pe__clear_resource_flags(rsc, pe_rsc_merging);
return nodes;
}
void
group_append_meta(pe_resource_t * rsc, xmlNode * xml)
{
}
// Group implementation of resource_alloc_functions_t:colocated_resources()
GList *
pcmk__group_colocated_resources(pe_resource_t *rsc, pe_resource_t *orig_rsc,
GList *colocated_rscs)
{
pe_resource_t *child_rsc = NULL;
group_variant_data_t *group_data = NULL;
get_group_variant_data(group_data, rsc);
if (orig_rsc == NULL) {
orig_rsc = rsc;
}
if (group_data->colocated || pe_rsc_is_clone(rsc->parent)) {
/* This group has colocated members and/or is cloned -- either way,
* add every child's colocated resources to the list.
*/
for (GList *gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
child_rsc = (pe_resource_t *) gIter->data;
colocated_rscs = child_rsc->cmds->colocated_resources(child_rsc,
orig_rsc,
colocated_rscs);
}
} else if (group_data->first_child != NULL) {
/* This group's members are not colocated, and the group is not cloned,
* so just add the first child's colocations to the list.
*/
child_rsc = group_data->first_child;
colocated_rscs = child_rsc->cmds->colocated_resources(child_rsc,
orig_rsc,
colocated_rscs);
}
// Now consider colocations where the group itself is specified
colocated_rscs = pcmk__colocated_resources(rsc, orig_rsc, colocated_rscs);
return colocated_rscs;
}
// Group implementation of resource_alloc_functions_t:add_utilization()
void
pcmk__group_add_utilization(pe_resource_t *rsc, pe_resource_t *orig_rsc,
GList *all_rscs, GHashTable *utilization)
{
group_variant_data_t *group_data = NULL;
pe_resource_t *child = NULL;
if (!pcmk_is_set(rsc->flags, pe_rsc_provisional)) {
return;
}
pe_rsc_trace(orig_rsc, "%s: Adding group %s as colocated utilization",
orig_rsc->id, rsc->id);
get_group_variant_data(group_data, rsc);
if (group_data->colocated || pe_rsc_is_clone(rsc->parent)) {
// Every group member will be on same node, so sum all members
for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
child = (pe_resource_t *) iter->data;
if (pcmk_is_set(child->flags, pe_rsc_provisional)
&& (g_list_find(all_rscs, child) == NULL)) {
child->cmds->add_utilization(child, orig_rsc, all_rscs,
utilization);
}
}
} else {
// Just add first child's utilization
child = group_data->first_child;
if ((child != NULL)
&& pcmk_is_set(child->flags, pe_rsc_provisional)
&& (g_list_find(all_rscs, child) == NULL)) {
child->cmds->add_utilization(child, orig_rsc, all_rscs,
utilization);
}
}
}
+
+// Group implementation of resource_alloc_functions_t:shutdown_lock()
+void
+pcmk__group_shutdown_lock(pe_resource_t *rsc)
+{
+ for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
+ pe_resource_t *child = (pe_resource_t *) iter->data;
+
+ child->cmds->shutdown_lock(child);
+ }
+}
diff --git a/lib/pacemaker/pcmk_sched_native.c b/lib/pacemaker/pcmk_sched_native.c
index 97d6dd8233..808e97540c 100644
--- a/lib/pacemaker/pcmk_sched_native.c
+++ b/lib/pacemaker/pcmk_sched_native.c
@@ -1,2407 +1,2503 @@
/*
* Copyright 2004-2022 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include
#include
#include
#include
#include
#include
#include
#include "libpacemaker_private.h"
// The controller removes the resource from the CIB, making this redundant
// #define DELETE_THEN_REFRESH 1
#define INFINITY_HACK (INFINITY * -100)
#define VARIANT_NATIVE 1
#include
extern bool pcmk__is_daemon;
static void Recurring(pe_resource_t *rsc, pe_action_t *start, pe_node_t *node,
pe_working_set_t *data_set);
static void RecurringOp(pe_resource_t *rsc, pe_action_t *start, pe_node_t *node,
xmlNode *operation, pe_working_set_t *data_set);
static void Recurring_Stopped(pe_resource_t *rsc, pe_action_t *start, pe_node_t *node,
pe_working_set_t *data_set);
static void RecurringOp_Stopped(pe_resource_t *rsc, pe_action_t *start, pe_node_t *node,
xmlNode *operation, pe_working_set_t *data_set);
gboolean DeleteRsc(pe_resource_t * rsc, pe_node_t * node, gboolean optional, pe_working_set_t * data_set);
gboolean StopRsc(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set);
gboolean StartRsc(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set);
gboolean DemoteRsc(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set);
gboolean PromoteRsc(pe_resource_t * rsc, pe_node_t * next, gboolean optional,
pe_working_set_t * data_set);
gboolean RoleError(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set);
gboolean NullOp(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set);
/* This array says what the *next* role should be when transitioning from one
* role to another. For example, going from Stopped to Promoted, the next role is
* RSC_ROLE_UNPROMOTED, because the resource must be started before being promoted.
* The current state then becomes Started, which is fed into this array again,
* giving a next role of RSC_ROLE_PROMOTED.
*/
static enum rsc_role_e rsc_state_matrix[RSC_ROLE_MAX][RSC_ROLE_MAX] = {
/* Current state Next state*/
/* Unknown Stopped Started Unpromoted Promoted */
/* Unknown */ { RSC_ROLE_UNKNOWN, RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_STOPPED },
/* Stopped */ { RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_STARTED, RSC_ROLE_UNPROMOTED, RSC_ROLE_UNPROMOTED },
/* Started */ { RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_STARTED, RSC_ROLE_UNPROMOTED, RSC_ROLE_PROMOTED },
/* Unpromoted */ { RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_UNPROMOTED, RSC_ROLE_PROMOTED },
/* Promoted */ { RSC_ROLE_STOPPED, RSC_ROLE_UNPROMOTED, RSC_ROLE_UNPROMOTED, RSC_ROLE_UNPROMOTED, RSC_ROLE_PROMOTED },
};
typedef gboolean (*rsc_transition_fn)(pe_resource_t *rsc, pe_node_t *next,
gboolean optional,
pe_working_set_t *data_set);
// This array picks the function needed to transition from one role to another
static rsc_transition_fn rsc_action_matrix[RSC_ROLE_MAX][RSC_ROLE_MAX] = {
/* Current state Next state */
/* Unknown Stopped Started Unpromoted Promoted */
/* Unknown */ { RoleError, StopRsc, RoleError, RoleError, RoleError, },
/* Stopped */ { RoleError, NullOp, StartRsc, StartRsc, RoleError, },
/* Started */ { RoleError, StopRsc, NullOp, NullOp, PromoteRsc, },
/* Unpromoted */ { RoleError, StopRsc, StopRsc, NullOp, PromoteRsc, },
/* Promoted */ { RoleError, DemoteRsc, DemoteRsc, DemoteRsc, NullOp, },
};
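/* Illustrative sketch (not part of this patch): how the two matrices are
 * consumed together. To reach a target role, the scheduler hops through
 * intermediate roles one at a time; Stopped -> Promoted therefore becomes
 * StartRsc() to Unpromoted followed by PromoteRsc() to Promoted.
 * Hypothetical helper; return values of the transition functions are
 * ignored here for brevity.
 */
static void
example_walk_roles(pe_resource_t *rsc, enum rsc_role_e current,
                   enum rsc_role_e target, pe_node_t *next,
                   pe_working_set_t *data_set)
{
    while (current != target) {
        enum rsc_role_e hop = rsc_state_matrix[current][target];
        rsc_transition_fn fn = rsc_action_matrix[current][hop];

        fn(rsc, next, FALSE, data_set);     // schedule this transition
        current = hop;                      // advance to the next role
    }
}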
#define clear_node_weights_flags(nw_flags, nw_rsc, flags_to_clear) do { \
flags = pcmk__clear_flags_as(__func__, __LINE__, LOG_TRACE, \
"Node weight", (nw_rsc)->id, (flags), \
(flags_to_clear), #flags_to_clear); \
} while (0)
static bool
native_choose_node(pe_resource_t * rsc, pe_node_t * prefer, pe_working_set_t * data_set)
{
GList *nodes = NULL;
pe_node_t *chosen = NULL;
pe_node_t *best = NULL;
int multiple = 1;
int length = 0;
bool result = false;
pcmk__ban_insufficient_capacity(rsc, &prefer, data_set);
if (!pcmk_is_set(rsc->flags, pe_rsc_provisional)) {
return rsc->allocated_to != NULL;
}
// Sort allowed nodes by weight
if (rsc->allowed_nodes) {
length = g_hash_table_size(rsc->allowed_nodes);
}
if (length > 0) {
nodes = g_hash_table_get_values(rsc->allowed_nodes);
nodes = pcmk__sort_nodes(nodes, pe__current_node(rsc), data_set);
// First node in sorted list has the best score
best = g_list_nth_data(nodes, 0);
}
if (prefer && nodes) {
chosen = g_hash_table_lookup(rsc->allowed_nodes, prefer->details->id);
if (chosen == NULL) {
pe_rsc_trace(rsc, "Preferred node %s for %s was unknown",
prefer->details->uname, rsc->id);
/* Favor the preferred node as long as its weight is at least as good as
* the best allowed node's.
*
* An alternative would be to favor the preferred node even if the best
* node is better, when the best node's weight is less than INFINITY.
*/
} else if ((chosen->weight < 0) || (chosen->weight < best->weight)) {
pe_rsc_trace(rsc, "Preferred node %s for %s was unsuitable",
chosen->details->uname, rsc->id);
chosen = NULL;
} else if (!pcmk__node_available(chosen)) {
pe_rsc_trace(rsc, "Preferred node %s for %s was unavailable",
chosen->details->uname, rsc->id);
chosen = NULL;
} else {
pe_rsc_trace(rsc,
"Chose preferred node %s for %s (ignoring %d candidates)",
chosen->details->uname, rsc->id, length);
}
}
if ((chosen == NULL) && nodes) {
/* Either there is no preferred node, or the preferred node is not
* available, but there are other nodes allowed to run the resource.
*/
chosen = best;
pe_rsc_trace(rsc, "Chose node %s for %s from %d candidates",
chosen ? chosen->details->uname : "<none>", rsc->id, length);
if (!pe_rsc_is_unique_clone(rsc->parent)
&& chosen && (chosen->weight > 0) && pcmk__node_available(chosen)) {
/* If the resource is already running on a node, prefer that node if
* it is just as good as the chosen node.
*
* We don't do this for unique clone instances, because
* distribute_children() has already assigned instances to their
* running nodes when appropriate, and if we get here, we don't want
* remaining unallocated instances to prefer a node that's already
* running another instance.
*/
pe_node_t *running = pe__current_node(rsc);
if ((running != NULL) && !pcmk__node_available(running)) {
pe_rsc_trace(rsc, "Current node for %s (%s) can't run resources",
rsc->id, running->details->uname);
} else if (running) {
for (GList *iter = nodes->next; iter; iter = iter->next) {
pe_node_t *tmp = (pe_node_t *) iter->data;
if (tmp->weight != chosen->weight) {
// The nodes are sorted by weight, so no more are equal
break;
}
if (tmp->details == running->details) {
// Scores are equal, so prefer the current node
chosen = tmp;
}
multiple++;
}
}
}
}
if (multiple > 1) {
static char score[33];
int log_level = (chosen->weight >= INFINITY)? LOG_WARNING : LOG_INFO;
score2char_stack(chosen->weight, score, sizeof(score));
do_crm_log(log_level,
"Chose node %s for %s from %d nodes with score %s",
chosen->details->uname, rsc->id, multiple, score);
}
result = pcmk__assign_primitive(rsc, chosen, false);
g_list_free(nodes);
return result;
}
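/* Illustrative sketch (not part of this patch): the tie-break used above,
 * over an array of weights sorted in descending order. The scan stops at
 * the first weight below the best, and if the index of the currently
 * active node is among the ties, it wins, keeping the resource where it
 * already runs. Hypothetical helper for clarity only.
 */
static int
example_prefer_current(const int *weights, int len, int current_idx)
{
    int chosen = 0;     // index 0 is the best-scored node

    for (int i = 1; i < len; i++) {
        if (weights[i] != weights[chosen]) {
            break;      // sorted descending: no more ties
        }
        if (i == current_idx) {
            chosen = i; // equal score: stay on the current node
        }
    }
    return chosen;
}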
/*!
* \internal
* \brief Find score of highest-scored node that matches colocation attribute
*
* \param[in] rsc Resource whose allowed nodes should be searched
* \param[in] attr Colocation attribute name (must not be NULL)
* \param[in] value Colocation attribute value to require
*/
static int
best_node_score_matching_attr(const pe_resource_t *rsc, const char *attr,
const char *value)
{
GHashTableIter iter;
pe_node_t *node = NULL;
int best_score = -INFINITY;
const char *best_node = NULL;
// Find best allowed node with matching attribute
g_hash_table_iter_init(&iter, rsc->allowed_nodes);
while (g_hash_table_iter_next(&iter, NULL, (void **) &node)) {
if ((node->weight > best_score) && pcmk__node_available(node)
&& pcmk__str_eq(value, pe_node_attribute_raw(node, attr), pcmk__str_casei)) {
best_score = node->weight;
best_node = node->details->uname;
}
}
if (!pcmk__str_eq(attr, CRM_ATTR_UNAME, pcmk__str_casei)) {
if (best_node == NULL) {
crm_info("No allowed node for %s matches node attribute %s=%s",
rsc->id, attr, value);
} else {
crm_info("Allowed node %s for %s had best score (%d) "
"of those matching node attribute %s=%s",
best_node, rsc->id, best_score, attr, value);
}
}
return best_score;
}
/*!
* \internal
* \brief Add resource's colocation matches to current node allocation scores
*
* For each node in a given table, if any of a given resource's allowed nodes
* have a matching value for the colocation attribute, add the highest of those
* nodes' scores to the node's score.
*
* \param[in,out] nodes Hash table of nodes with allocation scores so far
* \param[in] rsc Resource whose allowed nodes should be compared
* \param[in] attr Colocation attribute that must match (NULL for default)
* \param[in] factor Factor by which to multiply scores being added
* \param[in] only_positive Whether to add only positive scores
*/
static void
add_node_scores_matching_attr(GHashTable *nodes, const pe_resource_t *rsc,
const char *attr, float factor,
bool only_positive)
{
GHashTableIter iter;
pe_node_t *node = NULL;
if (attr == NULL) {
attr = CRM_ATTR_UNAME;
}
// Iterate through each node
g_hash_table_iter_init(&iter, nodes);
while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
float weight_f = 0;
int weight = 0;
int score = 0;
int new_score = 0;
score = best_node_score_matching_attr(rsc, attr,
pe_node_attribute_raw(node, attr));
if ((factor < 0) && (score < 0)) {
/* Negative preference for a node with a negative score
* should not become a positive preference.
*
* @TODO Consider filtering only if weight is -INFINITY
*/
crm_trace("%s: Filtering %d + %f * %d (double negative disallowed)",
node->details->uname, node->weight, factor, score);
continue;
}
if (node->weight == INFINITY_HACK) {
crm_trace("%s: Filtering %d + %f * %d (node was marked unusable)",
node->details->uname, node->weight, factor, score);
continue;
}
weight_f = factor * score;
// Round the number; see http://c-faq.com/fp/round.html
weight = (int) ((weight_f < 0)? (weight_f - 0.5) : (weight_f + 0.5));
/* Small factors can obliterate the small scores that are often actually
* used in configurations. If the score and factor are nonzero, ensure
* that the result is nonzero as well.
*/
if ((weight == 0) && (score != 0)) {
if (factor > 0.0) {
weight = 1;
} else if (factor < 0.0) {
weight = -1;
}
}
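        /* Worked example (illustrative numbers): factor 0.5 with score 3
         * gives weight_f = 1.5, which rounds to 2; factor 0.001 with score 5
         * gives weight_f = 0.005, which rounds to 0 and is then bumped to 1,
         * so a nonzero colocation still has some effect.
         */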
new_score = pcmk__add_scores(weight, node->weight);
if (only_positive && (new_score < 0) && (node->weight > 0)) {
crm_trace("%s: Filtering %d + %f * %d = %d "
"(negative disallowed, marking node unusable)",
node->details->uname, node->weight, factor, score,
new_score);
node->weight = INFINITY_HACK;
continue;
}
if (only_positive && (new_score < 0) && (node->weight == 0)) {
crm_trace("%s: Filtering %d + %f * %d = %d (negative disallowed)",
node->details->uname, node->weight, factor, score,
new_score);
continue;
}
crm_trace("%s: %d + %f * %d = %d", node->details->uname,
node->weight, factor, score, new_score);
node->weight = new_score;
}
}
static inline bool
is_nonempty_group(pe_resource_t *rsc)
{
return rsc && (rsc->variant == pe_group) && (rsc->children != NULL);
}
/*!
* \internal
* \brief Incorporate colocation constraint scores into node weights
*
* \param[in,out] rsc Resource being placed
* \param[in] primary_id ID of primary resource in constraint
* \param[in,out] nodes Nodes, with scores as of this point
* \param[in] attr Colocation attribute (ID by default)
* \param[in] factor Incorporate scores multiplied by this factor
* \param[in] flags Bitmask of enum pe_weights values
*
* \return Nodes, with scores modified by this constraint
 * \note This function takes ownership of the nodes argument: it either
 *       returns it as-is or frees it and returns a new table. Callers
 *       should use and free only the returned table.
*/
GHashTable *
pcmk__native_merge_weights(pe_resource_t *rsc, const char *primary_id,
GHashTable *nodes, const char *attr, float factor,
uint32_t flags)
{
GHashTable *work = NULL;
// Avoid infinite recursion
if (pcmk_is_set(rsc->flags, pe_rsc_merging)) {
pe_rsc_info(rsc, "%s: Breaking dependency loop at %s",
primary_id, rsc->id);
return nodes;
}
pe__set_resource_flags(rsc, pe_rsc_merging);
if (pcmk_is_set(flags, pe_weights_init)) {
if (is_nonempty_group(rsc)) {
GList *last = g_list_last(rsc->children);
pe_resource_t *last_rsc = last->data;
pe_rsc_trace(rsc, "%s: Merging scores from group %s "
"using last member %s (at %.6f)",
primary_id, rsc->id, last_rsc->id, factor);
work = pcmk__native_merge_weights(last_rsc, primary_id, NULL, attr,
factor, flags);
} else {
work = pcmk__copy_node_table(rsc->allowed_nodes);
}
clear_node_weights_flags(flags, rsc, pe_weights_init);
} else if (is_nonempty_group(rsc)) {
/* The first member of the group will recursively incorporate any
* constraints involving other members (including the group internal
* colocation).
*
* @TODO The indirect colocations from the dependent group's other
* members will be incorporated at full strength rather than by
* factor, so the group's combined stickiness will be treated as
* (factor + (#members - 1)) * stickiness. It is questionable what
* the right approach should be.
*/
pe_rsc_trace(rsc, "%s: Merging scores from first member of group %s "
"(at %.6f)", primary_id, rsc->id, factor);
work = pcmk__copy_node_table(nodes);
work = pcmk__native_merge_weights(rsc->children->data, primary_id, work,
attr, factor, flags);
} else {
pe_rsc_trace(rsc, "%s: Merging scores from %s (at %.6f)",
primary_id, rsc->id, factor);
work = pcmk__copy_node_table(nodes);
add_node_scores_matching_attr(work, rsc, attr, factor,
pcmk_is_set(flags, pe_weights_positive));
}
if (pcmk__any_node_available(work)) {
GList *gIter = NULL;
int multiplier = (factor < 0)? -1 : 1;
if (pcmk_is_set(flags, pe_weights_forward)) {
gIter = rsc->rsc_cons;
pe_rsc_trace(rsc,
"Checking additional %d optional '%s with' constraints",
g_list_length(gIter), rsc->id);
} else if (is_nonempty_group(rsc)) {
pe_resource_t *last_rsc = g_list_last(rsc->children)->data;
gIter = last_rsc->rsc_cons_lhs;
pe_rsc_trace(rsc, "Checking additional %d optional 'with group %s' "
"constraints using last member %s",
g_list_length(gIter), rsc->id, last_rsc->id);
} else {
gIter = rsc->rsc_cons_lhs;
pe_rsc_trace(rsc,
"Checking additional %d optional 'with %s' constraints",
g_list_length(gIter), rsc->id);
}
for (; gIter != NULL; gIter = gIter->next) {
pe_resource_t *other = NULL;
pcmk__colocation_t *constraint = (pcmk__colocation_t *) gIter->data;
if (pcmk_is_set(flags, pe_weights_forward)) {
other = constraint->primary;
} else if (!pcmk__colocation_has_influence(constraint, NULL)) {
continue;
} else {
other = constraint->dependent;
}
pe_rsc_trace(rsc, "Optionally merging score of '%s' constraint (%s with %s)",
constraint->id, constraint->dependent->id,
constraint->primary->id);
work = pcmk__native_merge_weights(other, primary_id, work,
constraint->node_attribute,
multiplier * constraint->score / (float) INFINITY,
flags|pe_weights_rollback);
pe__show_node_weights(true, NULL, primary_id, work, rsc->cluster);
}
} else if (pcmk_is_set(flags, pe_weights_rollback)) {
pe_rsc_info(rsc, "%s: Rolling back optional scores from %s",
primary_id, rsc->id);
g_hash_table_destroy(work);
pe__clear_resource_flags(rsc, pe_rsc_merging);
return nodes;
}
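    /* Nodes that add_node_scores_matching_attr() marked unusable with
     * INFINITY_HACK are restored to a small positive weight here, so the
     * marker value never leaks into the final placement scores.
     */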
if (pcmk_is_set(flags, pe_weights_positive)) {
pe_node_t *node = NULL;
GHashTableIter iter;
g_hash_table_iter_init(&iter, work);
while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
if (node->weight == INFINITY_HACK) {
node->weight = 1;
}
}
}
if (nodes) {
g_hash_table_destroy(nodes);
}
pe__clear_resource_flags(rsc, pe_rsc_merging);
return work;
}
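/* Usage sketch (illustrative, assuming the usual score cap where INFINITY is
 * 1000000): a colocation with score 5000 is merged with
 * factor = 5000 / (float) INFINITY = 0.005, so each matching node receives
 * the other resource's best score scaled down to half a percent. Finite
 * colocations thus nudge placement rather than dictate it, while a score of
 * INFINITY yields factor 1.
 */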
pe_node_t *
pcmk__native_allocate(pe_resource_t *rsc, pe_node_t *prefer,
pe_working_set_t *data_set)
{
GList *gIter = NULL;
if (rsc->parent && !pcmk_is_set(rsc->parent->flags, pe_rsc_allocating)) {
/* never allocate children on their own */
pe_rsc_debug(rsc, "Escalating allocation of %s to its parent: %s", rsc->id,
rsc->parent->id);
rsc->parent->cmds->allocate(rsc->parent, prefer, data_set);
}
if (!pcmk_is_set(rsc->flags, pe_rsc_provisional)) {
return rsc->allocated_to;
}
if (pcmk_is_set(rsc->flags, pe_rsc_allocating)) {
pe_rsc_debug(rsc, "Dependency loop detected involving %s", rsc->id);
return NULL;
}
pe__set_resource_flags(rsc, pe_rsc_allocating);
pe__show_node_weights(true, rsc, "Pre-alloc", rsc->allowed_nodes, data_set);
for (gIter = rsc->rsc_cons; gIter != NULL; gIter = gIter->next) {
pcmk__colocation_t *constraint = (pcmk__colocation_t *) gIter->data;
GHashTable *archive = NULL;
pe_resource_t *primary = constraint->primary;
if ((constraint->dependent_role >= RSC_ROLE_PROMOTED)
|| (constraint->score < 0 && constraint->score > -INFINITY)) {
archive = pcmk__copy_node_table(rsc->allowed_nodes);
}
pe_rsc_trace(rsc,
"%s: Allocating %s first (constraint=%s score=%d role=%s)",
rsc->id, primary->id, constraint->id,
constraint->score, role2text(constraint->dependent_role));
primary->cmds->allocate(primary, NULL, data_set);
rsc->cmds->rsc_colocation_lh(rsc, primary, constraint, data_set);
if (archive && !pcmk__any_node_available(rsc->allowed_nodes)) {
pe_rsc_info(rsc, "%s: Rolling back scores from %s",
rsc->id, primary->id);
g_hash_table_destroy(rsc->allowed_nodes);
rsc->allowed_nodes = archive;
archive = NULL;
}
if (archive) {
g_hash_table_destroy(archive);
}
}
pe__show_node_weights(true, rsc, "Post-coloc", rsc->allowed_nodes, data_set);
for (gIter = rsc->rsc_cons_lhs; gIter != NULL; gIter = gIter->next) {
pcmk__colocation_t *constraint = (pcmk__colocation_t *) gIter->data;
if (!pcmk__colocation_has_influence(constraint, NULL)) {
continue;
}
pe_rsc_trace(rsc, "Merging score of '%s' constraint (%s with %s)",
constraint->id, constraint->dependent->id,
constraint->primary->id);
rsc->allowed_nodes = constraint->dependent->cmds->merge_weights(
constraint->dependent, rsc->id, rsc->allowed_nodes,
constraint->node_attribute, constraint->score / (float) INFINITY,
pe_weights_rollback);
}
if (rsc->next_role == RSC_ROLE_STOPPED) {
pe_rsc_trace(rsc, "Making sure %s doesn't get allocated", rsc->id);
/* make sure it doesn't come up again */
resource_location(rsc, NULL, -INFINITY, XML_RSC_ATTR_TARGET_ROLE, data_set);
    } else if ((rsc->next_role > rsc->role)
               && !pcmk_is_set(data_set->flags, pe_flag_have_quorum)
               && (data_set->no_quorum_policy == no_quorum_freeze)) {
crm_notice("Resource %s cannot be elevated from %s to %s: no-quorum-policy=freeze",
rsc->id, role2text(rsc->role), role2text(rsc->next_role));
pe__set_next_role(rsc, rsc->role, "no-quorum-policy=freeze");
}
pe__show_node_weights(!pcmk_is_set(data_set->flags, pe_flag_show_scores),
rsc, __func__, rsc->allowed_nodes, data_set);
if (pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)
&& !pcmk_is_set(data_set->flags, pe_flag_have_stonith_resource)) {
pe__clear_resource_flags(rsc, pe_rsc_managed);
}
if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
const char *reason = NULL;
pe_node_t *assign_to = NULL;
pe__set_next_role(rsc, rsc->role, "unmanaged");
assign_to = pe__current_node(rsc);
if (assign_to == NULL) {
reason = "inactive";
} else if (rsc->role == RSC_ROLE_PROMOTED) {
reason = "promoted";
} else if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
reason = "failed";
} else {
reason = "active";
}
pe_rsc_info(rsc, "Unmanaged resource %s allocated to %s: %s", rsc->id,
(assign_to? assign_to->details->uname : "no node"), reason);
pcmk__assign_primitive(rsc, assign_to, true);
} else if (pcmk_is_set(data_set->flags, pe_flag_stop_everything)) {
pe_rsc_debug(rsc, "Forcing %s to stop", rsc->id);
pcmk__assign_primitive(rsc, NULL, true);
} else if (pcmk_is_set(rsc->flags, pe_rsc_provisional)
&& native_choose_node(rsc, prefer, data_set)) {
pe_rsc_trace(rsc, "Allocated resource %s to %s", rsc->id,
rsc->allocated_to->details->uname);
} else if (rsc->allocated_to == NULL) {
if (!pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
pe_rsc_info(rsc, "Resource %s cannot run anywhere", rsc->id);
} else if (rsc->running_on != NULL) {
pe_rsc_info(rsc, "Stopping orphan resource %s", rsc->id);
}
} else {
pe_rsc_debug(rsc, "Pre-Allocated resource %s to %s", rsc->id,
rsc->allocated_to->details->uname);
}
pe__clear_resource_flags(rsc, pe_rsc_allocating);
if (rsc->is_remote_node) {
pe_node_t *remote_node = pe_find_node(data_set->nodes, rsc->id);
CRM_ASSERT(remote_node != NULL);
if (rsc->allocated_to && rsc->next_role != RSC_ROLE_STOPPED) {
crm_trace("Setting Pacemaker Remote node %s to ONLINE",
remote_node->details->id);
remote_node->details->online = TRUE;
            /* We shouldn't consider an unseen remote node unclean if we are
             * going to try to connect to it. Otherwise we get an unnecessary
             * fence.
             */
if (remote_node->details->unseen == TRUE) {
remote_node->details->unclean = FALSE;
}
} else {
crm_trace("Setting Pacemaker Remote node %s to SHUTDOWN (next role %s, %sallocated)",
remote_node->details->id, role2text(rsc->next_role),
(rsc->allocated_to? "" : "un"));
remote_node->details->shutdown = TRUE;
}
}
return rsc->allocated_to;
}
static gboolean
is_op_dup(pe_resource_t *rsc, const char *name, guint interval_ms)
{
gboolean dup = FALSE;
const char *id = NULL;
const char *value = NULL;
xmlNode *operation = NULL;
guint interval2_ms = 0;
CRM_ASSERT(rsc);
for (operation = pcmk__xe_first_child(rsc->ops_xml); operation != NULL;
operation = pcmk__xe_next(operation)) {
if (pcmk__str_eq((const char *)operation->name, "op", pcmk__str_none)) {
value = crm_element_value(operation, "name");
if (!pcmk__str_eq(value, name, pcmk__str_casei)) {
continue;
}
value = crm_element_value(operation, XML_LRM_ATTR_INTERVAL);
interval2_ms = crm_parse_interval_spec(value);
if (interval_ms != interval2_ms) {
continue;
}
if (id == NULL) {
id = ID(operation);
} else {
pcmk__config_err("Operation %s is duplicate of %s (do not use "
"same name and interval combination more "
"than once per resource)", ID(operation), id);
dup = TRUE;
}
}
}
return dup;
}
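/* For example, a configuration defining two operations such as
 *   <op id="m1" name="monitor" interval="10s"/>
 *   <op id="m2" name="monitor" interval="10s"/>
 * under one resource would trigger the duplicate warning above (illustrative
 * CIB snippet; the ids are hypothetical).
 */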
static bool
op_cannot_recur(const char *name)
{
return pcmk__strcase_any_of(name, RSC_STOP, RSC_START, RSC_DEMOTE, RSC_PROMOTE, NULL);
}
static void
RecurringOp(pe_resource_t * rsc, pe_action_t * start, pe_node_t * node,
xmlNode * operation, pe_working_set_t * data_set)
{
char *key = NULL;
const char *name = NULL;
const char *role = NULL;
const char *interval_spec = NULL;
const char *node_uname = node? node->details->uname : "n/a";
guint interval_ms = 0;
pe_action_t *mon = NULL;
gboolean is_optional = TRUE;
GList *possible_matches = NULL;
CRM_ASSERT(rsc);
/* Only process for the operations without role="Stopped" */
role = crm_element_value(operation, "role");
if (role && text2role(role) == RSC_ROLE_STOPPED) {
return;
}
interval_spec = crm_element_value(operation, XML_LRM_ATTR_INTERVAL);
interval_ms = crm_parse_interval_spec(interval_spec);
if (interval_ms == 0) {
return;
}
name = crm_element_value(operation, "name");
if (is_op_dup(rsc, name, interval_ms)) {
crm_trace("Not creating duplicate recurring action %s for %dms %s",
ID(operation), interval_ms, name);
return;
}
if (op_cannot_recur(name)) {
pcmk__config_err("Ignoring %s because action '%s' cannot be recurring",
ID(operation), name);
return;
}
key = pcmk__op_key(rsc->id, name, interval_ms);
if (find_rsc_op_entry(rsc, key) == NULL) {
crm_trace("Not creating recurring action %s for disabled resource %s",
ID(operation), rsc->id);
free(key);
return;
}
pe_rsc_trace(rsc, "Creating recurring action %s for %s in role %s on %s",
ID(operation), rsc->id, role2text(rsc->next_role), node_uname);
if (start != NULL) {
pe_rsc_trace(rsc, "Marking %s %s due to %s", key,
pcmk_is_set(start->flags, pe_action_optional)? "optional" : "mandatory",
start->uuid);
is_optional = (rsc->cmds->action_flags(start, NULL) & pe_action_optional);
} else {
pe_rsc_trace(rsc, "Marking %s optional", key);
is_optional = TRUE;
}
/* start a monitor for an already active resource */
possible_matches = find_actions_exact(rsc->actions, key, node);
if (possible_matches == NULL) {
is_optional = FALSE;
pe_rsc_trace(rsc, "Marking %s mandatory: not active", key);
} else {
GList *gIter = NULL;
for (gIter = possible_matches; gIter != NULL; gIter = gIter->next) {
pe_action_t *op = (pe_action_t *) gIter->data;
if (pcmk_is_set(op->flags, pe_action_reschedule)) {
is_optional = FALSE;
break;
}
}
g_list_free(possible_matches);
}
if (((rsc->next_role == RSC_ROLE_PROMOTED) && (role == NULL))
|| (role != NULL && text2role(role) != rsc->next_role)) {
int log_level = LOG_TRACE;
const char *result = "Ignoring";
if (is_optional) {
char *after_key = NULL;
pe_action_t *cancel_op = NULL;
// It's running, so cancel it
log_level = LOG_INFO;
result = "Cancelling";
cancel_op = pcmk__new_cancel_action(rsc, name, interval_ms, node);
switch (rsc->role) {
case RSC_ROLE_UNPROMOTED:
case RSC_ROLE_STARTED:
if (rsc->next_role == RSC_ROLE_PROMOTED) {
after_key = promote_key(rsc);
} else if (rsc->next_role == RSC_ROLE_STOPPED) {
after_key = stop_key(rsc);
}
break;
case RSC_ROLE_PROMOTED:
after_key = demote_key(rsc);
break;
default:
break;
}
if (after_key) {
pcmk__new_ordering(rsc, NULL, cancel_op, rsc, after_key, NULL,
pe_order_runnable_left, data_set);
}
}
do_crm_log(log_level, "%s action %s (%s vs. %s)",
result, key, role ? role : role2text(RSC_ROLE_UNPROMOTED),
role2text(rsc->next_role));
free(key);
return;
}
mon = custom_action(rsc, key, name, node, is_optional, TRUE, data_set);
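    // custom_action() took ownership of 'key', so switch to the action's UUID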
key = mon->uuid;
if (is_optional) {
pe_rsc_trace(rsc, "%s\t %s (optional)", node_uname, mon->uuid);
}
if ((start == NULL) || !pcmk_is_set(start->flags, pe_action_runnable)) {
pe_rsc_debug(rsc, "%s\t %s (cancelled : start un-runnable)",
node_uname, mon->uuid);
pe__clear_action_flags(mon, pe_action_runnable);
} else if (node == NULL || node->details->online == FALSE || node->details->unclean) {
pe_rsc_debug(rsc, "%s\t %s (cancelled : no node available)",
node_uname, mon->uuid);
pe__clear_action_flags(mon, pe_action_runnable);
} else if (!pcmk_is_set(mon->flags, pe_action_optional)) {
pe_rsc_info(rsc, " Start recurring %s (%us) for %s on %s",
mon->task, interval_ms / 1000, rsc->id, node_uname);
}
if (rsc->next_role == RSC_ROLE_PROMOTED) {
char *running_promoted = pcmk__itoa(PCMK_OCF_RUNNING_PROMOTED);
add_hash_param(mon->meta, XML_ATTR_TE_TARGET_RC, running_promoted);
free(running_promoted);
}
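    /* The transition engine compares the monitor's actual result against this
     * target, so a recurring monitor on a promoted instance is treated as
     * successful only when the agent reports the "running promoted" state
     * rather than plain success.
     */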
if ((node == NULL) || pcmk_is_set(rsc->flags, pe_rsc_managed)) {
pcmk__new_ordering(rsc, start_key(rsc), NULL, NULL, strdup(key), mon,
pe_order_implies_then|pe_order_runnable_left,
data_set);
pcmk__new_ordering(rsc, reload_key(rsc), NULL, NULL, strdup(key), mon,
pe_order_implies_then|pe_order_runnable_left,
data_set);
if (rsc->next_role == RSC_ROLE_PROMOTED) {
pcmk__new_ordering(rsc, promote_key(rsc), NULL, rsc, NULL, mon,
pe_order_optional|pe_order_runnable_left,
data_set);
} else if (rsc->role == RSC_ROLE_PROMOTED) {
pcmk__new_ordering(rsc, demote_key(rsc), NULL, rsc, NULL, mon,
pe_order_optional|pe_order_runnable_left,
data_set);
}
}
}
static void
Recurring(pe_resource_t * rsc, pe_action_t * start, pe_node_t * node, pe_working_set_t * data_set)
{
if (!pcmk_is_set(rsc->flags, pe_rsc_maintenance) &&
(node == NULL || node->details->maintenance == FALSE)) {
xmlNode *operation = NULL;
for (operation = pcmk__xe_first_child(rsc->ops_xml);
operation != NULL;
operation = pcmk__xe_next(operation)) {
if (pcmk__str_eq((const char *)operation->name, "op", pcmk__str_none)) {
RecurringOp(rsc, start, node, operation, data_set);
}
}
}
}
static void
RecurringOp_Stopped(pe_resource_t * rsc, pe_action_t * start, pe_node_t * node,
xmlNode * operation, pe_working_set_t * data_set)
{
char *key = NULL;
const char *name = NULL;
const char *role = NULL;
const char *interval_spec = NULL;
const char *node_uname = node? node->details->uname : "n/a";
guint interval_ms = 0;
GList *possible_matches = NULL;
GList *gIter = NULL;
/* Only process for the operations with role="Stopped" */
role = crm_element_value(operation, "role");
if (role == NULL || text2role(role) != RSC_ROLE_STOPPED) {
return;
}
interval_spec = crm_element_value(operation, XML_LRM_ATTR_INTERVAL);
interval_ms = crm_parse_interval_spec(interval_spec);
if (interval_ms == 0) {
return;
}
name = crm_element_value(operation, "name");
if (is_op_dup(rsc, name, interval_ms)) {
crm_trace("Not creating duplicate recurring action %s for %dms %s",
ID(operation), interval_ms, name);
return;
}
if (op_cannot_recur(name)) {
pcmk__config_err("Ignoring %s because action '%s' cannot be recurring",
ID(operation), name);
return;
}
key = pcmk__op_key(rsc->id, name, interval_ms);
if (find_rsc_op_entry(rsc, key) == NULL) {
crm_trace("Not creating recurring action %s for disabled resource %s",
ID(operation), rsc->id);
free(key);
return;
}
    // @TODO add support
    if (!pcmk_is_set(rsc->flags, pe_rsc_unique)) {
        crm_notice("Ignoring %s (recurring monitors for Stopped role are "
                   "not supported for anonymous clones)",
                   ID(operation));
        free(key); // 'key' was allocated above and would otherwise leak
        return;
    }
pe_rsc_trace(rsc,
"Creating recurring action %s for %s in role %s on nodes where it should not be running",
ID(operation), rsc->id, role2text(rsc->next_role));
/* if the monitor exists on the node where the resource will be running, cancel it */
if (node != NULL) {
possible_matches = find_actions_exact(rsc->actions, key, node);
if (possible_matches) {
pe_action_t *cancel_op = NULL;
g_list_free(possible_matches);
cancel_op = pcmk__new_cancel_action(rsc, name, interval_ms, node);
if ((rsc->next_role == RSC_ROLE_STARTED)
|| (rsc->next_role == RSC_ROLE_UNPROMOTED)) {
/* rsc->role == RSC_ROLE_STOPPED: cancel the monitor before start */
/* rsc->role == RSC_ROLE_STARTED: for a migration, cancel the monitor on the target node before start */
pcmk__new_ordering(rsc, NULL, cancel_op, rsc, start_key(rsc),
NULL, pe_order_runnable_left, data_set);
}
pe_rsc_info(rsc, "Cancel action %s (%s vs. %s) on %s",
key, role, role2text(rsc->next_role), node_uname);
}
}
for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
pe_node_t *stop_node = (pe_node_t *) gIter->data;
const char *stop_node_uname = stop_node->details->uname;
gboolean is_optional = TRUE;
gboolean probe_is_optional = TRUE;
gboolean stop_is_optional = TRUE;
pe_action_t *stopped_mon = NULL;
char *rc_inactive = NULL;
GList *stop_ops = NULL;
GList *local_gIter = NULL;
if (node && pcmk__str_eq(stop_node_uname, node_uname, pcmk__str_casei)) {
continue;
}
pe_rsc_trace(rsc, "Creating recurring action %s for %s on %s",
ID(operation), rsc->id, crm_str(stop_node_uname));
/* start a monitor for an already stopped resource */
possible_matches = find_actions_exact(rsc->actions, key, stop_node);
if (possible_matches == NULL) {
pe_rsc_trace(rsc, "Marking %s mandatory on %s: not active", key,
crm_str(stop_node_uname));
is_optional = FALSE;
} else {
pe_rsc_trace(rsc, "Marking %s optional on %s: already active", key,
crm_str(stop_node_uname));
is_optional = TRUE;
g_list_free(possible_matches);
}
stopped_mon = custom_action(rsc, strdup(key), name, stop_node, is_optional, TRUE, data_set);
rc_inactive = pcmk__itoa(PCMK_OCF_NOT_RUNNING);
add_hash_param(stopped_mon->meta, XML_ATTR_TE_TARGET_RC, rc_inactive);
free(rc_inactive);
if (pcmk_is_set(rsc->flags, pe_rsc_managed)) {
GList *probes = pe__resource_actions(rsc, stop_node, RSC_STATUS,
FALSE);
GList *pIter = NULL;
for (pIter = probes; pIter != NULL; pIter = pIter->next) {
pe_action_t *probe = (pe_action_t *) pIter->data;
order_actions(probe, stopped_mon, pe_order_runnable_left);
crm_trace("%s then %s on %s", probe->uuid, stopped_mon->uuid, stop_node->details->uname);
}
g_list_free(probes);
}
stop_ops = pe__resource_actions(rsc, stop_node, RSC_STOP, TRUE);
for (local_gIter = stop_ops; local_gIter != NULL; local_gIter = local_gIter->next) {
pe_action_t *stop = (pe_action_t *) local_gIter->data;
if (!pcmk_is_set(stop->flags, pe_action_optional)) {
stop_is_optional = FALSE;
}
if (!pcmk_is_set(stop->flags, pe_action_runnable)) {
crm_debug("%s\t %s (cancelled : stop un-runnable)",
crm_str(stop_node_uname), stopped_mon->uuid);
pe__clear_action_flags(stopped_mon, pe_action_runnable);
}
if (pcmk_is_set(rsc->flags, pe_rsc_managed)) {
pcmk__new_ordering(rsc, stop_key(rsc), stop, NULL, strdup(key),
stopped_mon,
pe_order_implies_then|pe_order_runnable_left,
data_set);
}
}
if (stop_ops) {
g_list_free(stop_ops);
}
if (is_optional == FALSE && probe_is_optional && stop_is_optional
&& !pcmk_is_set(rsc->flags, pe_rsc_managed)) {
pe_rsc_trace(rsc, "Marking %s optional on %s due to unmanaged",
key, crm_str(stop_node_uname));
pe__set_action_flags(stopped_mon, pe_action_optional);
}
if (pcmk_is_set(stopped_mon->flags, pe_action_optional)) {
pe_rsc_trace(rsc, "%s\t %s (optional)", crm_str(stop_node_uname), stopped_mon->uuid);
}
if (stop_node->details->online == FALSE || stop_node->details->unclean) {
pe_rsc_debug(rsc, "%s\t %s (cancelled : no node available)",
crm_str(stop_node_uname), stopped_mon->uuid);
pe__clear_action_flags(stopped_mon, pe_action_runnable);
}
if (pcmk_is_set(stopped_mon->flags, pe_action_runnable)
&& !pcmk_is_set(stopped_mon->flags, pe_action_optional)) {
crm_notice(" Start recurring %s (%us) for %s on %s", stopped_mon->task,
interval_ms / 1000, rsc->id, crm_str(stop_node_uname));
}
}
free(key);
}
static void
Recurring_Stopped(pe_resource_t * rsc, pe_action_t * start, pe_node_t * node, pe_working_set_t * data_set)
{
if (!pcmk_is_set(rsc->flags, pe_rsc_maintenance) &&
(node == NULL || node->details->maintenance == FALSE)) {
xmlNode *operation = NULL;
for (operation = pcmk__xe_first_child(rsc->ops_xml);
operation != NULL;
operation = pcmk__xe_next(operation)) {
if (pcmk__str_eq((const char *)operation->name, "op", pcmk__str_none)) {
RecurringOp_Stopped(rsc, start, node, operation, data_set);
}
}
}
}
static void
handle_migration_actions(pe_resource_t * rsc, pe_node_t *current, pe_node_t *chosen, pe_working_set_t * data_set)
{
pe_action_t *migrate_to = NULL;
pe_action_t *migrate_from = NULL;
pe_action_t *start = NULL;
pe_action_t *stop = NULL;
gboolean partial = rsc->partial_migration_target ? TRUE : FALSE;
pe_rsc_trace(rsc, "Processing migration actions %s moving from %s to %s . partial migration = %s",
rsc->id, current->details->id, chosen->details->id, partial ? "TRUE" : "FALSE");
start = start_action(rsc, chosen, TRUE);
stop = stop_action(rsc, current, TRUE);
if (partial == FALSE) {
migrate_to = custom_action(rsc, pcmk__op_key(rsc->id, RSC_MIGRATE, 0),
RSC_MIGRATE, current, TRUE, TRUE, data_set);
}
migrate_from = custom_action(rsc, pcmk__op_key(rsc->id, RSC_MIGRATED, 0),
RSC_MIGRATED, chosen, TRUE, TRUE, data_set);
if ((migrate_to && migrate_from) || (migrate_from && partial)) {
pe__set_action_flags(start, pe_action_migrate_runnable);
pe__set_action_flags(stop, pe_action_migrate_runnable);
// This is easier than trying to delete it from the graph
pe__set_action_flags(start, pe_action_pseudo);
/* order probes before migrations */
if (partial) {
pe__set_action_flags(migrate_from, pe_action_migrate_runnable);
migrate_from->needs = start->needs;
pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, RSC_STATUS, 0), NULL,
rsc, pcmk__op_key(rsc->id, RSC_MIGRATED, 0),
NULL, pe_order_optional, data_set);
} else {
pe__set_action_flags(migrate_from, pe_action_migrate_runnable);
pe__set_action_flags(migrate_to, pe_action_migrate_runnable);
migrate_to->needs = start->needs;
pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, RSC_STATUS, 0), NULL,
rsc, pcmk__op_key(rsc->id, RSC_MIGRATE, 0),
NULL, pe_order_optional, data_set);
pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, RSC_MIGRATE, 0), NULL,
rsc, pcmk__op_key(rsc->id, RSC_MIGRATED, 0),
NULL,
pe_order_optional|pe_order_implies_first_migratable,
data_set);
}
pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, RSC_MIGRATED, 0), NULL,
rsc, pcmk__op_key(rsc->id, RSC_STOP, 0), NULL,
pe_order_optional|pe_order_implies_first_migratable,
data_set);
pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, RSC_MIGRATED, 0), NULL,
rsc, pcmk__op_key(rsc->id, RSC_START, 0), NULL,
pe_order_optional|pe_order_implies_first_migratable|pe_order_pseudo_left,
data_set);
}
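    /* The net ordering for a full (non-partial) migration is therefore:
     * probe -> migrate_to (on the source) -> migrate_from (on the target)
     * -> stop -> start, with the start downgraded to a pseudo-action above.
     */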
if (migrate_to) {
add_hash_param(migrate_to->meta, XML_LRM_ATTR_MIGRATE_SOURCE, current->details->uname);
add_hash_param(migrate_to->meta, XML_LRM_ATTR_MIGRATE_TARGET, chosen->details->uname);
/* Pacemaker Remote connections don't require pending to be recorded in
* the CIB. We can reduce CIB writes by not setting PENDING for them.
*/
if (rsc->is_remote_node == FALSE) {
/* migrate_to takes place on the source node, but can
* have an effect on the target node depending on how
* the agent is written. Because of this, we have to maintain
* a record that the migrate_to occurred, in case the source node
* loses membership while the migrate_to action is still in-flight.
*/
add_hash_param(migrate_to->meta, XML_OP_ATTR_PENDING, "true");
}
}
if (migrate_from) {
add_hash_param(migrate_from->meta, XML_LRM_ATTR_MIGRATE_SOURCE, current->details->uname);
add_hash_param(migrate_from->meta, XML_LRM_ATTR_MIGRATE_TARGET, chosen->details->uname);
}
}
void
native_create_actions(pe_resource_t * rsc, pe_working_set_t * data_set)
{
pe_action_t *start = NULL;
pe_node_t *chosen = NULL;
pe_node_t *current = NULL;
gboolean need_stop = FALSE;
bool need_promote = FALSE;
gboolean is_moving = FALSE;
gboolean allow_migrate = pcmk_is_set(rsc->flags, pe_rsc_allow_migrate)? TRUE : FALSE;
GList *gIter = NULL;
unsigned int num_all_active = 0;
unsigned int num_clean_active = 0;
bool multiply_active = FALSE;
enum rsc_role_e role = RSC_ROLE_UNKNOWN;
enum rsc_role_e next_role = RSC_ROLE_UNKNOWN;
CRM_ASSERT(rsc);
chosen = rsc->allocated_to;
next_role = rsc->next_role;
if (next_role == RSC_ROLE_UNKNOWN) {
pe__set_next_role(rsc,
(chosen == NULL)? RSC_ROLE_STOPPED : RSC_ROLE_STARTED,
"allocation");
}
pe_rsc_trace(rsc, "Creating all actions for %s transition from %s to %s (%s) on %s",
rsc->id, role2text(rsc->role), role2text(rsc->next_role),
((next_role == RSC_ROLE_UNKNOWN)? "implicit" : "explicit"),
((chosen == NULL)? "no node" : chosen->details->uname));
current = pe__find_active_on(rsc, &num_all_active, &num_clean_active);
for (gIter = rsc->dangling_migrations; gIter != NULL; gIter = gIter->next) {
pe_node_t *dangling_source = (pe_node_t *) gIter->data;
pe_action_t *stop = NULL;
pe_rsc_trace(rsc, "Creating stop action %sfor %s on %s due to dangling migration",
pcmk_is_set(data_set->flags, pe_flag_remove_after_stop)? "and cleanup " : "",
rsc->id, dangling_source->details->uname);
stop = stop_action(rsc, dangling_source, FALSE);
pe__set_action_flags(stop, pe_action_dangle);
if (pcmk_is_set(data_set->flags, pe_flag_remove_after_stop)) {
DeleteRsc(rsc, dangling_source, FALSE, data_set);
}
}
if ((num_all_active == 2) && (num_clean_active == 2) && chosen
&& rsc->partial_migration_source && rsc->partial_migration_target
&& (current->details == rsc->partial_migration_source->details)
&& (chosen->details == rsc->partial_migration_target->details)) {
/* The chosen node is still the migration target from a partial
* migration. Attempt to continue the migration instead of recovering
* by stopping the resource everywhere and starting it on a single node.
*/
pe_rsc_trace(rsc, "Will attempt to continue with partial migration "
"to target %s from %s",
rsc->partial_migration_target->details->id,
rsc->partial_migration_source->details->id);
} else if (!pcmk_is_set(rsc->flags, pe_rsc_needs_fencing)) {
/* If a resource has "requires" set to nothing or quorum, don't consider
* it active on unclean nodes (similar to how all resources behave when
* stonith-enabled is false). We can start such resources elsewhere
* before fencing completes, and if we considered the resource active on
* the failed node, we would attempt recovery for being active on
* multiple nodes.
*/
multiply_active = (num_clean_active > 1);
} else {
multiply_active = (num_all_active > 1);
}
if (multiply_active) {
if (rsc->partial_migration_target && rsc->partial_migration_source) {
// Migration was in progress, but we've chosen a different target
crm_notice("Resource %s can no longer migrate from %s to %s "
"(will stop on both nodes)",
rsc->id, rsc->partial_migration_source->details->uname,
rsc->partial_migration_target->details->uname);
} else {
const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
// Resource was (possibly) incorrectly multiply active
pe_proc_err("%s resource %s might be active on %u nodes (%s)",
crm_str(class), rsc->id, num_all_active,
recovery2text(rsc->recovery_type));
crm_notice("See https://wiki.clusterlabs.org/wiki/FAQ#Resource_is_Too_Active for more information");
}
if (rsc->recovery_type == recovery_stop_start) {
need_stop = TRUE;
}
        /* If a partial migration happens to be in progress but its target was
         * not chosen again, clear all partial migration data.
         */
rsc->partial_migration_source = rsc->partial_migration_target = NULL;
allow_migrate = FALSE;
}
if (pcmk_is_set(rsc->flags, pe_rsc_start_pending)) {
pe_rsc_trace(rsc, "Creating start action for %s to represent already pending start",
rsc->id);
start = start_action(rsc, chosen, TRUE);
pe__set_action_flags(start, pe_action_print_always);
}
if (current && chosen && current->details != chosen->details) {
pe_rsc_trace(rsc, "Moving %s from %s to %s",
rsc->id, crm_str(current->details->uname),
crm_str(chosen->details->uname));
is_moving = TRUE;
need_stop = TRUE;
} else if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
if (pcmk_is_set(rsc->flags, pe_rsc_stop)) {
need_stop = TRUE;
pe_rsc_trace(rsc, "Recovering %s", rsc->id);
} else {
pe_rsc_trace(rsc, "Recovering %s by demotion", rsc->id);
if (rsc->next_role == RSC_ROLE_PROMOTED) {
need_promote = TRUE;
}
}
} else if (pcmk_is_set(rsc->flags, pe_rsc_block)) {
pe_rsc_trace(rsc, "Blocking further actions on %s", rsc->id);
need_stop = TRUE;
} else if (rsc->role > RSC_ROLE_STARTED && current != NULL && chosen != NULL) {
pe_rsc_trace(rsc, "Creating start action for promoted resource %s",
rsc->id);
start = start_action(rsc, chosen, TRUE);
if (!pcmk_is_set(start->flags, pe_action_optional)) {
// Recovery of a promoted resource
pe_rsc_trace(rsc, "%s restart is required for recovery", rsc->id);
need_stop = TRUE;
}
}
/* Create any additional actions required when bringing resource down and
* back up to same level.
*/
role = rsc->role;
while (role != RSC_ROLE_STOPPED) {
next_role = rsc_state_matrix[role][RSC_ROLE_STOPPED];
pe_rsc_trace(rsc, "Creating %s action to take %s down from %s to %s",
(need_stop? "required" : "optional"), rsc->id,
role2text(role), role2text(next_role));
if (rsc_action_matrix[role][next_role] (rsc, current, !need_stop, data_set) == FALSE) {
break;
}
role = next_role;
}
while ((rsc->role <= rsc->next_role) && (role != rsc->role)
&& !pcmk_is_set(rsc->flags, pe_rsc_block)) {
bool required = need_stop;
next_role = rsc_state_matrix[role][rsc->role];
if ((next_role == RSC_ROLE_PROMOTED) && need_promote) {
required = true;
}
pe_rsc_trace(rsc, "Creating %s action to take %s up from %s to %s",
(required? "required" : "optional"), rsc->id,
role2text(role), role2text(next_role));
if (rsc_action_matrix[role][next_role](rsc, chosen, !required,
data_set) == FALSE) {
break;
}
role = next_role;
}
role = rsc->role;
/* Required steps from this role to the next */
while (role != rsc->next_role) {
next_role = rsc_state_matrix[role][rsc->next_role];
pe_rsc_trace(rsc, "Creating action to take %s from %s to %s (ending at %s)",
rsc->id, role2text(role), role2text(next_role),
role2text(rsc->next_role));
if (rsc_action_matrix[role][next_role] (rsc, chosen, FALSE, data_set) == FALSE) {
break;
}
role = next_role;
}
if (pcmk_is_set(rsc->flags, pe_rsc_block)) {
pe_rsc_trace(rsc, "Not creating recurring monitors for blocked resource %s",
rsc->id);
} else if ((rsc->next_role != RSC_ROLE_STOPPED)
|| !pcmk_is_set(rsc->flags, pe_rsc_managed)) {
pe_rsc_trace(rsc, "Creating recurring monitors for %s resource %s",
((rsc->next_role == RSC_ROLE_STOPPED)? "unmanaged" : "active"),
rsc->id);
start = start_action(rsc, chosen, TRUE);
Recurring(rsc, start, chosen, data_set);
Recurring_Stopped(rsc, start, chosen, data_set);
} else {
pe_rsc_trace(rsc, "Creating recurring monitors for inactive resource %s",
rsc->id);
Recurring_Stopped(rsc, NULL, NULL, data_set);
}
    /* If we are stuck in a partial migration and its target no longer matches
     * the chosen node, a full stop/start is required.
     */
if (rsc->partial_migration_target && (chosen == NULL || rsc->partial_migration_target->details != chosen->details)) {
pe_rsc_trace(rsc, "Not allowing partial migration of %s to continue",
rsc->id);
allow_migrate = FALSE;
} else if (!is_moving || !pcmk_is_set(rsc->flags, pe_rsc_managed)
|| pcmk_any_flags_set(rsc->flags,
pe_rsc_failed|pe_rsc_start_pending)
|| (current && current->details->unclean)
|| rsc->next_role < RSC_ROLE_STARTED) {
allow_migrate = FALSE;
}
if (allow_migrate) {
handle_migration_actions(rsc, current, chosen, data_set);
}
}
static void
rsc_avoids_remote_nodes(pe_resource_t *rsc)
{
GHashTableIter iter;
pe_node_t *node = NULL;
g_hash_table_iter_init(&iter, rsc->allowed_nodes);
while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
if (node->details->remote_rsc) {
node->weight = -INFINITY;
}
}
}
/*!
* \internal
* \brief Return allowed nodes as (possibly sorted) list
*
* Convert a resource's hash table of allowed nodes to a list. If printing to
* stdout, sort the list, to keep action ID numbers consistent for regression
* test output (while avoiding the performance hit on a live cluster).
*
* \param[in] rsc Resource to check for allowed nodes
* \param[in] data_set Cluster working set
*
* \return List of resource's allowed nodes
* \note Callers should take care not to rely on the list being sorted.
*/
static GList *
allowed_nodes_as_list(pe_resource_t *rsc, pe_working_set_t *data_set)
{
GList *allowed_nodes = NULL;
if (rsc->allowed_nodes) {
allowed_nodes = g_hash_table_get_values(rsc->allowed_nodes);
}
if (!pcmk__is_daemon) {
allowed_nodes = g_list_sort(allowed_nodes, sort_node_uname);
}
return allowed_nodes;
}
void
native_internal_constraints(pe_resource_t * rsc, pe_working_set_t * data_set)
{
/* This function is on the critical path and worth optimizing as much as possible */
pe_resource_t *top = NULL;
GList *allowed_nodes = NULL;
bool check_unfencing = FALSE;
bool check_utilization = false;
if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
pe_rsc_trace(rsc,
"Skipping native constraints for unmanaged resource: %s",
rsc->id);
return;
}
top = uber_parent(rsc);
// Whether resource requires unfencing
check_unfencing = !pcmk_is_set(rsc->flags, pe_rsc_fence_device)
&& pcmk_is_set(data_set->flags, pe_flag_enable_unfencing)
&& pcmk_is_set(rsc->flags, pe_rsc_needs_unfencing);
// Whether a non-default placement strategy is used
check_utilization = (g_hash_table_size(rsc->utilization) > 0)
&& !pcmk__str_eq(data_set->placement_strategy,
"default", pcmk__str_casei);
// Order stops before starts (i.e. restart)
pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, RSC_STOP, 0), NULL,
rsc, pcmk__op_key(rsc->id, RSC_START, 0), NULL,
pe_order_optional|pe_order_implies_then|pe_order_restart,
data_set);
// Promotable ordering: demote before stop, start before promote
if (pcmk_is_set(top->flags, pe_rsc_promotable)
|| (rsc->role > RSC_ROLE_UNPROMOTED)) {
pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, RSC_DEMOTE, 0), NULL,
rsc, pcmk__op_key(rsc->id, RSC_STOP, 0), NULL,
pe_order_promoted_implies_first, data_set);
pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, RSC_START, 0), NULL,
rsc, pcmk__op_key(rsc->id, RSC_PROMOTE, 0), NULL,
pe_order_runnable_left, data_set);
}
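    /* Combined with the stop-before-start ordering above, moving a promoted
     * resource therefore expands to: demote (old node) -> stop -> start
     * (new node) -> promote.
     */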
// Don't clear resource history if probing on same node
pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, CRM_OP_LRM_DELETE, 0),
NULL, rsc, pcmk__op_key(rsc->id, RSC_STATUS, 0),
NULL, pe_order_same_node|pe_order_then_cancels_first,
data_set);
// Certain checks need allowed nodes
if (check_unfencing || check_utilization || rsc->container) {
allowed_nodes = allowed_nodes_as_list(rsc, data_set);
}
if (check_unfencing) {
/* Check if the node needs to be unfenced first */
for (GList *item = allowed_nodes; item; item = item->next) {
pe_node_t *node = item->data;
pe_action_t *unfence = pe_fence_op(node, "on", TRUE, NULL, FALSE, data_set);
crm_debug("Ordering any stops of %s before %s, and any starts after",
rsc->id, unfence->uuid);
/*
* It would be more efficient to order clone resources once,
* rather than order each instance, but ordering the instance
* allows us to avoid unnecessary dependencies that might conflict
* with user constraints.
*
* @TODO: This constraint can still produce a transition loop if the
* resource has a stop scheduled on the node being unfenced, and
* there is a user ordering constraint to start some other resource
* (which will be ordered after the unfence) before stopping this
* resource. An example is "start some slow-starting cloned service
* before stopping an associated virtual IP that may be moving to
* it":
* stop this -> unfencing -> start that -> stop this
*/
pcmk__new_ordering(rsc, stop_key(rsc), NULL,
NULL, strdup(unfence->uuid), unfence,
pe_order_optional|pe_order_same_node, data_set);
pcmk__new_ordering(NULL, strdup(unfence->uuid), unfence,
rsc, start_key(rsc), NULL,
pe_order_implies_then_on_node|pe_order_same_node,
data_set);
}
}
if (check_utilization) {
pcmk__create_utilization_constraints(rsc, allowed_nodes);
}
if (rsc->container) {
pe_resource_t *remote_rsc = NULL;
if (rsc->is_remote_node) {
// rsc is the implicit remote connection for a guest or bundle node
/* Do not allow a guest resource to live on a Pacemaker Remote node,
* to avoid nesting remotes. However, allow bundles to run on remote
* nodes.
*/
if (!pcmk_is_set(rsc->flags, pe_rsc_allow_remote_remotes)) {
rsc_avoids_remote_nodes(rsc->container);
}
/* If someone cleans up a guest or bundle node's container, we will
* likely schedule a (re-)probe of the container and recovery of the
* connection. Order the connection stop after the container probe,
* so that if we detect the container running, we will trigger a new
* transition and avoid the unnecessary recovery.
*/
pcmk__order_resource_actions(rsc->container, RSC_STATUS, rsc,
RSC_STOP, pe_order_optional, data_set);
/* A user can specify that a resource must start on a Pacemaker Remote
* node by explicitly configuring it with the container=NODENAME
* meta-attribute. This is of questionable merit, since location
* constraints can accomplish the same thing. But we support it, so here
* we check whether a resource (that is not itself a remote connection)
* has container set to a remote node or guest node resource.
*/
} else if (rsc->container->is_remote_node) {
remote_rsc = rsc->container;
} else {
remote_rsc = pe__resource_contains_guest_node(data_set,
rsc->container);
}
if (remote_rsc) {
/* Force the resource on the Pacemaker Remote node instead of
* colocating the resource with the container resource.
*/
for (GList *item = allowed_nodes; item; item = item->next) {
pe_node_t *node = item->data;
if (node->details->remote_rsc != remote_rsc) {
node->weight = -INFINITY;
}
}
} else {
/* This resource is either a filler for a container that does NOT
* represent a Pacemaker Remote node, or a Pacemaker Remote
* connection resource for a guest node or bundle.
*/
int score;
crm_trace("Order and colocate %s relative to its container %s",
rsc->id, rsc->container->id);
pcmk__new_ordering(rsc->container,
pcmk__op_key(rsc->container->id, RSC_START, 0),
NULL, rsc, pcmk__op_key(rsc->id, RSC_START, 0),
NULL,
pe_order_implies_then|pe_order_runnable_left,
data_set);
pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, RSC_STOP, 0), NULL,
rsc->container,
pcmk__op_key(rsc->container->id, RSC_STOP, 0),
NULL, pe_order_implies_first, data_set);
if (pcmk_is_set(rsc->flags, pe_rsc_allow_remote_remotes)) {
score = 10000; /* Highly preferred but not essential */
} else {
score = INFINITY; /* Force them to run on the same host */
}
pcmk__new_colocation("resource-with-container", NULL, score, rsc,
rsc->container, NULL, NULL, true, data_set);
}
}
if (rsc->is_remote_node || pcmk_is_set(rsc->flags, pe_rsc_fence_device)) {
        /* Don't allow remote nodes to run stonith devices or remote
         * connection resources.
         */
rsc_avoids_remote_nodes(rsc);
}
g_list_free(allowed_nodes);
}
void
native_rsc_colocation_lh(pe_resource_t *dependent, pe_resource_t *primary,
pcmk__colocation_t *constraint,
pe_working_set_t *data_set)
{
if (dependent == NULL) {
pe_err("dependent was NULL for %s", constraint->id);
return;
} else if (constraint->primary == NULL) {
pe_err("primary was NULL for %s", constraint->id);
return;
}
pe_rsc_trace(dependent,
"Processing colocation constraint between %s and %s",
dependent->id, primary->id);
primary->cmds->rsc_colocation_rh(dependent, primary, constraint, data_set);
}
void
native_rsc_colocation_rh(pe_resource_t *dependent, pe_resource_t *primary,
pcmk__colocation_t *constraint,
pe_working_set_t *data_set)
{
enum pcmk__coloc_affects filter_results;
CRM_ASSERT((dependent != NULL) && (primary != NULL));
filter_results = pcmk__colocation_affects(dependent, primary, constraint,
false);
pe_rsc_trace(dependent, "%s %s with %s (%s, score=%d, filter=%d)",
((constraint->score > 0)? "Colocating" : "Anti-colocating"),
dependent->id, primary->id, constraint->id, constraint->score,
filter_results);
switch (filter_results) {
case pcmk__coloc_affects_role:
pcmk__apply_coloc_to_priority(dependent, primary, constraint);
break;
case pcmk__coloc_affects_location:
pcmk__apply_coloc_to_weights(dependent, primary, constraint);
break;
case pcmk__coloc_affects_nothing:
default:
return;
}
}
enum pe_action_flags
native_action_flags(pe_action_t * action, pe_node_t * node)
{
return action->flags;
}
static inline bool
is_primitive_action(pe_action_t *action)
{
return action && action->rsc && (action->rsc->variant == pe_native);
}
/*!
* \internal
* \brief Clear a single action flag and set reason text
*
* \param[in] action Action whose flag should be cleared
* \param[in] flag Action flag that should be cleared
* \param[in] reason Action that is the reason why flag is being cleared
*/
#define clear_action_flag_because(action, flag, reason) do { \
if (pcmk_is_set((action)->flags, (flag))) { \
pe__clear_action_flags(action, flag); \
if ((action)->rsc != (reason)->rsc) { \
char *reason_text = pe__action2reason((reason), (flag)); \
pe_action_set_reason((action), reason_text, \
((flag) == pe_action_migrate_runnable)); \
free(reason_text); \
} \
} \
} while (0)
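/* Example use: clear_action_flag_because(then, pe_action_runnable, first)
 * clears the runnable flag on 'then' and, when 'then' belongs to a different
 * resource than 'first', records a reason string derived from 'first' on the
 * action (see the calls throughout native_update_actions() below).
 */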
/*!
* \internal
* \brief Set action bits appropriately when pe_restart_order is used
*
* \param[in] first 'First' action in an ordering with pe_restart_order
* \param[in] then 'Then' action in an ordering with pe_restart_order
* \param[in] filter What ordering flags to care about
*
* \note pe_restart_order is set for "stop resource before starting it" and
* "stop later group member before stopping earlier group member"
*/
static void
handle_restart_ordering(pe_action_t *first, pe_action_t *then,
enum pe_action_flags filter)
{
const char *reason = NULL;
CRM_ASSERT(is_primitive_action(first));
CRM_ASSERT(is_primitive_action(then));
// We need to update the action in two cases:
// ... if 'then' is required
if (pcmk_is_set(filter, pe_action_optional)
&& !pcmk_is_set(then->flags, pe_action_optional)) {
reason = "restart";
}
/* ... if 'then' is unrunnable action on same resource (if a resource
* should restart but can't start, we still want to stop)
*/
if (pcmk_is_set(filter, pe_action_runnable)
&& !pcmk_is_set(then->flags, pe_action_runnable)
&& pcmk_is_set(then->rsc->flags, pe_rsc_managed)
&& (first->rsc == then->rsc)) {
reason = "stop";
}
if (reason == NULL) {
return;
}
pe_rsc_trace(first->rsc, "Handling %s -> %s for %s",
first->uuid, then->uuid, reason);
// Make 'first' required if it is runnable
if (pcmk_is_set(first->flags, pe_action_runnable)) {
clear_action_flag_because(first, pe_action_optional, then);
}
// Make 'first' required if 'then' is required
if (!pcmk_is_set(then->flags, pe_action_optional)) {
clear_action_flag_because(first, pe_action_optional, then);
}
// Make 'first' unmigratable if 'then' is unmigratable
if (!pcmk_is_set(then->flags, pe_action_migrate_runnable)) {
clear_action_flag_because(first, pe_action_migrate_runnable, then);
}
// Make 'then' unrunnable if 'first' is required but unrunnable
if (!pcmk_is_set(first->flags, pe_action_optional)
&& !pcmk_is_set(first->flags, pe_action_runnable)) {
clear_action_flag_because(then, pe_action_runnable, first);
}
}
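/* For example (illustrative scenario): if a resource must restart but its
 * start turns out to be unrunnable, the "stop" case above keeps the stop
 * mandatory, so the resource is at least brought down cleanly instead of
 * being left running.
 */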
/*!
 * \internal
 * \brief Update action flags for an ordering between two primitive actions
 *
 * \param[in] first     'First' action in the ordering
 * \param[in] then      'Then' action in the ordering
 * \param[in] node      Node relevant to the update, if any
 * \param[in] flags     Flags from action_flags_for_ordering()
 * \param[in] filter    Ordering flags of interest
 * \param[in] type      Ordering relationship between 'first' and 'then'
 * \param[in] data_set  Cluster working set
 *
 * \return Flags indicating whether the 'first' and/or 'then' action changed
 */
enum pe_graph_flags
native_update_actions(pe_action_t *first, pe_action_t *then, pe_node_t *node,
enum pe_action_flags flags, enum pe_action_flags filter,
enum pe_ordering type, pe_working_set_t *data_set)
{
enum pe_graph_flags changed = pe_graph_none;
enum pe_action_flags then_flags = then->flags;
enum pe_action_flags first_flags = first->flags;
if (type & pe_order_asymmetrical) {
pe_resource_t *then_rsc = then->rsc;
enum rsc_role_e then_rsc_role = then_rsc ? then_rsc->fns->state(then_rsc, TRUE) : 0;
if (!then_rsc) {
/* ignore */
        } else if ((then_rsc_role == RSC_ROLE_STOPPED)
                   && pcmk__str_eq(then->task, RSC_STOP, pcmk__str_casei)) {
            /* Ignore. If 'then' is supposed to be stopped after 'first', but
             * 'then' is already stopped, there is nothing to be done when the
             * ordering is asymmetric.
             */
} else if ((then_rsc_role >= RSC_ROLE_STARTED)
&& pcmk__str_eq(then->task, RSC_START, pcmk__str_casei)
&& pcmk_is_set(then->flags, pe_action_optional)
&& then->node
&& pcmk__list_of_1(then_rsc->running_on)
&& then->node->details == ((pe_node_t *) then_rsc->running_on->data)->details) {
/* Ignore. If 'then' is supposed to be started after 'first', but
* 'then' is already started, there is nothing to be done when
* asymmetrical -- unless the start is mandatory, which indicates
* the resource is restarting, and the ordering is still needed.
*/
} else if (!(first->flags & pe_action_runnable)) {
/* prevent 'then' action from happening if 'first' is not runnable and
* 'then' has not yet occurred. */
clear_action_flag_because(then, pe_action_optional, first);
clear_action_flag_because(then, pe_action_runnable, first);
} else {
/* ignore... then is allowed to start/stop if it wants to. */
}
}
if (pcmk_is_set(type, pe_order_implies_first)
&& !pcmk_is_set(then_flags, pe_action_optional)) {
// Then is required, and implies first should be, too
if (pcmk_is_set(filter, pe_action_optional)
&& !pcmk_is_set(flags, pe_action_optional)
&& pcmk_is_set(first_flags, pe_action_optional)) {
clear_action_flag_because(first, pe_action_optional, then);
}
if (pcmk_is_set(flags, pe_action_migrate_runnable) &&
!pcmk_is_set(then->flags, pe_action_migrate_runnable)) {
clear_action_flag_because(first, pe_action_migrate_runnable, then);
}
}
if (type & pe_order_promoted_implies_first) {
if ((filter & pe_action_optional) &&
((then->flags & pe_action_optional) == FALSE) &&
(then->rsc != NULL) && (then->rsc->role == RSC_ROLE_PROMOTED)) {
clear_action_flag_because(first, pe_action_optional, then);
if (pcmk_is_set(first->flags, pe_action_migrate_runnable) &&
!pcmk_is_set(then->flags, pe_action_migrate_runnable)) {
clear_action_flag_because(first, pe_action_migrate_runnable,
then);
}
}
}
if ((type & pe_order_implies_first_migratable)
&& pcmk_is_set(filter, pe_action_optional)) {
if (((then->flags & pe_action_migrate_runnable) == FALSE) ||
((then->flags & pe_action_runnable) == FALSE)) {
clear_action_flag_because(first, pe_action_runnable, then);
}
if ((then->flags & pe_action_optional) == 0) {
clear_action_flag_because(first, pe_action_optional, then);
}
}
if ((type & pe_order_pseudo_left)
&& pcmk_is_set(filter, pe_action_optional)) {
if ((first->flags & pe_action_runnable) == FALSE) {
clear_action_flag_because(then, pe_action_migrate_runnable, first);
pe__clear_action_flags(then, pe_action_pseudo);
}
}
if (pcmk_is_set(type, pe_order_runnable_left)
&& pcmk_is_set(filter, pe_action_runnable)
&& pcmk_is_set(then->flags, pe_action_runnable)
&& !pcmk_is_set(flags, pe_action_runnable)) {
clear_action_flag_because(then, pe_action_runnable, first);
clear_action_flag_because(then, pe_action_migrate_runnable, first);
}
if (pcmk_is_set(type, pe_order_implies_then)
&& pcmk_is_set(filter, pe_action_optional)
&& pcmk_is_set(then->flags, pe_action_optional)
&& !pcmk_is_set(flags, pe_action_optional)
&& !pcmk_is_set(first->flags, pe_action_migrate_runnable)) {
clear_action_flag_because(then, pe_action_optional, first);
}
if (pcmk_is_set(type, pe_order_restart)) {
handle_restart_ordering(first, then, filter);
}
if (then_flags != then->flags) {
pe__set_graph_flags(changed, first, pe_graph_updated_then);
pe_rsc_trace(then->rsc,
"%s on %s: flags are now %#.6x (was %#.6x) "
"because of 'first' %s (%#.6x)",
then->uuid,
then->node? then->node->details->uname : "no node",
then->flags, then_flags, first->uuid, first->flags);
        if ((then->rsc != NULL) && (then->rsc->parent != NULL)) {
            /* "X_stop then X_start" doesn't get handled for cloned groups
             * unless we do this */
pcmk__update_action_for_orderings(then, data_set);
}
}
if (first_flags != first->flags) {
pe__set_graph_flags(changed, first, pe_graph_updated_first);
pe_rsc_trace(first->rsc,
"%s on %s: flags are now %#.6x (was %#.6x) "
"because of 'then' %s (%#.6x)",
first->uuid,
first->node? first->node->details->uname : "no node",
first->flags, first_flags, then->uuid, then->flags);
}
return changed;
}
void
native_rsc_location(pe_resource_t *rsc, pe__location_t *constraint)
{
pcmk__apply_location(constraint, rsc);
}
void
native_expand(pe_resource_t * rsc, pe_working_set_t * data_set)
{
GList *gIter = NULL;
CRM_ASSERT(rsc);
pe_rsc_trace(rsc, "Processing actions from %s", rsc->id);
for (gIter = rsc->actions; gIter != NULL; gIter = gIter->next) {
pe_action_t *action = (pe_action_t *) gIter->data;
crm_trace("processing action %d for rsc=%s", action->id, rsc->id);
pcmk__add_action_to_graph(action, data_set);
}
for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
child_rsc->cmds->expand(child_rsc, data_set);
}
}
gboolean
StopRsc(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set)
{
GList *gIter = NULL;
CRM_ASSERT(rsc);
pe_rsc_trace(rsc, "%s", rsc->id);
for (gIter = rsc->running_on; gIter != NULL; gIter = gIter->next) {
pe_node_t *current = (pe_node_t *) gIter->data;
pe_action_t *stop;
if (rsc->partial_migration_target) {
if (rsc->partial_migration_target->details == current->details) {
pe_rsc_trace(rsc, "Filtered %s -> %s %s", current->details->uname,
next->details->uname, rsc->id);
continue;
} else {
pe_rsc_trace(rsc, "Forced on %s %s", current->details->uname, rsc->id);
optional = FALSE;
}
}
pe_rsc_trace(rsc, "%s on %s", rsc->id, current->details->uname);
stop = stop_action(rsc, current, optional);
        if (rsc->allocated_to == NULL) {
pe_action_set_reason(stop, "node availability", TRUE);
}
if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
pe__clear_action_flags(stop, pe_action_runnable);
}
if (pcmk_is_set(data_set->flags, pe_flag_remove_after_stop)) {
DeleteRsc(rsc, current, optional, data_set);
}
if (pcmk_is_set(rsc->flags, pe_rsc_needs_unfencing)) {
pe_action_t *unfence = pe_fence_op(current, "on", TRUE, NULL, FALSE, data_set);
order_actions(stop, unfence, pe_order_implies_first);
if (!pcmk__node_unfenced(current)) {
pe_proc_err("Stopping %s until %s can be unfenced", rsc->id, current->details->uname);
}
}
}
return TRUE;
}
gboolean
StartRsc(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set)
{
pe_action_t *start = NULL;
CRM_ASSERT(rsc);
pe_rsc_trace(rsc, "%s on %s %d %d", rsc->id, next ? next->details->uname : "N/A", optional, next ? next->weight : 0);
start = start_action(rsc, next, TRUE);
pcmk__order_vs_unfence(rsc, next, start, pe_order_implies_then, data_set);
if (pcmk_is_set(start->flags, pe_action_runnable) && !optional) {
pe__clear_action_flags(start, pe_action_optional);
}
return TRUE;
}
gboolean
PromoteRsc(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set)
{
GList *gIter = NULL;
gboolean runnable = TRUE;
GList *action_list = NULL;
CRM_ASSERT(rsc);
CRM_CHECK(next != NULL, return FALSE);
pe_rsc_trace(rsc, "%s on %s", rsc->id, next->details->uname);
action_list = pe__resource_actions(rsc, next, RSC_START, TRUE);
for (gIter = action_list; gIter != NULL; gIter = gIter->next) {
pe_action_t *start = (pe_action_t *) gIter->data;
if (!pcmk_is_set(start->flags, pe_action_runnable)) {
runnable = FALSE;
}
}
g_list_free(action_list);
if (runnable) {
promote_action(rsc, next, optional);
return TRUE;
}
pe_rsc_debug(rsc, "%s\tPromote %s (canceled)", next->details->uname, rsc->id);
action_list = pe__resource_actions(rsc, next, RSC_PROMOTE, TRUE);
for (gIter = action_list; gIter != NULL; gIter = gIter->next) {
pe_action_t *promote = (pe_action_t *) gIter->data;
pe__clear_action_flags(promote, pe_action_runnable);
}
g_list_free(action_list);
return TRUE;
}
gboolean
DemoteRsc(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set)
{
GList *gIter = NULL;
CRM_ASSERT(rsc);
pe_rsc_trace(rsc, "%s", rsc->id);
/* CRM_CHECK(rsc->next_role == RSC_ROLE_UNPROMOTED, return FALSE); */
    for (gIter = rsc->running_on; gIter != NULL; gIter = gIter->next) {
        pe_node_t *current = (pe_node_t *) gIter->data;

        // The demote is created on each node currently running the resource
        pe_rsc_trace(rsc, "%s on %s", rsc->id, current->details->uname);
        demote_action(rsc, current, optional);
}
return TRUE;
}
gboolean
RoleError(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set)
{
CRM_ASSERT(rsc);
crm_err("%s on %s", rsc->id, next ? next->details->uname : "N/A");
CRM_CHECK(FALSE, return FALSE);
return FALSE;
}
gboolean
NullOp(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set)
{
CRM_ASSERT(rsc);
pe_rsc_trace(rsc, "%s", rsc->id);
return FALSE;
}
gboolean
DeleteRsc(pe_resource_t * rsc, pe_node_t * node, gboolean optional, pe_working_set_t * data_set)
{
if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
pe_rsc_trace(rsc, "Resource %s not deleted from %s: failed", rsc->id, node->details->uname);
return FALSE;
} else if (node == NULL) {
pe_rsc_trace(rsc, "Resource %s not deleted: NULL node", rsc->id);
return FALSE;
} else if (node->details->unclean || node->details->online == FALSE) {
pe_rsc_trace(rsc, "Resource %s not deleted from %s: unrunnable", rsc->id,
node->details->uname);
return FALSE;
}
crm_notice("Removing %s from %s", rsc->id, node->details->uname);
delete_action(rsc, node, optional);
pcmk__order_resource_actions(rsc, RSC_STOP, rsc, RSC_DELETE,
optional? pe_order_implies_then : pe_order_optional,
data_set);
pcmk__order_resource_actions(rsc, RSC_DELETE, rsc, RSC_START,
optional? pe_order_implies_then : pe_order_optional,
data_set);
return TRUE;
}
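/*!
 * \internal
 * \brief Schedule a probe (non-recurring monitor) for a resource on a node
 *
 * Skips nodes and resources where probing is unnecessary or impossible (for
 * example, stonith resources on Pacemaker Remote nodes, resources already
 * known on the node, or resources inside a stopped container).
 *
 * \param[in] rsc       Resource to probe
 * \param[in] node      Node to probe \p rsc on
 * \param[in] complete  Action passed through when probing children
 *                      (otherwise unused here)
 * \param[in] force     Whether to probe even if unnecessary
 * \param[in] data_set  Cluster working set
 *
 * \return TRUE if a probe was scheduled for \p rsc itself or any child,
 *         otherwise FALSE
 */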
gboolean
native_create_probe(pe_resource_t * rsc, pe_node_t * node, pe_action_t * complete,
gboolean force, pe_working_set_t * data_set)
{
enum pe_ordering flags = pe_order_optional;
char *key = NULL;
pe_action_t *probe = NULL;
pe_node_t *running = NULL;
pe_node_t *allowed = NULL;
pe_resource_t *top = uber_parent(rsc);
static const char *rc_promoted = NULL;
static const char *rc_inactive = NULL;
if (rc_inactive == NULL) {
rc_inactive = pcmk__itoa(PCMK_OCF_NOT_RUNNING);
rc_promoted = pcmk__itoa(PCMK_OCF_RUNNING_PROMOTED);
}
CRM_CHECK(node != NULL, return FALSE);
if (!force && !pcmk_is_set(data_set->flags, pe_flag_startup_probes)) {
pe_rsc_trace(rsc, "Skipping active resource detection for %s", rsc->id);
return FALSE;
}
if (pe__is_guest_or_remote_node(node)) {
const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
if (pcmk__str_eq(class, PCMK_RESOURCE_CLASS_STONITH, pcmk__str_casei)) {
pe_rsc_trace(rsc,
"Skipping probe for %s on %s because Pacemaker Remote nodes cannot run stonith agents",
rsc->id, node->details->id);
return FALSE;
} else if (pe__is_guest_node(node)
&& pe__resource_contains_guest_node(data_set, rsc)) {
pe_rsc_trace(rsc,
"Skipping probe for %s on %s because guest nodes cannot run resources containing guest nodes",
rsc->id, node->details->id);
return FALSE;
} else if (rsc->is_remote_node) {
pe_rsc_trace(rsc,
"Skipping probe for %s on %s because Pacemaker Remote nodes cannot host remote connections",
rsc->id, node->details->id);
return FALSE;
}
}
if (rsc->children) {
GList *gIter = NULL;
gboolean any_created = FALSE;
for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
any_created = child_rsc->cmds->create_probe(child_rsc, node, complete, force, data_set)
|| any_created;
}
return any_created;
} else if ((rsc->container) && (!rsc->is_remote_node)) {
pe_rsc_trace(rsc, "Skipping %s: it is within container %s", rsc->id, rsc->container->id);
return FALSE;
}
if (pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
pe_rsc_trace(rsc, "Skipping orphan: %s", rsc->id);
return FALSE;
}
// Check whether resource is already known on node
if (!force && g_hash_table_lookup(rsc->known_on, node->details->id)) {
pe_rsc_trace(rsc, "Skipping known: %s on %s", rsc->id, node->details->uname);
return FALSE;
}
allowed = g_hash_table_lookup(rsc->allowed_nodes, node->details->id);
if (rsc->exclusive_discover || top->exclusive_discover) {
if (allowed == NULL) {
/* exclusive discover is enabled and this node is not in the allowed list. */
pe_rsc_trace(rsc, "Skipping probe for %s on node %s, A", rsc->id, node->details->id);
return FALSE;
} else if (allowed->rsc_discover_mode != pe_discover_exclusive) {
/* exclusive discover is enabled and this node is not marked
* as a node this resource should be discovered on */
pe_rsc_trace(rsc, "Skipping probe for %s on node %s, B", rsc->id, node->details->id);
return FALSE;
}
}
if(allowed == NULL && node->rsc_discover_mode == pe_discover_never) {
/* If this node were allowed to host this resource, it would
* have been explicitly added to the 'allowed_nodes' list.
* It wasn't, and the node has discovery disabled, so there is
* no need to probe for this resource.
*/
pe_rsc_trace(rsc, "Skipping probe for %s on node %s, node discovery disabled", rsc->id, node->details->id);
return FALSE;
}
if (allowed && allowed->rsc_discover_mode == pe_discover_never) {
/* this resource is marked as not needing to be discovered on this node */
pe_rsc_trace(rsc, "Skipping probe for %s on node %s, discovery mode", rsc->id, node->details->id);
return FALSE;
}
if (pe__is_guest_node(node)) {
pe_resource_t *remote = node->details->remote_rsc->container;
if(remote->role == RSC_ROLE_STOPPED) {
/* If the container is stopped, then we know anything that
* might have been inside it is also stopped and there is
* no need to probe.
*
* If we don't know the container's state on the target
* either:
*
* - the container is running, the transition will abort
* and we'll end up in a different case next time, or
*
* - the container is stopped
*
* Either way there is no need to probe.
*
*/
if(remote->allocated_to
&& g_hash_table_lookup(remote->known_on, remote->allocated_to->details->id) == NULL) {
/* For safety, we order the 'rsc' start after 'remote'
* has been probed.
*
* Using 'top' helps for groups, but we may need to
* follow the start's ordering chain backwards.
*/
pcmk__new_ordering(remote,
pcmk__op_key(remote->id, RSC_STATUS, 0),
NULL, top,
pcmk__op_key(top->id, RSC_START, 0), NULL,
pe_order_optional, data_set);
}
pe_rsc_trace(rsc, "Skipping probe for %s on node %s, %s is stopped",
rsc->id, node->details->id, remote->id);
return FALSE;
/* Here we really want to check whether remote->stop is required,
* but that information doesn't exist yet
*/
} else if(node->details->remote_requires_reset
|| node->details->unclean
|| pcmk_is_set(remote->flags, pe_rsc_failed)
|| remote->next_role == RSC_ROLE_STOPPED
|| (remote->allocated_to
&& pe_find_node(remote->running_on, remote->allocated_to->details->uname) == NULL)
) {
/* The container is stopping or restarting, don't start
* 'rsc' until 'remote' stops as this also implies that
* 'rsc' is stopped - avoiding the need to probe
*/
pcmk__new_ordering(remote, pcmk__op_key(remote->id, RSC_STOP, 0),
NULL, top, pcmk__op_key(top->id, RSC_START, 0),
NULL, pe_order_optional, data_set);
pe_rsc_trace(rsc, "Skipping probe for %s on node %s, %s is stopping, restarting or moving",
rsc->id, node->details->id, remote->id);
return FALSE;
/* } else {
* The container is running so there is no problem probing it
*/
}
}
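// Schedule the probe itself, as a mandatory one-time status operation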
key = pcmk__op_key(rsc->id, RSC_STATUS, 0);
probe = custom_action(rsc, key, RSC_STATUS, node, FALSE, TRUE, data_set);
pe__clear_action_flags(probe, pe_action_optional);
pcmk__order_vs_unfence(rsc, node, probe, pe_order_optional, data_set);
/*
* We need to know if it's running_on (not just known_on) this node
* to correctly determine the target rc.
*/
running = pe_find_node_id(rsc->running_on, node->details->id);
if (running == NULL) {
add_hash_param(probe->meta, XML_ATTR_TE_TARGET_RC, rc_inactive);
} else if (rsc->role == RSC_ROLE_PROMOTED) {
add_hash_param(probe->meta, XML_ATTR_TE_TARGET_RC, rc_promoted);
}
crm_debug("Probing %s on %s (%s) %d %p", rsc->id, node->details->uname, role2text(rsc->role),
pcmk_is_set(probe->flags, pe_action_runnable), rsc->running_on);
if (pcmk__is_unfence_device(rsc, data_set) || !pe_rsc_is_clone(top)) {
top = rsc;
} else {
crm_trace("Probing %s on %s (%s) as %s", rsc->id, node->details->uname, role2text(rsc->role), top->id);
}
if (!pcmk_is_set(probe->flags, pe_action_runnable)
&& (rsc->running_on == NULL)) {
/* Prevent the start from occurring if rsc isn't active, but
* don't cause it to stop if it was active already
*/
pe__set_order_flags(flags, pe_order_runnable_left);
}
pcmk__new_ordering(rsc, NULL, probe, top,
pcmk__op_key(top->id, RSC_START, 0), NULL, flags,
data_set);
// Order the probe before any agent reload
pcmk__new_ordering(rsc, NULL, probe, top, reload_key(rsc), NULL,
pe_order_optional, data_set);
return TRUE;
}
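/*!
 * \internal
 * \brief Add primitive meta-attributes relevant to graph actions to XML
 *
 * Adds the clone instance number, the remote node name, and (for resources
 * inside a container) the container ID.
 *
 * \param[in]     rsc  Resource whose meta-attributes should be added
 * \param[in,out] xml  Transition graph action attributes XML to add to
 */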
void
native_append_meta(pe_resource_t * rsc, xmlNode * xml)
{
char *value = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INCARNATION);
pe_resource_t *parent;
if (value) {
char *name = NULL;
name = crm_meta_name(XML_RSC_ATTR_INCARNATION);
crm_xml_add(xml, name, value);
free(name);
}
value = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_REMOTE_NODE);
if (value) {
char *name = NULL;
name = crm_meta_name(XML_RSC_ATTR_REMOTE_NODE);
crm_xml_add(xml, name, value);
free(name);
}
for (parent = rsc; parent != NULL; parent = parent->parent) {
if (parent->container) {
crm_xml_add(xml, CRM_META"_"XML_RSC_ATTR_CONTAINER, parent->container->id);
}
}
}
// Primitive implementation of resource_alloc_functions_t:add_utilization()
void
pcmk__primitive_add_utilization(pe_resource_t *rsc, pe_resource_t *orig_rsc,
GList *all_rscs, GHashTable *utilization)
{
if (!pcmk_is_set(rsc->flags, pe_rsc_provisional)) {
return;
}
pe_rsc_trace(orig_rsc, "%s: Adding primitive %s as colocated utilization",
orig_rsc->id, rsc->id);
pcmk__release_node_capacity(utilization, rsc);
}
+
+/*!
+ * \internal
+ * \brief Get epoch time of node's shutdown attribute (or now if none)
+ *
+ * \param[in] node Node to check
+ * \param[in] data_set Cluster working set
+ *
+ * \return Epoch time corresponding to shutdown attribute if set or now if not
+ */
+static time_t
+shutdown_time(pe_node_t *node, pe_working_set_t *data_set)
+{
+ const char *shutdown = pe_node_attribute_raw(node, XML_CIB_ATTR_SHUTDOWN);
+ time_t result = 0;
+
+ if (shutdown != NULL) {
+ long long result_ll;
+
+ if (pcmk__scan_ll(shutdown, &result_ll, 0LL) == pcmk_rc_ok) {
+ result = (time_t) result_ll;
+ }
+ }
+ return (result == 0)? get_effective_time(data_set) : result;
+}
+
+// Primitive implementation of resource_alloc_functions_t:shutdown_lock()
+void
+pcmk__primitive_shutdown_lock(pe_resource_t *rsc)
+{
+ const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
+
+ // Fence devices and remote connections can't be locked
+ if (pcmk__str_eq(class, PCMK_RESOURCE_CLASS_STONITH, pcmk__str_null_matches)
+ || pe__resource_is_remote_conn(rsc, rsc->cluster)) {
+ return;
+ }
+
+ if (rsc->lock_node != NULL) {
+ // The lock was obtained from resource history
+
+ if (rsc->running_on != NULL) {
+ /* The resource was started elsewhere even though it is now
+ * considered locked. This shouldn't be possible, but as a
+ * failsafe, we don't want to disturb the resource now.
+ */
+ pe_rsc_info(rsc,
+ "Cancelling shutdown lock because %s is already active",
+ rsc->id);
+ pe__clear_resource_history(rsc, rsc->lock_node, rsc->cluster);
+ rsc->lock_node = NULL;
+ rsc->lock_time = 0;
+ }
+
+ // Only a resource active on exactly one node can be locked
+ } else if (pcmk__list_of_1(rsc->running_on)) {
+ pe_node_t *node = rsc->running_on->data;
+
+ if (node->details->shutdown) {
+ if (node->details->unclean) {
+ pe_rsc_debug(rsc, "Not locking %s to unclean %s for shutdown",
+ rsc->id, node->details->uname);
+ } else {
+ rsc->lock_node = node;
+ rsc->lock_time = shutdown_time(node, rsc->cluster);
+ }
+ }
+ }
+
+ if (rsc->lock_node == NULL) {
+ // No lock needed
+ return;
+ }
+
+ if (rsc->cluster->shutdown_lock > 0) {
+ time_t lock_expiration = rsc->lock_time + rsc->cluster->shutdown_lock;
+
+ pe_rsc_info(rsc, "Locking %s to %s due to shutdown (expires @%lld)",
+ rsc->id, rsc->lock_node->details->uname,
+ (long long) lock_expiration);
+ pe__update_recheck_time(++lock_expiration, rsc->cluster);
+ } else {
+ pe_rsc_info(rsc, "Locking %s to %s due to shutdown",
+ rsc->id, rsc->lock_node->details->uname);
+ }
+
+ // If resource is locked to one node, ban it from all other nodes
+ for (GList *item = rsc->cluster->nodes; item != NULL; item = item->next) {
+ pe_node_t *node = item->data;
+
+ if (strcmp(node->details->uname, rsc->lock_node->details->uname)) {
+ resource_location(rsc, node, -CRM_SCORE_INFINITY,
+ XML_CONFIG_ATTR_SHUTDOWN_LOCK, rsc->cluster);
+ }
+ }
+}
diff --git a/lib/pacemaker/pcmk_sched_resource.c b/lib/pacemaker/pcmk_sched_resource.c
index e9cf2e0aba..a3d6467758 100644
--- a/lib/pacemaker/pcmk_sched_resource.c
+++ b/lib/pacemaker/pcmk_sched_resource.c
@@ -1,703 +1,707 @@
/*
* Copyright 2014-2022 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#include <crm/msg_xml.h>
#include <pacemaker-internal.h>
#include "libpacemaker_private.h"
// Resource allocation methods that vary by resource variant
static resource_alloc_functions_t allocation_methods[] = {
{
pcmk__native_merge_weights,
pcmk__native_allocate,
native_create_actions,
native_create_probe,
native_internal_constraints,
native_rsc_colocation_lh,
native_rsc_colocation_rh,
pcmk__colocated_resources,
native_rsc_location,
native_action_flags,
native_update_actions,
pcmk__output_resource_actions,
native_expand,
native_append_meta,
pcmk__primitive_add_utilization,
+ pcmk__primitive_shutdown_lock,
},
{
pcmk__group_merge_weights,
pcmk__group_allocate,
group_create_actions,
native_create_probe,
group_internal_constraints,
group_rsc_colocation_lh,
group_rsc_colocation_rh,
pcmk__group_colocated_resources,
group_rsc_location,
group_action_flags,
group_update_actions,
pcmk__output_resource_actions,
group_expand,
group_append_meta,
pcmk__group_add_utilization,
+ pcmk__group_shutdown_lock,
},
{
pcmk__native_merge_weights,
pcmk__clone_allocate,
clone_create_actions,
clone_create_probe,
clone_internal_constraints,
clone_rsc_colocation_lh,
clone_rsc_colocation_rh,
pcmk__colocated_resources,
clone_rsc_location,
clone_action_flags,
pcmk__multi_update_actions,
pcmk__output_resource_actions,
clone_expand,
clone_append_meta,
pcmk__clone_add_utilization,
+ pcmk__clone_shutdown_lock,
},
{
pcmk__native_merge_weights,
pcmk__bundle_allocate,
pcmk__bundle_create_actions,
pcmk__bundle_create_probe,
pcmk__bundle_internal_constraints,
pcmk__bundle_rsc_colocation_lh,
pcmk__bundle_rsc_colocation_rh,
pcmk__colocated_resources,
pcmk__bundle_rsc_location,
pcmk__bundle_action_flags,
pcmk__multi_update_actions,
pcmk__output_bundle_actions,
pcmk__bundle_expand,
pcmk__bundle_append_meta,
pcmk__bundle_add_utilization,
+ pcmk__bundle_shutdown_lock,
}
};
/*!
* \internal
* \brief Check whether a resource's agent standard, provider, or type changed
*
* \param[in] rsc Resource to check
* \param[in] node Node needing unfencing/restart if agent changed
* \param[in] rsc_entry XML with previously known agent information
* \param[in] active_on_node Whether \p rsc is active on \p node
*
* \return true if agent for \p rsc changed, otherwise false
*/
bool
pcmk__rsc_agent_changed(pe_resource_t *rsc, pe_node_t *node,
const xmlNode *rsc_entry, bool active_on_node)
{
bool changed = false;
const char *attr_list[] = {
XML_ATTR_TYPE,
XML_AGENT_ATTR_CLASS,
XML_AGENT_ATTR_PROVIDER
};
for (int i = 0; i < PCMK__NELEM(attr_list); i++) {
const char *value = crm_element_value(rsc->xml, attr_list[i]);
const char *old_value = crm_element_value(rsc_entry, attr_list[i]);
if (!pcmk__str_eq(value, old_value, pcmk__str_none)) {
changed = true;
trigger_unfencing(rsc, node, "Device definition changed", NULL,
rsc->cluster);
if (active_on_node) {
crm_notice("Forcing restart of %s on %s "
"because %s changed from '%s' to '%s'",
rsc->id, node->details->uname, attr_list[i],
crm_str(old_value), crm_str(value));
}
}
}
if (changed && active_on_node) {
// Make sure the resource is restarted
custom_action(rsc, stop_key(rsc), CRMD_ACTION_STOP, node, FALSE, TRUE,
rsc->cluster);
pe__set_resource_flags(rsc, pe_rsc_start_pending);
}
return changed;
}
/*!
* \internal
* \brief Add resource (and any matching children) to list if it matches ID
*
* \param[in] result List to add resource to
* \param[in] rsc Resource to check
* \param[in] id ID to match
*
* \return (Possibly new) head of list
*/
static GList *
add_rsc_if_matching(GList *result, pe_resource_t *rsc, const char *id)
{
if ((strcmp(rsc->id, id) == 0)
|| ((rsc->clone_name != NULL) && (strcmp(rsc->clone_name, id) == 0))) {
result = g_list_prepend(result, rsc);
}
for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
pe_resource_t *child = (pe_resource_t *) iter->data;
result = add_rsc_if_matching(result, child, id);
}
return result;
}
/*!
* \internal
* \brief Find all resources matching a given ID by either ID or clone name
*
* \param[in] id Resource ID to check
* \param[in] data_set Cluster working set
*
* \return List of all resources that match \p id
* \note The caller is responsible for freeing the return value with
* g_list_free().
*/
GList *
pcmk__rscs_matching_id(const char *id, pe_working_set_t *data_set)
{
GList *result = NULL;
CRM_CHECK((id != NULL) && (data_set != NULL), return NULL);
for (GList *iter = data_set->resources; iter != NULL; iter = iter->next) {
result = add_rsc_if_matching(result, (pe_resource_t *) iter->data, id);
}
return result;
}
/*!
* \internal
* \brief Set the variant-appropriate allocation methods for a resource
*
* \param[in] rsc Resource to set allocation methods for
* \param[in] ignored Only here so function can be used with g_list_foreach()
*/
static void
set_allocation_methods_for_rsc(pe_resource_t *rsc, void *ignored)
{
rsc->cmds = &allocation_methods[rsc->variant];
g_list_foreach(rsc->children, (GFunc) set_allocation_methods_for_rsc, NULL);
}
/*!
* \internal
* \brief Set the variant-appropriate allocation methods for all resources
*
* \param[in] data_set Cluster working set
*/
void
pcmk__set_allocation_methods(pe_working_set_t *data_set)
{
g_list_foreach(data_set->resources, (GFunc) set_allocation_methods_for_rsc,
NULL);
}
// Shared implementation of resource_alloc_functions_t:colocated_resources()
GList *
pcmk__colocated_resources(pe_resource_t *rsc, pe_resource_t *orig_rsc,
GList *colocated_rscs)
{
GList *gIter = NULL;
if (orig_rsc == NULL) {
orig_rsc = rsc;
}
if ((rsc == NULL) || (g_list_find(colocated_rscs, rsc) != NULL)) {
return colocated_rscs;
}
pe_rsc_trace(orig_rsc, "%s is in colocation chain with %s",
rsc->id, orig_rsc->id);
colocated_rscs = g_list_append(colocated_rscs, rsc);
// Follow colocations where this resource is the dependent resource
for (gIter = rsc->rsc_cons; gIter != NULL; gIter = gIter->next) {
pcmk__colocation_t *constraint = (pcmk__colocation_t *) gIter->data;
pe_resource_t *primary = constraint->primary;
if (primary == orig_rsc) {
continue; // Break colocation loop
}
if ((constraint->score == INFINITY) &&
(pcmk__colocation_affects(rsc, primary, constraint,
true) == pcmk__coloc_affects_location)) {
colocated_rscs = primary->cmds->colocated_resources(primary,
orig_rsc,
colocated_rscs);
}
}
// Follow colocations where this resource is the primary resource
for (gIter = rsc->rsc_cons_lhs; gIter != NULL; gIter = gIter->next) {
pcmk__colocation_t *constraint = (pcmk__colocation_t *) gIter->data;
pe_resource_t *dependent = constraint->dependent;
if (dependent == orig_rsc) {
continue; // Break colocation loop
}
if (pe_rsc_is_clone(rsc) && !pe_rsc_is_clone(dependent)) {
continue; // We can't be sure whether dependent will be colocated
}
if ((constraint->score == INFINITY) &&
(pcmk__colocation_affects(dependent, rsc, constraint,
true) == pcmk__coloc_affects_location)) {
colocated_rscs = dependent->cmds->colocated_resources(dependent,
orig_rsc,
colocated_rscs);
}
}
return colocated_rscs;
}
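/*!
 * \internal
 * \brief Output a summary of scheduled actions for a resource
 *
 * For collective resources, each child is summarized instead. Stopped orphans
 * are not logged.
 *
 * \param[in] rsc  Resource to output actions for
 */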
void
pcmk__output_resource_actions(pe_resource_t *rsc)
{
pcmk__output_t *out = rsc->cluster->priv;
pe_node_t *next = NULL;
pe_node_t *current = NULL;
gboolean moving = FALSE;
if (rsc->children != NULL) {
for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
pe_resource_t *child = (pe_resource_t *) iter->data;
child->cmds->output_actions(child);
}
return;
}
next = rsc->allocated_to;
if (rsc->running_on) {
current = pe__current_node(rsc);
if (rsc->role == RSC_ROLE_STOPPED) {
/* This can occur when resources are being recovered, because the
* current role is fiddled with in native_create_actions()
*/
rsc->role = RSC_ROLE_STARTED;
}
}
if ((current == NULL) && pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
/* Don't log stopped orphans */
return;
}
out->message(out, "rsc-action", rsc, current, next, moving);
}
/*!
* \internal
* \brief Assign a specified primitive resource to a node
*
* Assign a specified primitive resource to a specified node, if the node can
* run the resource (or unconditionally, if \p force is true). Mark the resource
* as no longer provisional. If the primitive can't be assigned (or \p chosen is
* NULL), unassign any previous assignment for it, set its next role to stopped,
* and update any existing actions scheduled for it. This is not done
* recursively for children, so it should be called only for primitives.
*
* \param[in] rsc Resource to assign
* \param[in] chosen Node to assign \p rsc to
* \param[in] force If true, assign to \p chosen even if unavailable
*
* \return true if \p rsc could be assigned, otherwise false
*
* \note Assigning a resource to the NULL node using this function is different
* from calling pcmk__unassign_resource(), in that it will also update any
* actions created for the resource.
*/
bool
pcmk__assign_primitive(pe_resource_t *rsc, pe_node_t *chosen, bool force)
{
pcmk__output_t *out = rsc->cluster->priv;
CRM_ASSERT(rsc->variant == pe_native);
if (!force && (chosen != NULL)) {
if ((chosen->weight < 0)
// Allow the graph to assume that guest node connections will come up
|| (!pcmk__node_available(chosen) && !pe__is_guest_node(chosen))) {
crm_debug("All nodes for resource %s are unavailable, unclean or "
"shutting down (%s can%s run resources, with weight %d)",
rsc->id, chosen->details->uname,
(pcmk__node_available(chosen)? "" : "not"),
chosen->weight);
pe__set_next_role(rsc, RSC_ROLE_STOPPED, "node availability");
chosen = NULL;
}
}
pcmk__unassign_resource(rsc);
pe__clear_resource_flags(rsc, pe_rsc_provisional);
if (chosen == NULL) {
crm_debug("Could not allocate a node for %s", rsc->id);
pe__set_next_role(rsc, RSC_ROLE_STOPPED, "unable to allocate");
for (GList *iter = rsc->actions; iter != NULL; iter = iter->next) {
pe_action_t *op = (pe_action_t *) iter->data;
crm_debug("Updating %s for allocation failure", op->uuid);
if (pcmk__str_eq(op->task, RSC_STOP, pcmk__str_casei)) {
pe__clear_action_flags(op, pe_action_optional);
} else if (pcmk__str_eq(op->task, RSC_START, pcmk__str_casei)) {
pe__clear_action_flags(op, pe_action_runnable);
//pe__set_resource_flags(rsc, pe_rsc_block);
} else {
// Cancel recurring actions, unless for stopped state
const char *interval_ms_s = NULL;
const char *target_rc_s = NULL;
char *rc_stopped = pcmk__itoa(PCMK_OCF_NOT_RUNNING);
interval_ms_s = g_hash_table_lookup(op->meta,
XML_LRM_ATTR_INTERVAL_MS);
target_rc_s = g_hash_table_lookup(op->meta,
XML_ATTR_TE_TARGET_RC);
if ((interval_ms_s != NULL)
&& !pcmk__str_eq(interval_ms_s, "0", pcmk__str_none)
&& !pcmk__str_eq(rc_stopped, target_rc_s, pcmk__str_none)) {
pe__clear_action_flags(op, pe_action_runnable);
}
free(rc_stopped);
}
}
return false;
}
crm_debug("Assigning %s to %s", rsc->id, chosen->details->uname);
rsc->allocated_to = pe__copy_node(chosen);
chosen->details->allocated_rsc = g_list_prepend(chosen->details->allocated_rsc,
rsc);
chosen->details->num_resources++;
chosen->count++;
pcmk__consume_node_capacity(chosen->details->utilization, rsc);
if (pcmk_is_set(rsc->cluster->flags, pe_flag_show_utilization)) {
out->message(out, "resource-util", rsc, chosen, __func__);
}
return true;
}
/*!
* \internal
* \brief Assign a specified resource (of any variant) to a node
*
* Assign a specified resource and its children (if any) to a specified node, if
* the node can run the resource (or unconditionally, if \p force is true). Mark
* the resources as no longer provisional. If the resources can't be assigned
* (or \p chosen is NULL), unassign any previous assignments, set next role to
* stopped, and update any existing actions scheduled for them.
*
* \param[in] rsc Resource to assign
* \param[in] chosen Node to assign \p rsc to
* \param[in] force If true, assign to \p chosen even if unavailable
*
* \return true if \p rsc could be assigned, otherwise false
*
* \note Assigning a resource to the NULL node using this function is different
* from calling pcmk__unassign_resource(), in that it will also update any
* actions created for the resource.
*/
bool
pcmk__assign_resource(pe_resource_t *rsc, pe_node_t *node, bool force)
{
bool changed = false;
if (rsc->children == NULL) {
if (rsc->allocated_to != NULL) {
changed = true;
}
pcmk__assign_primitive(rsc, node, force);
} else {
for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
pe_resource_t *child_rsc = (pe_resource_t *) iter->data;
changed |= pcmk__assign_resource(child_rsc, node, force);
}
}
return changed;
}
/*!
* \internal
* \brief Remove any assignment of a specified resource to a node
*
* If a specified resource has been assigned to a node, remove that assignment
* and mark the resource as provisional again. This is not done recursively for
* children, so it should be called only for primitives.
*
* \param[in] rsc Resource to unassign
*/
void
pcmk__unassign_resource(pe_resource_t *rsc)
{
pe_node_t *old = rsc->allocated_to;
if (old == NULL) {
return;
}
crm_info("Unassigning %s from %s", rsc->id, old->details->uname);
pe__set_resource_flags(rsc, pe_rsc_provisional);
rsc->allocated_to = NULL;
/* We're going to free the pe_node_t, but its details member is shared and
* will remain, so update that appropriately first.
*/
old->details->allocated_rsc = g_list_remove(old->details->allocated_rsc,
rsc);
old->details->num_resources--;
pcmk__release_node_capacity(old->details->utilization, rsc);
free(old);
}
/*!
* \internal
* \brief Check whether a resource has reached its migration threshold on a node
*
* \param[in] rsc Resource to check
* \param[in] node Node to check
* \param[out] failed If the threshold has been reached, this will be set to
* the resource that failed (possibly a parent of \p rsc)
*
* \return true if the migration threshold has been reached, false otherwise
*/
bool
pcmk__threshold_reached(pe_resource_t *rsc, pe_node_t *node,
pe_resource_t **failed)
{
int fail_count, remaining_tries;
pe_resource_t *rsc_to_ban = rsc;
// Migration threshold of 0 means never force away
if (rsc->migration_threshold == 0) {
return false;
}
// If we're ignoring failures, also ignore the migration threshold
if (pcmk_is_set(rsc->flags, pe_rsc_failure_ignored)) {
return false;
}
// If there are no failures, there's no need to force away
fail_count = pe_get_failcount(node, rsc, NULL,
pe_fc_effective|pe_fc_fillers, NULL,
rsc->cluster);
if (fail_count <= 0) {
return false;
}
// If failed resource is anonymous clone instance, we'll force clone away
if (!pcmk_is_set(rsc->flags, pe_rsc_unique)) {
rsc_to_ban = uber_parent(rsc);
}
// How many more times recovery will be tried on this node
remaining_tries = rsc->migration_threshold - fail_count;
if (remaining_tries <= 0) {
crm_warn("%s cannot run on %s due to reaching migration threshold "
"(clean up resource to allow again)"
CRM_XS " failures=%d migration-threshold=%d",
rsc_to_ban->id, node->details->uname, fail_count,
rsc->migration_threshold);
if (failed != NULL) {
*failed = rsc_to_ban;
}
return true;
}
crm_info("%s can fail %d more time%s on "
"%s before reaching migration threshold (%d)",
rsc_to_ban->id, remaining_tries, pcmk__plural_s(remaining_tries),
node->details->uname, rsc->migration_threshold);
return false;
}
static void *
convert_const_pointer(const void *ptr)
{
/* Worst function ever: casts away const so resources can be passed
* to functions that take a non-const pe_resource_t *
*/
return (void *)ptr;
}
/*!
* \internal
* \brief Get a node's weight
*
* \param[in] node Unweighted node to check (for node ID)
* \param[in] nodes List of weighted nodes to look for \p node in
*
* \return Node's weight, or -INFINITY if not found
*/
static int
get_node_weight(pe_node_t *node, GHashTable *nodes)
{
pe_node_t *weighted_node = NULL;
if ((node != NULL) && (nodes != NULL)) {
weighted_node = g_hash_table_lookup(nodes, node->details->id);
}
return (weighted_node == NULL)? -INFINITY : weighted_node->weight;
}
/*!
* \internal
* \brief Compare two resources according to which should be allocated first
*
* \param[in] a First resource to compare
* \param[in] b Second resource to compare
* \param[in] data Sorted list of all nodes in cluster
*
* \return -1 if \p a should be allocated before \p b, 0 if they are equal,
*         or +1 if \p a should be allocated after \p b
*/
static gint
cmp_resources(gconstpointer a, gconstpointer b, gpointer data)
{
const pe_resource_t *resource1 = a;
const pe_resource_t *resource2 = b;
GList *nodes = (GList *) data;
int rc = 0;
int r1_weight = -INFINITY;
int r2_weight = -INFINITY;
pe_node_t *r1_node = NULL;
pe_node_t *r2_node = NULL;
GHashTable *r1_nodes = NULL;
GHashTable *r2_nodes = NULL;
const char *reason = NULL;
// Resources with highest priority should be allocated first
reason = "priority";
r1_weight = resource1->priority;
r2_weight = resource2->priority;
if (r1_weight > r2_weight) {
rc = -1;
goto done;
}
if (r1_weight < r2_weight) {
rc = 1;
goto done;
}
// We need nodes to make any other useful comparisons
reason = "no node list";
if (nodes == NULL) {
goto done;
}
// Calculate and log node weights
r1_nodes = pcmk__native_merge_weights(convert_const_pointer(resource1),
resource1->id, NULL, NULL, 1,
pe_weights_forward | pe_weights_init);
r2_nodes = pcmk__native_merge_weights(convert_const_pointer(resource2),
resource2->id, NULL, NULL, 1,
pe_weights_forward | pe_weights_init);
pe__show_node_weights(true, NULL, resource1->id, r1_nodes,
resource1->cluster);
pe__show_node_weights(true, NULL, resource2->id, r2_nodes,
resource2->cluster);
// The resource with highest score on its current node goes first
reason = "current location";
if (resource1->running_on != NULL) {
r1_node = pe__current_node(resource1);
}
if (resource2->running_on != NULL) {
r2_node = pe__current_node(resource2);
}
r1_weight = get_node_weight(r1_node, r1_nodes);
r2_weight = get_node_weight(r2_node, r2_nodes);
if (r1_weight > r2_weight) {
rc = -1;
goto done;
}
if (r1_weight < r2_weight) {
rc = 1;
goto done;
}
// Otherwise a higher weight on any node will do
reason = "score";
for (GList *iter = nodes; iter != NULL; iter = iter->next) {
pe_node_t *node = (pe_node_t *) iter->data;
r1_weight = get_node_weight(node, r1_nodes);
r2_weight = get_node_weight(node, r2_nodes);
if (r1_weight > r2_weight) {
rc = -1;
goto done;
}
if (r1_weight < r2_weight) {
rc = 1;
goto done;
}
}
done:
crm_trace("%s (%d)%s%s %c %s (%d)%s%s: %s",
resource1->id, r1_weight,
((r1_node == NULL)? "" : " on "),
((r1_node == NULL)? "" : r1_node->details->id),
((rc < 0)? '>' : ((rc > 0)? '<' : '=')),
resource2->id, r2_weight,
((r2_node == NULL)? "" : " on "),
((r2_node == NULL)? "" : r2_node->details->id),
reason);
if (r1_nodes != NULL) {
g_hash_table_destroy(r1_nodes);
}
if (r2_nodes != NULL) {
g_hash_table_destroy(r2_nodes);
}
return rc;
}
/*!
* \internal
* \brief Sort resources in the order they should be allocated to nodes
*
* \param[in] data_set Cluster working set
*/
void
pcmk__sort_resources(pe_working_set_t *data_set)
{
GList *nodes = g_list_copy(data_set->nodes);
nodes = pcmk__sort_nodes(nodes, NULL, data_set);
data_set->resources = g_list_sort_with_data(data_set->resources,
cmp_resources, nodes);
g_list_free(nodes);
}
diff --git a/lib/pacemaker/pcmk_sched_utilization.c b/lib/pacemaker/pcmk_sched_utilization.c
index ecbcc33c4b..734380854b 100644
--- a/lib/pacemaker/pcmk_sched_utilization.c
+++ b/lib/pacemaker/pcmk_sched_utilization.c
@@ -1,443 +1,464 @@
/*
* Copyright 2014-2022 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#include <crm/msg_xml.h>
#include <pacemaker-internal.h>
#include "libpacemaker_private.h"
/*!
* \internal
* \brief Get integer utilization from a string
*
* \param[in] s String representation of a node utilization value
*
* \return Integer equivalent of \p s
* \todo It would make sense to restrict utilization values to nonnegative
* integers, but the documentation just says "integers" and we didn't
* restrict them initially, so for backward compatibility, allow any
* integer.
*/
static int
utilization_value(const char *s)
{
int value = 0;
if ((s != NULL) && (pcmk__scan_min_int(s, &value, INT_MIN) == EINVAL)) {
pe_warn("Using 0 for utilization instead of invalid value '%s'", value);
value = 0;
}
return value;
}
/*
* Functions for comparing node capacities
*/
struct compare_data {
const pe_node_t *node1;
const pe_node_t *node2;
bool node2_only;
int result;
};
/*!
* \internal
* \brief Compare a single utilization attribute for two nodes
*
* Compare one utilization attribute for two nodes, incrementing the result if
* the first node has greater capacity, and decrementing it if the second node
* has greater capacity.
*
* \param[in] key Utilization attribute name to compare
* \param[in] value Utilization attribute value to compare
* \param[in] user_data Comparison data (as struct compare_data*)
*/
static void
compare_utilization_value(gpointer key, gpointer value, gpointer user_data)
{
int node1_capacity = 0;
int node2_capacity = 0;
struct compare_data *data = user_data;
const char *node2_value = NULL;
if (data->node2_only) {
if (g_hash_table_lookup(data->node1->details->utilization, key)) {
return; // We've already compared this attribute
}
} else {
node1_capacity = utilization_value((const char *) value);
}
node2_value = g_hash_table_lookup(data->node2->details->utilization, key);
node2_capacity = utilization_value(node2_value);
if (node1_capacity > node2_capacity) {
data->result--;
} else if (node1_capacity < node2_capacity) {
data->result++;
}
}
/*!
* \internal
* \brief Compare utilization capacities of two nodes
*
* \param[in] node1 First node to compare
* \param[in] node2 Second node to compare
*
* \return Negative integer if node1 has more free capacity,
* 0 if the capacities are equal, or a positive integer
* if node2 has more free capacity
*/
int
pcmk__compare_node_capacities(const pe_node_t *node1, const pe_node_t *node2)
{
struct compare_data data = {
.node1 = node1,
.node2 = node2,
.node2_only = false,
.result = 0,
};
// Compare utilization values that node1 and maybe node2 have
g_hash_table_foreach(node1->details->utilization, compare_utilization_value,
&data);
// Compare utilization values that only node2 has
data.node2_only = true;
g_hash_table_foreach(node2->details->utilization, compare_utilization_value,
&data);
return data.result;
}
/*
* Functions for updating node capacities
*/
struct calculate_data {
GHashTable *current_utilization;
bool plus;
};
/*!
* \internal
* \brief Update a single utilization attribute with a new value
*
* \param[in] key Name of utilization attribute to update
* \param[in] value Value to add or subtract
* \param[in] user_data Calculation data (as struct calculate_data *)
*/
static void
update_utilization_value(gpointer key, gpointer value, gpointer user_data)
{
int result = 0;
const char *current = NULL;
struct calculate_data *data = user_data;
current = g_hash_table_lookup(data->current_utilization, key);
if (data->plus) {
result = utilization_value(current) + utilization_value(value);
} else if (current) {
result = utilization_value(current) - utilization_value(value);
}
g_hash_table_replace(data->current_utilization,
strdup(key), pcmk__itoa(result));
}
/*!
* \internal
* \brief Subtract a resource's utilization from node capacity
*
* \param[in] current_utilization Current node utilization attributes
* \param[in] rsc Resource with utilization to subtract
*/
void
pcmk__consume_node_capacity(GHashTable *current_utilization, pe_resource_t *rsc)
{
struct calculate_data data = {
.current_utilization = current_utilization,
.plus = false,
};
g_hash_table_foreach(rsc->utilization, update_utilization_value, &data);
}
/*!
* \internal
* \brief Add a resource's utilization to node capacity
*
* \param[in] current_utilization Current node utilization attributes
* \param[in] rsc Resource with utilization to add
*/
void
pcmk__release_node_capacity(GHashTable *current_utilization, pe_resource_t *rsc)
{
struct calculate_data data = {
.current_utilization = current_utilization,
.plus = true,
};
g_hash_table_foreach(rsc->utilization, update_utilization_value, &data);
}
/*
* Functions for checking for sufficient node capacity
*/
struct capacity_data {
pe_node_t *node;
const char *rsc_id;
bool is_enough;
};
/*!
* \internal
* \brief Check whether a single utilization attribute has sufficient capacity
*
* \param[in] key Name of utilization attribute to check
* \param[in] value Amount of utilization required
* \param[in] user_data Capacity data (as struct capacity_data *)
*/
static void
check_capacity(gpointer key, gpointer value, gpointer user_data)
{
int required = 0;
int remaining = 0;
const char *node_value_s = NULL;
struct capacity_data *data = user_data;
node_value_s = g_hash_table_lookup(data->node->details->utilization, key);
required = utilization_value(value);
remaining = utilization_value(node_value_s);
if (required > remaining) {
crm_debug("Remaining capacity for %s on %s (%d) is insufficient "
"for resource %s usage (%d)",
(const char *) key, data->node->details->uname, remaining,
data->rsc_id, required);
data->is_enough = false;
}
}
/*!
* \internal
* \brief Check whether a node has sufficient capacity for a resource
*
* \param[in] node Node to check
* \param[in] rsc_id ID of resource to check (for debug logs only)
* \param[in] utilization Required utilization amounts
*
* \return true if node has sufficient capacity for resource, otherwise false
*/
static bool
have_enough_capacity(pe_node_t *node, const char *rsc_id,
GHashTable *utilization)
{
struct capacity_data data = {
.node = node,
.rsc_id = rsc_id,
.is_enough = true,
};
g_hash_table_foreach(utilization, check_capacity, &data);
return data.is_enough;
}
/*!
* \internal
* \brief Sum the utilization requirements of a list of resources
*
* \param[in] orig_rsc Resource being allocated (for logging purposes)
* \param[in] rscs Resources whose utilization should be summed
*
* \return Newly allocated hash table with sum of all utilization values
* \note It is the caller's responsibility to free the return value using
* g_hash_table_destroy().
*/
static GHashTable *
sum_resource_utilization(pe_resource_t *orig_rsc, GList *rscs)
{
GHashTable *utilization = pcmk__strkey_table(free, free);
for (GList *iter = rscs; iter != NULL; iter = iter->next) {
pe_resource_t *rsc = (pe_resource_t *) iter->data;
rsc->cmds->add_utilization(rsc, orig_rsc, rscs, utilization);
}
return utilization;
}
/*!
* \internal
* \brief Ban resource from nodes with insufficient utilization capacity
*
* \param[in] rsc Resource to check
* \param[in,out] prefer Resource's preferred node (might be updated)
* \param[in] data_set Cluster working set
*/
void
pcmk__ban_insufficient_capacity(pe_resource_t *rsc, pe_node_t **prefer,
pe_working_set_t *data_set)
{
bool any_capable = false;
char *rscs_id = NULL;
pe_node_t *node = NULL;
pe_node_t *most_capable_node = NULL;
GList *colocated_rscs = NULL;
GHashTable *unallocated_utilization = NULL;
GHashTableIter iter;
CRM_CHECK((rsc != NULL) && (prefer != NULL) && (data_set != NULL), return);
// The default placement strategy ignores utilization
if (pcmk__str_eq(data_set->placement_strategy, "default",
pcmk__str_casei)) {
return;
}
// Check whether any resources are colocated with this one
colocated_rscs = rsc->cmds->colocated_resources(rsc, NULL, NULL);
if (colocated_rscs == NULL) {
return;
}
rscs_id = crm_strdup_printf("%s and its colocated resources", rsc->id);
// If rsc isn't in the list, add it so we include its utilization
if (g_list_find(colocated_rscs, rsc) == NULL) {
colocated_rscs = g_list_append(colocated_rscs, rsc);
}
// Sum utilization of colocated resources that haven't been allocated yet
unallocated_utilization = sum_resource_utilization(rsc, colocated_rscs);
// Check whether any node has enough capacity for all the resources
g_hash_table_iter_init(&iter, rsc->allowed_nodes);
while (g_hash_table_iter_next(&iter, NULL, (void **) &node)) {
if (!pcmk__node_available(node) || (node->weight < 0)) {
continue;
}
if (have_enough_capacity(node, rscs_id, unallocated_utilization)) {
any_capable = true;
}
// Keep track of node with most free capacity
if ((most_capable_node == NULL)
|| (pcmk__compare_node_capacities(node, most_capable_node) < 0)) {
most_capable_node = node;
}
}
if (any_capable) {
// If so, ban resource from any node with insufficient capacity
g_hash_table_iter_init(&iter, rsc->allowed_nodes);
while (g_hash_table_iter_next(&iter, NULL, (void **) &node)) {
if ((node->weight >= 0) && pcmk__node_available(node)
&& !have_enough_capacity(node, rscs_id,
unallocated_utilization)) {
pe_rsc_debug(rsc, "%s does not have enough capacity for %s",
node->details->uname, rscs_id);
resource_location(rsc, node, -INFINITY, "__limit_utilization__",
data_set);
}
}
} else {
// Otherwise, ban from nodes with insufficient capacity for rsc alone
if (*prefer == NULL) {
*prefer = most_capable_node;
}
g_hash_table_iter_init(&iter, rsc->allowed_nodes);
while (g_hash_table_iter_next(&iter, NULL, (void **) &node)) {
if ((node->weight >= 0) && pcmk__node_available(node)
&& !have_enough_capacity(node, rsc->id, rsc->utilization)) {
pe_rsc_debug(rsc, "%s does not have enough capacity for %s",
node->details->uname, rsc->id);
resource_location(rsc, node, -INFINITY, "__limit_utilization__",
data_set);
}
}
}
g_hash_table_destroy(unallocated_utilization);
g_list_free(colocated_rscs);
free(rscs_id);
pe__show_node_weights(true, rsc, "Post-utilization",
rsc->allowed_nodes, data_set);
}
/*!
* \internal
* \brief Create a new load_stopped pseudo-op for a node
*
* \param[in] node Node to create op for
* \param[in] data_set Cluster working set
*
* \return Newly created load_stopped op
*/
static pe_action_t *
new_load_stopped_op(const pe_node_t *node, pe_working_set_t *data_set)
{
char *load_stopped_task = crm_strdup_printf(LOAD_STOPPED "_%s",
node->details->uname);
pe_action_t *load_stopped = get_pseudo_op(load_stopped_task, data_set);
if (load_stopped->node == NULL) {
load_stopped->node = pe__copy_node(node);
pe__clear_action_flags(load_stopped, pe_action_optional);
}
free(load_stopped_task);
return load_stopped;
}
/*!
* \internal
* \brief Create utilization-related internal constraints for a resource
*
* \param[in] rsc Resource to create constraints for
* \param[in] allowed_nodes List of allowed next nodes for \p rsc
*/
void
pcmk__create_utilization_constraints(pe_resource_t *rsc, GList *allowed_nodes)
{
GList *iter = NULL;
pe_node_t *node = NULL;
pe_action_t *load_stopped = NULL;
pe_rsc_trace(rsc, "Creating utilization constraints for %s - strategy: %s",
rsc->id, rsc->cluster->placement_strategy);
// "stop rsc then load_stopped" constraints for current nodes
for (iter = rsc->running_on; iter != NULL; iter = iter->next) {
node = (pe_node_t *) iter->data;
load_stopped = new_load_stopped_op(node, rsc->cluster);
pcmk__new_ordering(rsc, stop_key(rsc), NULL, NULL, NULL, load_stopped,
pe_order_load, rsc->cluster);
}
// "load_stopped then start/migrate_to rsc" constraints for allowed nodes
for (GList *iter = allowed_nodes; iter; iter = iter->next) {
node = (pe_node_t *) iter->data;
load_stopped = new_load_stopped_op(node, rsc->cluster);
pcmk__new_ordering(NULL, NULL, load_stopped, rsc, start_key(rsc), NULL,
pe_order_load, rsc->cluster);
pcmk__new_ordering(NULL, NULL, load_stopped,
rsc, pcmk__op_key(rsc->id, RSC_MIGRATE, 0), NULL,
pe_order_load, rsc->cluster);
}
}
+
+/*!
+ * \internal
+ * \brief Output node capacities if enabled
+ *
+ * \param[in] desc Prefix for output
+ * \param[in] data_set Cluster working set
+ */
+void
+pcmk__show_node_capacities(const char *desc, pe_working_set_t *data_set)
+{
+ if (!pcmk_is_set(data_set->flags, pe_flag_show_utilization)) {
+ return;
+ }
+ for (GList *iter = data_set->nodes; iter != NULL; iter = iter->next) {
+ pe_node_t *node = (pe_node_t *) iter->data;
+ pcmk__output_t *out = data_set->priv;
+
+ out->message(out, "node-capacity", node, desc);
+ }
+}